Update README.md
README.md CHANGED
@@ -53,8 +53,8 @@ import torch
 from transformers import AutoTokenizer, AutoModelForCausalLM
 
 # Load the model and tokenizer
-tokenizer = AutoTokenizer.from_pretrained("
-model = AutoModelForCausalLM.from_pretrained('
+tokenizer = AutoTokenizer.from_pretrained("prem-research/prem-1B-chat")
+model = AutoModelForCausalLM.from_pretrained('prem-research/prem-1B-chat', torch_dtype=torch.bfloat16)
 model = model.to('cuda')
 
 # Setup terminators
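For context, here is a minimal end-to-end sketch of how the updated loading snippet might be used. The prompt, the apply_chat_template call, and the generation arguments are illustrative assumptions; the README lines that follow this hunk (e.g. the terminator setup) are truncated in this view.

```python
# Hypothetical end-to-end usage of the updated snippet; the prompt and
# generation settings are illustrative assumptions, not part of this diff.
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

tokenizer = AutoTokenizer.from_pretrained("prem-research/prem-1B-chat")
model = AutoModelForCausalLM.from_pretrained("prem-research/prem-1B-chat", torch_dtype=torch.bfloat16)
model = model.to('cuda')

# Build a chat prompt with the tokenizer's chat template (assumed to be
# defined for this checkpoint) and move it onto the model's device.
messages = [{"role": "user", "content": "What is a large language model?"}]
input_ids = tokenizer.apply_chat_template(
    messages, add_generation_prompt=True, return_tensors="pt"
).to("cuda")

# Generate, then decode only the newly produced tokens.
outputs = model.generate(input_ids, max_new_tokens=256)
print(tokenizer.decode(outputs[0][input_ids.shape[-1]:], skip_special_tokens=True))
```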
@@ -89,7 +89,7 @@ import torch
 from transformers import pipeline
 
 # Load the pipeline
-pipe = pipeline("text-generation", model="
+pipe = pipeline("text-generation", model="prem-research/prem-1B-chat", torch_dtype=torch.bfloat16, device=0)
 
 # Prepare prompt
 messages = [
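Likewise, a short sketch of calling the updated pipeline. Passing chat-style messages straight to a text-generation pipeline assumes a transformers version recent enough to apply the model's chat template; the prompt and max_new_tokens value are illustrative.

```python
# Hypothetical usage of the updated pipeline call; the chat-style input
# assumes a transformers release that applies the model's chat template.
import torch
from transformers import pipeline

pipe = pipeline("text-generation", model="prem-research/prem-1B-chat",
                torch_dtype=torch.bfloat16, device=0)

messages = [{"role": "user", "content": "What does a tokenizer do?"}]
result = pipe(messages, max_new_tokens=256)

# With chat input, generated_text holds the conversation, reply last.
print(result[0]["generated_text"][-1]["content"])
```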