psinger committed
Commit c004858 · 1 parent: 2d7c584

Update README.md

Files changed (1):
  1. README.md +4 -4
README.md CHANGED
@@ -34,7 +34,7 @@ import torch
 from transformers import pipeline
 
 generate_text = pipeline(
-    model="psinger/h2ogpt-gm-oasst1-en-2048-open-llama-7b-preview-300bt",
+    model="h2oai/h2ogpt-gm-oasst1-en-2048-open-llama-7b-preview-300bt",
     torch_dtype=torch.float16,
     trust_remote_code=True,
     use_fast=False,
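For reference, the updated snippet reads end to end roughly as below. Only the `model=` line is part of this diff; the `device_map` argument and the final generation call are assumptions based on typical h2oGPT usage.

```python
import torch
from transformers import pipeline

# trust_remote_code=True lets the custom pipeline code shipped in the
# model repo run; device_map is an assumption, not shown in this hunk.
generate_text = pipeline(
    model="h2oai/h2ogpt-gm-oasst1-en-2048-open-llama-7b-preview-300bt",
    torch_dtype=torch.float16,
    trust_remote_code=True,
    use_fast=False,
    device_map={"": "cuda:0"},
)

# Illustrative call; the prompt and max_new_tokens are example choices.
res = generate_text("Why is drinking water so healthy?", max_new_tokens=256)
print(res[0]["generated_text"])
```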
@@ -72,12 +72,12 @@ from h2oai_pipeline import H2OTextGenerationPipeline
 from transformers import AutoModelForCausalLM, AutoTokenizer
 
 tokenizer = AutoTokenizer.from_pretrained(
-    "psinger/h2ogpt-gm-oasst1-en-2048-open-llama-7b-preview-300bt",
+    "h2oai/h2ogpt-gm-oasst1-en-2048-open-llama-7b-preview-300bt",
     use_fast=False,
     padding_side="left"
 )
 model = AutoModelForCausalLM.from_pretrained(
-    "psinger/h2ogpt-gm-oasst1-en-2048-open-llama-7b-preview-300bt",
+    "h2oai/h2ogpt-gm-oasst1-en-2048-open-llama-7b-preview-300bt",
     torch_dtype=torch.float16,
     device_map={"": "cuda:0"}
 )
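The hunk's context line imports `H2OTextGenerationPipeline` from the repo's `h2oai_pipeline` module, so the loaded model and tokenizer are presumably wrapped as sketched below; the wrapping and generation lines are an assumption, not part of the diff.

```python
import torch
from h2oai_pipeline import H2OTextGenerationPipeline  # module shipped in the model repo
from transformers import AutoModelForCausalLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained(
    "h2oai/h2ogpt-gm-oasst1-en-2048-open-llama-7b-preview-300bt",
    use_fast=False,
    padding_side="left",
)
model = AutoModelForCausalLM.from_pretrained(
    "h2oai/h2ogpt-gm-oasst1-en-2048-open-llama-7b-preview-300bt",
    torch_dtype=torch.float16,
    device_map={"": "cuda:0"},
)

# Assumed step: wrap model and tokenizer in the custom pipeline class
# referenced in the hunk header's context line.
generate_text = H2OTextGenerationPipeline(model=model, tokenizer=tokenizer)
res = generate_text("Why is drinking water so healthy?", max_new_tokens=256)
print(res[0]["generated_text"])
```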
@@ -101,7 +101,7 @@ You may also construct the pipeline from the loaded model and tokenizer yourself
 ```python
 from transformers import AutoModelForCausalLM, AutoTokenizer
 
-model_name = "psinger/h2ogpt-gm-oasst1-en-2048-open-llama-7b-preview-300bt"  # either local folder or huggingface model name
+model_name = "h2oai/h2ogpt-gm-oasst1-en-2048-open-llama-7b-preview-300bt"  # either local folder or huggingface model name
 # Important: The prompt needs to be in the same format the model was trained with.
 # You can find an example prompt in the experiment logs.
 prompt = "<|prompt|>How are you?</s><|answer|>"
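Continuing that snippet past the diff context, generation from the hand-built prompt presumably proceeds along these lines; everything after the `prompt = ...` line is a sketch from the standard `transformers` API, not shown in the hunk.

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "h2oai/h2ogpt-gm-oasst1-en-2048-open-llama-7b-preview-300bt"  # either local folder or huggingface model name

tokenizer = AutoTokenizer.from_pretrained(model_name, use_fast=False, padding_side="left")
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype=torch.float16,
    device_map={"": "cuda:0"},
)

# Important: the prompt must be in the same format the model was trained with.
prompt = "<|prompt|>How are you?</s><|answer|>"

# add_special_tokens=False keeps the hand-built template intact.
inputs = tokenizer(prompt, return_tensors="pt", add_special_tokens=False).to(model.device)
tokens = model.generate(**inputs, max_new_tokens=256)[0]
answer = tokenizer.decode(tokens, skip_special_tokens=True)
print(answer)
```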
 