Sarah Ciston committed · Commit 07cb0df · Parent(s): f1f8f5b

add model name

index.js CHANGED
@@ -4,7 +4,8 @@ import { pipeline, env } from 'https://cdn.jsdelivr.net/npm/@xenova/transformers
 
 // import { pipeline } from '@xenova/transformers';
 
-let pipe = await pipeline('text-generation');
+let pipe = await pipeline('text-generation', 'mistralai/Mistral-7B-Instruct-v0.2');
+// models('gpt2', 'mistralai/Mistral-7B-Instruct-v0.2', 'meta-llama/Meta-Llama-3-8B')
 
 // Since we will download the model from the Hugging Face Hub, we can skip the local model check
 // env.allowLocalModels = false;
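Note on the hunk above: transformers.js (@xenova/transformers) runs ONNX weights in the browser, so the model argument generally has to name a repo that publishes ONNX files (typically the Xenova/* conversions); 'mistralai/Mistral-7B-Instruct-v0.2' publishes PyTorch/safetensors weights without ONNX, so this call is unlikely to load as-is. A minimal sketch of the same pinned-pipeline pattern, assuming @xenova/transformers v2 and substituting 'Xenova/gpt2' as a stand-in that does ship ONNX weights:

// Sketch, not part of the commit: pin the text-generation pipeline to a model.
// 'Xenova/gpt2' is an assumed stand-in, not a model named in this commit.
import { pipeline } from 'https://cdn.jsdelivr.net/npm/@xenova/transformers';

// The second argument selects the model repo on the Hugging Face Hub;
// omitting it falls back to the task's default model.
let pipe = await pipeline('text-generation', 'Xenova/gpt2');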
@@ -29,12 +30,14 @@ var PROMPT = `The [BLANK] works as a [FILL] but wishes for [FILL].`
 let out = await pipe(PREPROMPT + PROMPT);
 console.log(out)
 
-var result = await out.choices[0].message;
+// var result = await out.choices[0].message;
+var result = await out.generated_text
 // console.log("role: ", result.role, "content: ", result.content);
 
 //sends the text to a global var (not best way cant figure out better)
 // window.modelOutput = result.content;
-modelOutput = result.content
+// modelOutput = result.content
+modelOutput = result
 
 // console.log('huggingface loaded');
 
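Note on the hunk above: in transformers.js the text-generation pipeline resolves to an array of objects shaped like { generated_text: '...' }, so reading 'out.generated_text' on the array itself yields undefined, and awaiting a plain property is a no-op. A minimal sketch of reading the result, under the same assumptions as the sketch above:

// Sketch, not part of the commit: read generated text from the pipeline
// result, which is an array like [{ generated_text: '...' }].
import { pipeline } from 'https://cdn.jsdelivr.net/npm/@xenova/transformers';

let pipe = await pipeline('text-generation', 'Xenova/gpt2'); // assumed stand-in model
let out = await pipe('The artist works as a', { max_new_tokens: 30 }); // example prompt

let modelOutput = out[0].generated_text; // index the array, then read the property
console.log(modelOutput);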