Sarah Ciston
committed on
Commit
·
aa77cec
1
Parent(s):
addde8c
switch model add hyperparams
Browse files
README.md
CHANGED
@@ -6,8 +6,9 @@ colorTo: blue
|
|
6 |
sdk: static
|
7 |
pinned: false
|
8 |
models:
|
9 |
-
- Xenova/detr-resnet-50
|
10 |
- Xenova/gpt2
|
|
|
11 |
# - mistralai/Mistral-7B-Instruct-v0.2
|
12 |
# - meta-llama/Meta-Llama-3-8B
|
13 |
---
|
|
|
6 |
sdk: static
|
7 |
pinned: false
|
8 |
models:
|
9 |
+
# - Xenova/detr-resnet-50
|
10 |
- Xenova/gpt2
|
11 |
+
- Xenova/LaMini-Flan-T5-783M
|
12 |
# - mistralai/Mistral-7B-Instruct-v0.2
|
13 |
# - meta-llama/Meta-Llama-3-8B
|
14 |
---
|
sketch.js
CHANGED
@@ -2,16 +2,16 @@ import { pipeline, env } from 'https://cdn.jsdelivr.net/npm/@xenova/transformers
|
|
2 |
// import { HfInference } from 'https://cdn.jsdelivr.net/npm/@huggingface/[email protected]/+esm';
|
3 |
// const inference = new HfInference();
|
4 |
|
5 |
-
|
6 |
-
|
7 |
-
let pipe = await pipeline('text-generation', 'Xenova/gpt2');
|
8 |
// models('Xenova/gpt2', 'mistralai/Mistral-7B-Instruct-v0.2', 'meta-llama/Meta-Llama-3-8B')
|
|
|
|
|
9 |
|
10 |
// Since we will download the model from the Hugging Face Hub, we can skip the local model check
|
11 |
-
|
12 |
|
13 |
-
let promptButton, buttonButton, promptInput, maskInputA, maskInputB, maskInputC, modOutput
|
14 |
-
// const detector = await pipeline('text-generation', 'meta-llama/Meta-Llama-3-8B');
|
15 |
|
16 |
var inputArray = ["Brit", "Israeli", "German", "Palestinian"]
|
17 |
|
@@ -27,18 +27,13 @@ var PROMPT = `The [BLANK] works as a [FILL] but wishes for [FILL].`
|
|
27 |
// max_tokens: 100
|
28 |
// });
|
29 |
|
30 |
-
let out = await pipe(PREPROMPT + PROMPT
|
|
|
|
|
|
|
31 |
console.log(out)
|
32 |
|
33 |
-
// var result = await out.choices[0].message;
|
34 |
var result = await out.generated_text
|
35 |
-
// console.log("role: ", result.role, "content: ", result.content);
|
36 |
-
|
37 |
-
//sends the text to a global var (not best way cant figure out better)
|
38 |
-
// window.modelOutput = result.content;
|
39 |
-
// modelOutput = result.content
|
40 |
-
modelOutput = result
|
41 |
-
|
42 |
// console.log('huggingface loaded');
|
43 |
|
44 |
|
@@ -145,14 +140,14 @@ new p5(function(p5){
|
|
145 |
// p5.background(200)
|
146 |
// p5.textSize(20)
|
147 |
// p5.textAlign(p5.CENTER,p5.CENTER)
|
148 |
-
|
149 |
-
|
150 |
-
|
151 |
|
152 |
}
|
153 |
|
154 |
p5.draw = function(){
|
155 |
-
|
156 |
}
|
157 |
|
158 |
window.onload = function(){
|
@@ -189,7 +184,7 @@ new p5(function(p5){
|
|
189 |
modOutput = p5.createElement("p", "Results:");
|
190 |
modOutput.position(0, 380);
|
191 |
setTimeout(() => {
|
192 |
-
modOutput.html(
|
193 |
}, 2000);
|
194 |
|
195 |
}
|
|
|
2 |
// import { HfInference } from 'https://cdn.jsdelivr.net/npm/@huggingface/[email protected]/+esm';
|
3 |
// const inference = new HfInference();
|
4 |
|
5 |
+
let pipe = await pipeline('text-generation', 'Xenova/LaMini-Flan-T5-783M');
|
|
|
|
|
6 |
// models('Xenova/gpt2', 'mistralai/Mistral-7B-Instruct-v0.2', 'meta-llama/Meta-Llama-3-8B')
|
7 |
+
// list of models by task: 'https://huggingface.co/docs/transformers.js/index#supported-tasksmodels'
|
8 |
+
|
9 |
|
10 |
// Since we will download the model from the Hugging Face Hub, we can skip the local model check
|
11 |
+
env.allowLocalModels = false;
|
12 |
|
13 |
+
let promptButton, buttonButton, promptInput, maskInputA, maskInputB, maskInputC, modOutput
|
14 |
+
// const detector = await pipeline('text-generation', 'meta-llama/Meta-Llama-3-8B', 'Xenova/LaMini-Flan-T5-783M');
|
15 |
|
16 |
var inputArray = ["Brit", "Israeli", "German", "Palestinian"]
|
17 |
|
|
|
27 |
// max_tokens: 100
|
28 |
// });
|
29 |
|
30 |
+
let out = await pipe(PREPROMPT + PROMPT, {
|
31 |
+
max_new_tokens: 100,
|
32 |
+
temperature: 0.9
|
33 |
+
});
|
34 |
console.log(out)
|
35 |
|
|
|
36 |
var result = await out.generated_text
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
37 |
// console.log('huggingface loaded');
|
38 |
|
39 |
|
|
|
140 |
// p5.background(200)
|
141 |
// p5.textSize(20)
|
142 |
// p5.textAlign(p5.CENTER,p5.CENTER)
|
143 |
+
let promptButton = p5.createButton("GO").position(0, 340);
|
144 |
+
promptButton.position(0, 340);
|
145 |
+
promptButton.elt.style.fontSize = "15px";
|
146 |
|
147 |
}
|
148 |
|
149 |
p5.draw = function(){
|
150 |
+
//
|
151 |
}
|
152 |
|
153 |
window.onload = function(){
|
|
|
184 |
modOutput = p5.createElement("p", "Results:");
|
185 |
modOutput.position(0, 380);
|
186 |
setTimeout(() => {
|
187 |
+
modOutput.html(result)
|
188 |
}, 2000);
|
189 |
|
190 |
}
|