Sarah Ciston
committed on
Commit
·
28962fb
1
Parent(s):
dcfef54
add model back into loop
Browse files
sketch.js
CHANGED
@@ -14,7 +14,7 @@ const inference = new HfInference();
|
|
14 |
|
15 |
|
16 |
|
17 |
-
let PROMPT, promptResult, submitButton, addButton, promptInput, inputValues, modelDisplay, modelResult
|
18 |
|
19 |
// const detector = await pipeline('text-generation', 'meta-llama/Meta-Llama-3-8B', 'Xenova/LaMini-Flan-T5-783M');
|
20 |
|
@@ -23,10 +23,6 @@ let blankArray = []
|
|
23 |
let MODELNAME = 'Xenova/gpt-3.5-turbo'
|
24 |
// models('Xenova/gpt2', 'Xenova/gpt-3.5-turbo', 'mistralai/Mistral-7B-Instruct-v0.2', 'Xenova/llama-68m', 'meta-llama/Meta-Llama-3-8B', 'Xenova/bloom-560m', 'Xenova/distilgpt2')
|
25 |
|
26 |
-
|
27 |
-
var PREPROMPT = `Please return an array of sentences. In each sentence, fill in the [BLANK] in the following sentence with each word I provide in the array ${inputValues}. Replace any [FILL] with an appropriate word of your choice.`
|
28 |
-
|
29 |
-
|
30 |
///// p5 STUFF
|
31 |
|
32 |
|
@@ -125,7 +121,7 @@ new p5(function(p5){
|
|
125 |
}
|
126 |
}
|
127 |
|
128 |
-
function getInputs(){
|
129 |
// Map the list of blanks text values to a new list
|
130 |
let inputValues = blankArray.map(i => i.value())
|
131 |
console.log(inputValues)
|
@@ -133,8 +129,11 @@ new p5(function(p5){
|
|
133 |
// Do model stuff in this function instead of in general
|
134 |
PROMPT = promptInput.value() // updated check of the prompt field
|
135 |
|
|
|
|
|
|
|
|
|
136 |
await runModel()
|
137 |
-
// BLANKS = inputValues // get ready to feed array list into model
|
138 |
}
|
139 |
|
140 |
// var modelResult = submitButton.mousePressed(runModel) = function(){
|
@@ -166,7 +165,7 @@ new p5(function(p5){
|
|
166 |
// var blankArray = [`${blankAResult}`, `${blankBResult}`, `${blankCResult}`]
|
167 |
|
168 |
|
169 |
-
async function runModel(prompt, blanks){
|
170 |
// Chat completion API
|
171 |
const out = await inference.chatCompletion({
|
172 |
model: MODELNAME,
|
@@ -175,24 +174,24 @@ async function runModel(prompt, blanks){
|
|
175 |
max_tokens: 100
|
176 |
});
|
177 |
|
178 |
-
//
|
179 |
-
//
|
180 |
-
//
|
181 |
-
//
|
182 |
-
// //
|
183 |
-
//
|
184 |
-
// //
|
185 |
-
// //
|
186 |
-
//
|
187 |
-
//
|
188 |
-
|
189 |
-
|
190 |
-
|
191 |
-
//
|
192 |
-
|
193 |
-
|
194 |
-
|
195 |
-
|
196 |
|
197 |
|
198 |
// Reference the elements that we will need
|
|
|
14 |
|
15 |
|
16 |
|
17 |
+
let PROMPT, PREPROMPT, promptResult, submitButton, addButton, promptInput, inputValues, modelDisplay, modelResult
|
18 |
|
19 |
// const detector = await pipeline('text-generation', 'meta-llama/Meta-Llama-3-8B', 'Xenova/LaMini-Flan-T5-783M');
|
20 |
|
|
|
23 |
let MODELNAME = 'Xenova/gpt-3.5-turbo'
|
24 |
// models('Xenova/gpt2', 'Xenova/gpt-3.5-turbo', 'mistralai/Mistral-7B-Instruct-v0.2', 'Xenova/llama-68m', 'meta-llama/Meta-Llama-3-8B', 'Xenova/bloom-560m', 'Xenova/distilgpt2')
|
25 |
|
|
|
|
|
|
|
|
|
26 |
///// p5 STUFF
|
27 |
|
28 |
|
|
|
121 |
}
|
122 |
}
|
123 |
|
124 |
+
async function getInputs(){
|
125 |
// Map the list of blanks text values to a new list
|
126 |
let inputValues = blankArray.map(i => i.value())
|
127 |
console.log(inputValues)
|
|
|
129 |
// Do model stuff in this function instead of in general
|
130 |
PROMPT = promptInput.value() // updated check of the prompt field
|
131 |
|
132 |
+
BLANKS = inputValues // get ready to feed array list into model
|
133 |
+
|
134 |
+
PREPROMPT = `Please return an array of sentences. In each sentence, fill in the [BLANK] in the following sentence with each word I provide in the array ${inputValues}. Replace any [FILL] with an appropriate word of your choice.`
|
135 |
+
|
136 |
await runModel()
|
|
|
137 |
}
|
138 |
|
139 |
// var modelResult = submitButton.mousePressed(runModel) = function(){
|
|
|
165 |
// var blankArray = [`${blankAResult}`, `${blankBResult}`, `${blankCResult}`]
|
166 |
|
167 |
|
168 |
+
async function runModel(){
|
169 |
// Chat completion API
|
170 |
const out = await inference.chatCompletion({
|
171 |
model: MODELNAME,
|
|
|
174 |
max_tokens: 100
|
175 |
});
|
176 |
|
177 |
+
// let out = await pipe(PREPROMPT + PROMPT)
|
178 |
+
// let out = await pipe(PREPROMPT + PROMPT, {
|
179 |
+
// max_new_tokens: 250,
|
180 |
+
// temperature: 0.9,
|
181 |
+
// // return_full_text: False,
|
182 |
+
// repetition_penalty: 1.5,
|
183 |
+
// // no_repeat_ngram_size: 2,
|
184 |
+
// // num_beams: 2,
|
185 |
+
// num_return_sequences: 1
|
186 |
+
// });
|
187 |
+
console.log(out)
|
188 |
+
|
189 |
+
var modelResult = await out.choices[0].message.content
|
190 |
+
// var modelResult = await out[0].generated_text
|
191 |
+
console.log(modelResult);
|
192 |
+
|
193 |
+
return modelResult
|
194 |
+
}
|
195 |
|
196 |
|
197 |
// Reference the elements that we will need
|