Sarah Ciston committed on
Commit
1bf613a
·
1 Parent(s): 07cff5b

change model, add function to display result

Browse files
Files changed (3) hide show
  1. README.md +6 -3
  2. sketch.js +24 -27
  3. tutorial.mdx +100 -0
README.md CHANGED
@@ -6,15 +6,18 @@ colorTo: blue
6
  sdk: static
7
  pinned: false
8
  models:
 
 
9
  # - Xenova/detr-resnet-50
10
  # - Xenova/gpt2
11
  # - Xenova/bloom-560m
12
- - Xenova/distilgpt2
13
- - Xenova/gpt-3.5-turbo
14
  # - Xenova/llama-68m
15
  # - Xenova/LaMini-Flan-T5-783M
16
- # - mistralai/Mistral-7B-Instruct-v0.2
17
  # - meta-llama/Meta-Llama-3-8B
 
18
  ---
19
 
20
  Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
6
  sdk: static
7
  pinned: false
8
  models:
9
+ - meta-llama/Meta-Llama-3-70B-Instruct
10
+ # - meta-llama/Meta-Llama-3-70B-Instruct
11
  # - Xenova/detr-resnet-50
12
  # - Xenova/gpt2
13
  # - Xenova/bloom-560m
14
+ # - Xenova/distilgpt2
15
+ # - Xenova/gpt-3.5-turbo
16
  # - Xenova/llama-68m
17
  # - Xenova/LaMini-Flan-T5-783M
18
+ - mistralai/Mistral-7B-Instruct-v0.2
19
  # - meta-llama/Meta-Llama-3-8B
20
+
21
  ---
22
 
23
  Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
sketch.js CHANGED
@@ -20,8 +20,8 @@ let PROMPT, PREPROMPT, promptResult, submitButton, addButton, promptInput, input
20
 
21
  let blankArray = []
22
 
23
- let MODELNAME = 'Xenova/distilgpt2'
24
- // models('Xenova/gpt2', 'Xenova/gpt-3.5-turbo', 'mistralai/Mistral-7B-Instruct-v0.2', 'Xenova/llama-68m', 'meta-llama/Meta-Llama-3-8B', 'Xenova/bloom-560m', 'Xenova/distilgpt2')
25
 
26
  ///// p5 STUFF
27
 
@@ -81,7 +81,7 @@ new p5(function(p5){
81
 
82
  // send prompt to model
83
  submitButton = p5.createButton("SUBMIT")
84
- // submitButton.position(0,600)
85
  submitButton.size(200)
86
  submitButton.class('submit');
87
  submitButton.mousePressed(getInputs)
@@ -89,15 +89,9 @@ new p5(function(p5){
89
  // add more blanks to fill in
90
  addButton = p5.createButton("more blanks")
91
  addButton.size(200)
92
- // addButton.position(150,600)
93
  addButton.mousePressed(addField)
94
 
95
- modelDisplay = p5.createElement("p", "Results:");
96
- modelDisplay.position(0, 380);
97
- setTimeout(() => {
98
- modelDisplay.html(modelResult)
99
- }, 2000);
100
-
101
  // TO-DO a model drop down list?
102
 
103
  // describe(``)
@@ -135,9 +129,24 @@ new p5(function(p5){
135
 
136
  // we pass PROMPT and PREPROMPT to the model function, don't need to pass INPUTVALUES bc it's passed into the PREPROMPT already here
137
 
138
- await runModel(PREPROMPT, PROMPT)
 
 
 
 
 
 
 
139
  }
140
 
 
 
 
 
 
 
 
 
141
  // var modelResult = submitButton.mousePressed(runModel) = function(){
142
  // // listens for the button to be clicked
143
  // // run the prompt through the model here
@@ -166,13 +175,15 @@ new p5(function(p5){
166
  // // for num of blanks put in list
167
  // var blankArray = [`${blankAResult}`, `${blankBResult}`, `${blankCResult}`]
168
 
 
 
169
 
170
  async function runModel(PREPROMPT, PROMPT){
171
  // Chat completion API
172
- const out = await inference.chatCompletion({
173
  model: MODELNAME,
174
  // model: "google/gemma-2-9b",
175
- messages: [{ role: "user", content: PREPROMPT + PROMPT }],
176
  max_tokens: 100
177
  });
178
 
@@ -270,18 +281,4 @@ async function runModel(PREPROMPT, PROMPT){
270
 
271
  // boxElement.appendChild(labelElement);
272
  // imageContainer.appendChild(boxElement);
273
- // }
274
-
275
- // function setup(){
276
- // let canvas = createCanvas(200,200)
277
- // canvas.position(300, 1000);
278
- // background(200)
279
- // textSize(20)
280
- // textAlign(CENTER,CENTER)
281
- // console.log('p5 loaded')
282
-
283
- // }
284
-
285
- // function draw(){
286
- // //
287
  // }
 
20
 
21
  let blankArray = []
22
 
23
+ let MODELNAME = "meta-llama/Meta-Llama-3-8B-Instruct"
24
+ // models('Xenova/gpt2', 'Xenova/gpt-3.5-turbo', 'mistralai/Mistral-7B-Instruct-v0.2', 'Xenova/llama-68m', "meta-llama/Meta-Llama-3-70B-Instruct", 'meta-llama/Meta-Llama-3-8B', 'Xenova/bloom-560m', 'Xenova/distilgpt2', "meta-llama/Meta-Llama-3-70B-Instruct")
25
 
26
  ///// p5 STUFF
27
 
 
81
 
82
  // send prompt to model
83
  submitButton = p5.createButton("SUBMIT")
84
+ submitButton.position(0,600)
85
  submitButton.size(200)
86
  submitButton.class('submit');
87
  submitButton.mousePressed(getInputs)
 
89
  // add more blanks to fill in
90
  addButton = p5.createButton("more blanks")
91
  addButton.size(200)
92
+ addButton.position(150,600)
93
  addButton.mousePressed(addField)
94
 
 
 
 
 
 
 
95
  // TO-DO a model drop down list?
96
 
97
  // describe(``)
 
129
 
130
  // we pass PROMPT and PREPROMPT to the model function, don't need to pass INPUTVALUES bc it's passed into the PREPROMPT already here
131
 
132
+ modelResult = await runModel(PREPROMPT, PROMPT)
133
+
134
+ await displayModel(modelResult)
135
+ }
136
+
137
+ async function displayModel(m){
138
+ modelDisplay = p5.createElement("p", "Results:");
139
+ await modelDisplay.html(m)
140
  }
141
 
142
+ // async function showResults(){
143
+ // modelDisplay = p5.createElement("p", "Results:");
144
+ // // modelDisplay.position(0, 380);
145
+ // setTimeout(() => {
146
+ // modelDisplay.html(modelResult)
147
+ // }, 2000);
148
+ // }
149
+
150
  // var modelResult = submitButton.mousePressed(runModel) = function(){
151
  // // listens for the button to be clicked
152
  // // run the prompt through the model here
 
175
  // // for num of blanks put in list
176
  // var blankArray = [`${blankAResult}`, `${blankBResult}`, `${blankCResult}`]
177
 
178
+ //Error: Server Xenova/distilgpt2 does not seem to support chat completion. Error: HfApiJson(Deserialize(Error("unknown variant `transformers.js`, expected one of `text-generation-inference`, `transformers`, `allennlp`, `flair`, `espnet`, `asteroid`, `speechbrain`, `timm`, `sentence-transformers`, `spacy`, `sklearn`, `stanza`, `adapter-transformers`, `fasttext`, `fairseq`, `pyannote-audio`, `doctr`, `nemo`, `fastai`, `k2`, `diffusers`, `paddlenlp`, `mindspore`, `open_clip`, `span-marker`, `bertopic`, `peft`, `setfit`", line: 1, column: 397)))
179
+
180
 
181
  async function runModel(PREPROMPT, PROMPT){
182
  // Chat completion API
183
+ const out = await inference.chat_completion({ //inference.fill_mask({
184
  model: MODELNAME,
185
  // model: "google/gemma-2-9b",
186
+ // messages: [{ role: "user", content: PREPROMPT + PROMPT }],
187
  max_tokens: 100
188
  });
189
 
 
281
 
282
  // boxElement.appendChild(labelElement);
283
  // imageContainer.appendChild(boxElement);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
284
  // }
tutorial.mdx ADDED
@@ -0,0 +1,100 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ title: Critical AI Prompt Battle
3
+ authors:
4
+ - Sarah Ciston
5
+ ---
6
+
7
+ # p5.js Critical AI Prompt Battle
8
+ By Sarah Ciston
9
+ With Emily Martinez and Minne Atairu
10
+
11
+ ## What are we making?
12
+
13
+ In this tutorial, you can build a tool to run several AI chat prompts at once and compare their results. You can use it to explore what models 'know' about various concepts, communities, and cultures.
14
+
15
+ This tutorial is part 2 in a series of 5 tutorials that focus on using AI creatively and thoughtfully.
16
+ Part 1: [Making a ToolBox for Making Critical AI]
17
+ Part 3: [Training Dataset Explorer]
18
+ Part 4: [Machine Learning Model Inspector & Poetry Machine]
19
+ Part 5: [Putting Critical Tools into Practice]
20
+
21
+ The code and content in this tutorial build on information from the prior tutorial to start creating your first tool for your p5.js Critical AI Kit. It also builds on fantastic work on critical prompt programming by Yasmin Morgan (2022), Katy Gero (2023), and Minne Atairu (2024).
22
+
23
+ ## Why compare prompts?
24
+
25
+ When you're using a chatbot to generate code or an email, it's easy to imagine its outputs are neutral and harmless. It seems like any system would output basically the same result. Does this matter for basic uses like making a plain image or having a simple conversation? Absolutely. Training datasets are shaping even the most innocuous outputs. This training shows up in subtle insidious ways.
26
+
27
+ Unfortunately, the sleek chatbot interface hides all the decision-making that leads to a prompt output. To glimpse the differences, we can test many variations by making our own tool. With our tool, we can hope to understand more about the underlying assumptions contained in the training dataset. That gives us more information to decide how we select and use these models — and for which contexts.
28
+
29
+ ## Steps
30
+
31
+ 1. Make a copy of your toolkit prototype from Tutorial One and rename it "Critical AI Prompt Battle" to follow along. To jump ahead, you can make a copy of the finished example at the link below. But we really encourage you to type along with us!
32
+
33
+ 2. [PSEUDOCODE] Add the inference module to the top.
34
+
35
+ 3. [PSEUDOCODE] Add the model of choice to the README.md and sketch.js
36
+
37
+ 4. [PSEUDOCODE] Create variables (eg MODEL, PROMPT, PREPROMPT, BLANKS, blankArray, etc)
38
+ Set PREPROMPT = `Return an array of sentences. In each sentence, fill in the [BLANK] in the following sentence with each word I provide in the array ${blankArray}. Replace any [FILL] with an appropriate word of your choice.`
39
+
40
+
41
+
42
+ 5. [PSEUDOCODE] Add async function runModel() wrapping HF API await
43
+
44
+ 6. [PSEUDOCODE] Add model results processing with await
45
+
46
+ 7. [PSEUDOCODE] Create makeInterface() and add features
47
+
48
+ 8. [PSEUDOCODE] Connect form, test with console.log
49
+
50
+ 9. [PSEUDOCODE] Connect model results, send model results to interface
51
+
52
+ 10. [PSEUDOCODE] Test with simple example. A basic prompt may include WHAT/WHO is described, WHERE they are, WHAT they're doing, perhaps also describing HOW. When writing your prompt, replace one of these aspects with [BLANK] so that you instruct the model to fill it in iteratively with the words you provide (Morgan 2022, Gero 2023). Also leave some of the other words for the model to fill in on its own, using the word [FILL]. We instructed the model to replace these on its own in the PREPROMPT.
53
+
54
+ 11. [PSEUDOCODE] Test with more complex example (add a model, add a field)
55
+
56
+ 12. [PSEUDOCODE] Make a list of topics that interest you to try with your tool. Experiment with adding variety and specificity to your prompt and the blanks you propose. Try different sentence structures and topics.
57
+ What's the most unusual or obscure, most 'usual' or 'normal', or most nonsensical blank you might propose?
58
+ Try different types of nouns — people, places, things, ideas; different descriptors — adjectives and adverbs — to see how these shape the results. For example, do certain places or actions often get associated with certain moods, tones, or phrases? Where are these based on outdated or stereotypical assumptions?
59
+ How does the output change if you change the language, dialect, or vernacular (e.g. slang versus business phrasing)? (Atairu 2024).
60
+
61
+ >"How do the outputs vary as demographic characteristics like skin color, gender or region change? Do these variances reflect any known harmful societal stereotypes?" (Atairu 2024)
62
+ >"Are stereotypical assumptions about your subject [represented]? Consider factors such as race, gender, socioeconomic status, ability. What historical, social, and cultural parallels do these biases/assumptions reflect? Discuss how these elements might mirror real-world issues or contexts. (Atairu 2024)
63
+
64
+ ### Reflections
65
+
66
+ Here we have created a tool to test different kinds of prompts quickly and to modify them easily, allowing us to compare prompts at scale. By comparing how outputs change with subtle shifts in prompts, we can explore how implicit bias emerges from — and is repeated and amplified through — large-scale machine learning models. It helps us understand that unwanted outputs are not just glitches in an otherwise working system, and that every output (no matter how boring) contains the influence of its dataset.
67
+
68
+ ### Compare different prompts:
69
+
70
+ See how subtle changes in your inputs can lead to large changes in the output. Sometimes these also reveal large gaps in the model's available knowledge. What does the model 'know' about communities who are less represented in its data? How has this data been limited?
71
+
72
+ ### Reconsider neutral:
73
+
74
+ This tool helps us recognize that there is no 'neutral' output — no version of a text, and no language model, is neutral. Each result is informed by context. Each result reflects differences in representation and cultural understanding, which have been amplified by the statistical power of the model.
75
+
76
+ ### Consider your choice of words and tools:
77
+
78
+ How does this help you think "against the grain"? Rather than taking the output of a system for granted as valid, how might you question or reflect on it? How will you use this tool in your practice?
79
+
80
+ ## Next steps
81
+
82
+ ### Expand your tool:
83
+
84
+ This tool lets you scale up your prompt adjustments. We have built a tool comparing word choices in the same basic prompt. You've also built a simple interface for accessing pre-trained models that does not require a login or another company's interface. It lets you easily control your input and output, with the interface you built.
85
+
86
+ Keep playing with the p5.js DOM functions to build your interface & the HuggingFace API. What features might you add? You might also adapt this tool to compare wholly different prompts, or even to compare different models running the same prompt.
87
+
88
+ Next we will add additional aspects to the interface that let you adjust more features and explore even further.
89
+
90
+ ## Further considerations
91
+
92
+ Consider making it a habit to add text like "AI generated" to the title of any content you produce using a generative AI tool, and include details of your process in its description (Atairu 2024).
93
+
94
+ ## References
95
+
96
+ > Ref Katy's project (Gero 2023).
97
+
98
+ Morgan, Yasmin. 2022. "AIxDesign Icebreakers, Mini-Games & Interactive Exercises." https://aixdesign.co/posts/ai-icebreakers-mini-games-interactive-exercises
99
+
100
+ > Ref Minne's worksheet (Atairu 2024)