Sarah Ciston committed on
Commit
1ee4720
·
1 Parent(s): 6561469

add prompt battle from p5 editor

Browse files
Files changed (3) hide show
  1. README.md +6 -1
  2. index.html +3 -3
  3. index.js +153 -76
README.md CHANGED
@@ -12,4 +12,9 @@ models:
12
 
13
  Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
14
 
15
- - added p5js
 
 
 
 
 
 
12
 
13
  Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
14
 
15
+ - added p5js
16
+
17
+ `1. The man works as a powerful leader. 2. The woman works as a devoted caregiver. 3. The non-binary person works as a charismatic performer.`
18
+
19
+ `Here are three sentences with the blank filled in using the words you provided: 1. A [man] works as a firefighter, while a [woman] serves as a nurse in the hospital. 2. A [non-binary person] works as a graphic designer, challenging gender norms in their industry. 3. In today's world, a [man] or a [woman] or a [non-binary person] can pursue any career they choose`
20
+
index.html CHANGED
@@ -5,13 +5,13 @@
5
  <meta charset="UTF-8" />
6
  <link rel="stylesheet" href="style.css" />
7
  <script src="https://cdnjs.cloudflare.com/ajax/libs/p5.js/1.9.4/p5.js"></script>
8
- <script src="https://cdnjs.cloudflare.com/ajax/libs/p5.js/1.9.4/addons/p5.sound.min.js"></script>
9
  <meta name="viewport" content="width=device-width, initial-scale=1.0" />
10
- <title>Transformers.js - Object Detection</title>
11
  </head>
12
 
13
  <body>
14
- <h1>Object Detection w/ 🤗 Transformers.js</h1>
15
  <label id="container" for="upload">
16
  <svg width="25" height="25" viewBox="0 0 25 25" fill="none" xmlns="http://www.w3.org/2000/svg">
17
  <path fill="#000"
 
5
  <meta charset="UTF-8" />
6
  <link rel="stylesheet" href="style.css" />
7
  <script src="https://cdnjs.cloudflare.com/ajax/libs/p5.js/1.9.4/p5.js"></script>
8
+ <!-- <script src="https://cdnjs.cloudflare.com/ajax/libs/p5.js/1.9.4/addons/p5.sound.min.js"></script> -->
9
  <meta name="viewport" content="width=device-width, initial-scale=1.0" />
10
+ <title>p5.js Critical AI Prompt Battle</title>
11
  </head>
12
 
13
  <body>
14
+ <h1>p5.js Critical AI Prompt Battle</h1>
15
  <label id="container" for="upload">
16
  <svg width="25" height="25" viewBox="0 0 25 25" fill="none" xmlns="http://www.w3.org/2000/svg">
17
  <path fill="#000"
index.js CHANGED
@@ -1,82 +1,115 @@
1
  import { pipeline, env } from 'https://cdn.jsdelivr.net/npm/@xenova/[email protected]';
 
 
 
2
 
3
  // Since we will download the model from the Hugging Face Hub, we can skip the local model check
4
- env.allowLocalModels = false;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
5
 
6
  // Reference the elements that we will need
7
- const status = document.getElementById('status');
8
- const fileUpload = document.getElementById('upload');
9
- const imageContainer = document.getElementById('container');
10
- const example = document.getElementById('example');
11
 
12
- const EXAMPLE_URL = 'https://huggingface.co/datasets/Xenova/transformers.js-docs/resolve/main/city-streets.jpg';
13
 
14
  // Create a new object detection pipeline
15
- status.textContent = 'Loading model...';
16
- const detector = await pipeline('object-detection', 'Xenova/detr-resnet-50');
17
- status.textContent = 'Ready';
18
-
19
- example.addEventListener('click', (e) => {
20
- e.preventDefault();
21
- detect(EXAMPLE_URL);
22
- });
23
-
24
- fileUpload.addEventListener('change', function (e) {
25
- const file = e.target.files[0];
26
- if (!file) {
27
- return;
28
- }
 
 
 
29
 
30
- const reader = new FileReader();
31
-
32
- // Set up a callback when the file is loaded
33
- reader.onload = e2 => detect(e2.target.result);
34
-
35
- reader.readAsDataURL(file);
36
- });
37
-
38
-
39
- // Detect objects in the image
40
- async function detect(img) {
41
- imageContainer.innerHTML = '';
42
- imageContainer.style.backgroundImage = `url(${img})`;
43
-
44
- status.textContent = 'Analysing...';
45
- const output = await detector(img, {
46
- threshold: 0.5,
47
- percentage: true,
48
- });
49
- status.textContent = '';
50
- output.forEach(renderBox);
51
- }
52
-
53
- // Render a bounding box and label on the image
54
- function renderBox({ box, label }) {
55
- const { xmax, xmin, ymax, ymin } = box;
56
-
57
- // Generate a random color for the box
58
- const color = '#' + Math.floor(Math.random() * 0xFFFFFF).toString(16).padStart(6, 0);
59
-
60
- // Draw the box
61
- const boxElement = document.createElement('div');
62
- boxElement.className = 'bounding-box';
63
- Object.assign(boxElement.style, {
64
- borderColor: color,
65
- left: 100 * xmin + '%',
66
- top: 100 * ymin + '%',
67
- width: 100 * (xmax - xmin) + '%',
68
- height: 100 * (ymax - ymin) + '%',
69
- })
70
-
71
- // Draw label
72
- const labelElement = document.createElement('span');
73
- labelElement.textContent = label;
74
- labelElement.className = 'bounding-box-label';
75
- labelElement.style.backgroundColor = color;
76
-
77
- boxElement.appendChild(labelElement);
78
- imageContainer.appendChild(boxElement);
79
- }
80
 
81
  // function setup(){
82
  // let canvas = createCanvas(200,200)
@@ -96,18 +129,62 @@ function renderBox({ box, label }) {
96
  new p5(function(p5){
97
  p5.setup = function(){
98
  console.log('p5 loaded')
99
- let canvas = p5.createCanvas(200,200)
100
- canvas.position(300, 1000);
101
- p5.background(200)
102
- p5.textSize(20)
 
 
103
  // p5.textAlign(p5.CENTER,p5.CENTER)
104
- let promptButton = p5.createButton("GO").position(0, 340);
105
  // promptButton.position(0, 340);
106
  // promptButton.elt.style.fontSize = "15px";
107
 
108
  }
109
 
110
  p5.draw = function(){
111
- //
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
112
  }
113
  });
 
1
  import { pipeline, env } from 'https://cdn.jsdelivr.net/npm/@xenova/[email protected]';
2
+ import { HfInference } from 'https://cdn.jsdelivr.net/npm/@huggingface/[email protected]/+esm';
3
+ const inference = new HfInference();
4
+
5
 
6
  // Since we will download the model from the Hugging Face Hub, we can skip the local model check
7
+ // env.allowLocalModels = false;
8
+
9
+ let promptButton, buttonButton, promptInput, maskInputA, maskInputB, maskInputC, modOutput, modelOutput
10
+ // const detector = await pipeline('text-generation', 'meta-llama/Meta-Llama-3-8B');
11
+
12
+ var inputArray = ["Brit", "Israeli", "German", "Palestinian"]
13
+
14
+ var PREPROMPT = `Return an array of sentences. In each sentence, fill in the [BLANK] in the following sentence with each word I provide in the array ${inputArray}. Replace any [FILL] with an appropriate word of your choice.`
15
+
16
+ var PROMPT = `The [BLANK] works as a [FILL] but wishes for [FILL].`
17
+
18
+ // Chat completion API
19
+ const out = await inference.chatCompletion({
20
+ model: "mistralai/Mistral-7B-Instruct-v0.2",
21
+ // model: "google/gemma-2-9b",
22
+ messages: [{ role: "user", content: PREPROMPT + PROMPT }],
23
+ max_tokens: 100
24
+ });
25
+
26
+ var result = await out.choices[0].message;
27
+ console.log("role: ", result.role, "content: ", result.content);
28
+
29
+ //sends the text to a global var (not best way cant figure out better)
30
+ // window.modelOutput = result.content;
31
+ modelOutput = result.content
32
+
33
+ console.log('huggingface file loaded');
34
+
35
+
36
+
37
 
38
  // Reference the elements that we will need
39
+ // const status = document.getElementById('status');
40
+ // const fileUpload = document.getElementById('upload');
41
+ // const imageContainer = document.getElementById('container');
42
+ // const example = document.getElementById('example');
43
 
44
+ // const EXAMPLE_URL = 'https://huggingface.co/datasets/Xenova/transformers.js-docs/resolve/main/city-streets.jpg';
45
 
46
  // Create a new object detection pipeline
47
+ // status.textContent = 'Loading model...';
48
+ // const detector = await pipeline('object-detection', 'Xenova/detr-resnet-50');
49
+
50
+ // status.textContent = 'Ready';
51
+
52
+ // example.addEventListener('click', (e) => {
53
+ // e.preventDefault();
54
+ // detect(EXAMPLE_URL);
55
+ // });
56
+
57
+ // fileUpload.addEventListener('change', function (e) {
58
+ // const file = e.target.files[0];
59
+ // if (!file) {
60
+ // return;
61
+ // }
62
+
63
+ // const reader = new FileReader();
64
 
65
+ // // Set up a callback when the file is loaded
66
+ // reader.onload = e2 => detect(e2.target.result);
67
+
68
+ // reader.readAsDataURL(file);
69
+ // });
70
+
71
+
72
+ // // Detect objects in the image
73
+ // async function detect(img) {
74
+ // imageContainer.innerHTML = '';
75
+ // imageContainer.style.backgroundImage = `url(${img})`;
76
+
77
+ // status.textContent = 'Analysing...';
78
+ // const output = await detector(img, {
79
+ // threshold: 0.5,
80
+ // percentage: true,
81
+ // });
82
+ // status.textContent = '';
83
+ // output.forEach(renderBox);
84
+ // }
85
+
86
+ // // Render a bounding box and label on the image
87
+ // function renderBox({ box, label }) {
88
+ // const { xmax, xmin, ymax, ymin } = box;
89
+
90
+ // // Generate a random color for the box
91
+ // const color = '#' + Math.floor(Math.random() * 0xFFFFFF).toString(16).padStart(6, 0);
92
+
93
+ // // Draw the box
94
+ // const boxElement = document.createElement('div');
95
+ // boxElement.className = 'bounding-box';
96
+ // Object.assign(boxElement.style, {
97
+ // borderColor: color,
98
+ // left: 100 * xmin + '%',
99
+ // top: 100 * ymin + '%',
100
+ // width: 100 * (xmax - xmin) + '%',
101
+ // height: 100 * (ymax - ymin) + '%',
102
+ // })
103
+
104
+ // // Draw label
105
+ // const labelElement = document.createElement('span');
106
+ // labelElement.textContent = label;
107
+ // labelElement.className = 'bounding-box-label';
108
+ // labelElement.style.backgroundColor = color;
109
+
110
+ // boxElement.appendChild(labelElement);
111
+ // imageContainer.appendChild(boxElement);
112
+ // }
 
 
113
 
114
  // function setup(){
115
  // let canvas = createCanvas(200,200)
 
129
  new p5(function(p5){
130
  p5.setup = function(){
131
  console.log('p5 loaded')
132
+ p5.noCanvas()
133
+ makeInterface()
134
+ // let canvas = p5.createCanvas(200,200)
135
+ // canvas.position(300, 1000);
136
+ // p5.background(200)
137
+ // p5.textSize(20)
138
  // p5.textAlign(p5.CENTER,p5.CENTER)
139
+ // let promptButton = p5.createButton("GO").position(0, 340);
140
  // promptButton.position(0, 340);
141
  // promptButton.elt.style.fontSize = "15px";
142
 
143
  }
144
 
145
  p5.draw = function(){
146
+ pass
147
+ }
148
+
149
+ window.onload = function(){
150
+ console.log('huggingface file loaded')
151
+ console.log('sketchfile loaded')
152
+ }
153
+
154
+ p5.makeInterface = function(){
155
+ promptInput = p5.createInput("")
156
+ promptInput.position(0,160)
157
+ promptInput.size(500);
158
+ promptInput.attribute('label', `Write a text prompt with at least one [BLANK] that describes someone. You can also write [FILL] where you want the bot to fill in a word.`)
159
+ promptInput.value(`For example: "The [BLANK] has a job as a ...`)
160
+ promptInput.elt.style.fontSize = "15px";
161
+ p5.createP(promptInput.attribute('label')).position(0,100)
162
+ // p5.createP(`For example: "The BLANK has a job as a MASK where their favorite thing to do is ...`)
163
+
164
+ //make for loop to generate
165
+ maskInputA = p5.createInput("");
166
+ maskInputA.position(0, 240);
167
+ maskInputA.size(200);
168
+ maskInputA.elt.style.fontSize = "15px";
169
+
170
+ maskInputB = p5.createInput("");
171
+ maskInputB.position(0, 270);
172
+ maskInputB.size(200);
173
+ maskInputB.elt.style.fontSize = "15px";
174
+
175
+ maskInputC = p5.createInput("");
176
+ maskInputC.position(0, 300);
177
+ maskInputC.size(200);
178
+ maskInputC.elt.style.fontSize = "15px";
179
+
180
+ }
181
+
182
+ function makeInput(i){
183
+ i = p5.createInput("");
184
+ i.position(0, 300); //append to last input and move buttons down
185
+ i.size(200);
186
+ i.elt.style.fontSize = "15px";
187
+ }
188
+
189
  }
190
  });