import { pipeline, env } from 'https://cdn.jsdelivr.net/npm/@xenova/transformers@2.10.1';
// import { HfInference } from 'https://cdn.jsdelivr.net/npm/@huggingface/inference@2.7.0/+esm';
// const inference = new HfInference();
// import { pipeline } from '@xenova/transformers';

// Load a text-generation pipeline. Note: large instruct models like Mistral-7B may not ship
// browser-ready (ONNX) weights; a smaller model such as 'Xenova/gpt2' may be more practical here.
let pipe = await pipeline('text-generation', 'mistralai/Mistral-7B-Instruct-v0.2');
// models tried: 'Xenova/gpt2', 'mistralai/Mistral-7B-Instruct-v0.2', 'meta-llama/Meta-Llama-3-8B'

// Since we will download the model from the Hugging Face Hub, we can skip the local model check
// env.allowLocalModels = false;

let promptButton, buttonButton, promptInput, maskInputA, maskInputB, maskInputC, modOutput, modelOutput

// const detector = await pipeline('text-generation', 'meta-llama/Meta-Llama-3-8B');

var inputArray = ["Brit", "Israeli", "German", "Palestinian"]

var PREPROMPT = `Return an array of sentences. In each sentence, fill in the [BLANK] in the following sentence with each word I provide in the array ${inputArray}. Replace any [FILL] with an appropriate word of your choice.`

var PROMPT = `The [BLANK] works as a [FILL] but wishes for [FILL].`

// Chat completion API (alternative route via @huggingface/inference)
// const out = await inference.chatCompletion({
//   model: "mistralai/Mistral-7B-Instruct-v0.2",
//   // model: "google/gemma-2-9b",
//   messages: [{ role: "user", content: PREPROMPT + PROMPT }],
//   max_tokens: 100
// });

let out = await pipe(PREPROMPT + PROMPT);
console.log(out)

// var result = await out.choices[0].message;
// the text-generation pipeline returns an array of { generated_text } objects
var result = out[0].generated_text
// console.log("role: ", result.role, "content: ", result.content);

// hand the text to a global variable so the p5 sketch below can read it
// (not the best approach, but a better one hasn't been worked out yet)
// window.modelOutput = result.content;
// modelOutput = result.content
modelOutput = result

// console.log('huggingface loaded');
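// A minimal sketch of wrapping the generation call in a reusable helper, assuming the
// transformers.js text-generation pipeline accepts generation options such as
// max_new_tokens, temperature, and do_sample, and returns an array of { generated_text }
// objects. The name generateText is hypothetical and is not wired into the flow above.
async function generateText(prompt) {
  const output = await pipe(prompt, {
    max_new_tokens: 100, // cap the length of the completion
    temperature: 0.7,    // soften the sampling distribution
    do_sample: true,     // sample instead of greedy decoding
  });
  return output[0].generated_text;
}
// Example usage: modelOutput = await generateText(PREPROMPT + PROMPT);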
new p5(function(p5){

  p5.setup = function(){
    console.log('p5 loaded')
    p5.noCanvas()
    makeInterface()
    // let canvas = p5.createCanvas(200,200)
    // canvas.position(300, 1000);
    // p5.background(200)
    // p5.textSize(20)
    // p5.textAlign(p5.CENTER,p5.CENTER)
    // let promptButton = p5.createButton("GO").position(0, 340);
    // promptButton.position(0, 340);
    // promptButton.elt.style.fontSize = "15px";
  }

  p5.draw = function(){
    // nothing to animate; the interface is built from DOM elements in makeInterface()
  }

  window.onload = function(){
    console.log('sketchfile loaded')
  }

  function makeInterface(){
    console.log('got to make interface')

    promptInput = p5.createInput("")
    promptInput.position(0,160)
    promptInput.size(500);
    promptInput.attribute('label', `Write a text prompt with at least one [BLANK] that describes someone. You can also write [FILL] where you want the bot to fill in a word.`)
    promptInput.value(`For example: "The [BLANK] has a job as a ...`)
    promptInput.elt.style.fontSize = "15px";
    p5.createP(promptInput.attribute('label')).position(0,100)
    // p5.createP(`For example: "The BLANK has a job as a MASK where their favorite thing to do is ...`)

    // TODO: generate these word inputs with a for loop instead of repeating them
    maskInputA = p5.createInput("");
    maskInputA.position(0, 240);
    maskInputA.size(200);
    maskInputA.elt.style.fontSize = "15px";

    maskInputB = p5.createInput("");
    maskInputB.position(0, 270);
    maskInputB.size(200);
    maskInputB.elt.style.fontSize = "15px";

    maskInputC = p5.createInput("");
    maskInputC.position(0, 300);
    maskInputC.size(200);
    maskInputC.elt.style.fontSize = "15px";

    modOutput = p5.createElement("p", "Results:");
    modOutput.position(0, 380);
    // modelOutput is set by the top-level generation code above before this module reaches
    // the p5 constructor, so it should be ready; the delay is kept as a safety margin
    setTimeout(() => {
      modOutput.html(modelOutput)
    }, 2000);
  }

  // function makeInput(i){
  //   i = p5.createInput("");
  //   i.position(0, 300); // append to the last input and move the buttons down
  //   i.size(200);
  //   i.elt.style.fontSize = "15px";
  // }
});
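// A minimal sketch of reading the three word inputs back into the word array, assuming the
// typed words are meant to replace the hard-coded inputArray above. collectWords is a
// hypothetical helper and is not called anywhere in the original flow.
function collectWords() {
  return [maskInputA, maskInputB, maskInputC]
    .map((input) => input.value().trim())  // read the current text of each p5 input
    .filter((word) => word.length > 0);    // ignore inputs left blank
}
// Example usage (e.g. inside a future button handler): inputArray = collectWords();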