import gradio as gr
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

# Load the model and tokenizer from the Hugging Face Hub
model_path = "Canstralian/pentest_ai"  # Replace with your model path if needed
model = AutoModelForCausalLM.from_pretrained(model_path)
tokenizer = AutoTokenizer.from_pretrained(model_path)

# Move the model to GPU when available (this is what the torch import is for)
device = "cuda" if torch.cuda.is_available() else "cpu"
model.to(device)
model.eval()

# Confirm successful loading
print(f"Model and tokenizer loaded from {model_path} on {device}")

# Function to handle user inputs and generate responses
def generate_text(instruction):
    # Tokenize the input; keeping the attention mask (unlike bare
    # tokenizer.encode) lets generate() handle truncated inputs reliably
    inputs = tokenizer(instruction, return_tensors="pt", truncation=True, max_length=512).to(device)

    # Generate the output. max_new_tokens bounds the response itself, whereas
    # the original max_length also counted the (up to 512-token) prompt.
    # Plain sampling replaces the original beam-search/sampling mix; adjust if needed.
    with torch.no_grad():
        outputs = model.generate(
            **inputs,
            max_new_tokens=150,
            do_sample=True,
            top_p=0.9,
            temperature=0.7,
        )

    # Decode only the newly generated tokens so the prompt is not echoed back
    new_tokens = outputs[0][inputs["input_ids"].shape[-1]:]
    return tokenizer.decode(new_tokens, skip_special_tokens=True)
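
# Quick smoke test of the generator before wiring up the UI (illustrative
# prompt only; uncomment to run):
#   print(generate_text("Summarize the phases of a penetration test."))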

# Gradio interface to interact with the text generation function
iface = gr.Interface(
    fn=generate_text, 
    inputs=gr.Textbox(lines=2, placeholder="Enter your question or prompt here..."), 
    outputs="text", 
    title="Pentest AI Text Generator",
    description="Generate text using a fine-tuned model for pentesting-related queries."
)
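
# On Hugging Face Spaces or with slow generation, enabling Gradio's queue
# helps avoid request timeouts (optional; the plain launch below also works):
#   iface.queue()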

# Launch the interface
iface.launch()
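
# Once running, the app can also be queried from another process via
# gradio_client. A minimal sketch, assuming the default local port and the
# default single-endpoint name "/predict":
#
#   from gradio_client import Client
#
#   client = Client("http://127.0.0.1:7860")
#   print(client.predict("Explain what an nmap SYN scan does.", api_name="/predict"))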