# microhugs/appaaa.py
import gradio as gr
from huggingface_hub import InferenceClient
import os
import asyncio
from transformers import pipeline
from sentence_transformers import SentenceTransformer, util
import logging
import hashlib
from typing import List, Dict, Tuple
import aiohttp
from pydantic import BaseModel, SecretStr
import json
# Enable detailed logging
logging.basicConfig(level=logging.INFO)
# Hugging Face Inference Client
client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
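# Usage sketch for the hosted client (hypothetical prompt; the exact keyword
# arguments depend on your huggingface_hub version):
#
#     reply = client.text_generation("Summarize this issue: ...", max_new_tokens=128)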
# Load a pre-trained model for sentence similarity
similarity_model = SentenceTransformer('all-mpnet-base-v2')
class GitHubConfig(BaseModel):
    username: str
    repository: str
    api_token: SecretStr
class GitHubIntegration:
    def __init__(self, config: GitHubConfig):
        self.config = config
        self.headers = {
            "Authorization": f"Bearer {self.config.api_token.get_secret_value()}",
            "Accept": "application/vnd.github.v3+json"
        }
        self.url = "https://api.github.com"

    async def fetch_issues(self) -> List[Dict]:
        cache_key = hashlib.md5(f"{self.config.username}/{self.config.repository}".encode()).hexdigest()
        if cached := self._load_cache(cache_key):
            return cached
        url = f"{self.url}/repos/{self.config.username}/{self.config.repository}/issues"
        try:
            async with aiohttp.ClientSession() as session:
                async with session.get(url, headers=self.headers) as response:
                    response.raise_for_status()
                    issues = await response.json()
                    self._save_cache(cache_key, issues)
                    return issues
        except Exception as e:
            logging.error(f"GitHub API error: {str(e)}")
            raise
    def _load_cache(self, key: str) -> List[Dict] | None:
        # File-based cache: look for a previously saved JSON dump of the issues.
        cache_file = f"cache_{key}.json"
        if os.path.exists(cache_file):
            with open(cache_file, "r") as f:
                return json.load(f)
        return None

    def _save_cache(self, key: str, data: List[Dict]):
        # File-based cache: persist the fetched issues next to the script.
        cache_file = f"cache_{key}.json"
        with open(cache_file, "w") as f:
            json.dump(data, f)
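# Usage sketch (hypothetical repository and token; `fetch_issues` is a
# coroutine, so it must run inside an event loop):
#
#     config = GitHubConfig(username="octocat", repository="hello-world",
#                           api_token=SecretStr("ghp_your_token_here"))
#     issues = asyncio.run(GitHubIntegration(config).fetch_issues())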
### Function to analyze issues and provide solutions
def analyze_issues(issue_text: str, model_name: str, severity: str = None, programming_language: str = None) -> str:
    """
    Analyze issues and provide solutions.

    Args:
        issue_text (str): The issue text.
        model_name (str): The model name.
        severity (str, optional): The severity of the issue. Defaults to None.
        programming_language (str, optional): The programming language. Defaults to None.

    Returns:
        str: The analyzed issue and solution.
    """
    logging.info("Analyzing issue: {} with model: {}".format(issue_text, model_name))
    prompt = """Issue: {}
Severity: {}
Programming Language: {}
Please provide a comprehensive resolution in the following format:
## Problem Summary:
(Concise summary of the issue)
## Root Cause Analysis:
(Possible reasons for the issue)
## Solution Options:
1. **Option 1:** (Description)
   - Pros: (Advantages)
   - Cons: (Disadvantages)
2. **Option 2:** (Description)
   - Pros: (Advantages)
   - Cons: (Disadvantages)
## Recommended Solution:
(The best solution with justification)
## Implementation Steps:
1. (Step 1)
2. (Step 2)
3. (Step 3)
## Verification Steps:
1. (Step 1)
2. (Step 2)
""".format(issue_text, severity, programming_language)
    try:
        nlp = pipeline("text-generation", model=model_name, max_length=1000)  # Increased max_length for long resolutions
        logging.info("Pipeline created with model: {}".format(model_name))
        result = nlp(prompt)
        logging.info("Model output: {}".format(result))
        return result[0]['generated_text']
    except Exception as e:
        logging.error("Error analyzing issue with model {}: {}".format(model_name, e))
        return "Error analyzing issue with model {}: {}".format(model_name, e)
### Function to find related issues
def find_related_issues(issue_text: str, issues: list) -> list:
    """
    Find related issues.

    Args:
        issue_text (str): The issue text.
        issues (list): The list of issues.

    Returns:
        list: The list of related issues.
    """
    logging.info("Finding related issues for: {}".format(issue_text))
    issue_embedding = similarity_model.encode(issue_text)
    related_issues = []
    for issue in issues:
        title_embedding = similarity_model.encode(issue['title'])
        similarity = util.cos_sim(issue_embedding, title_embedding)[0][0]
        related_issues.append((issue, similarity))
    related_issues = sorted(related_issues, key=lambda x: x[1], reverse=True)
    logging.info("Found related issues: {}".format(related_issues))
    return related_issues[:3]  # Return the top 3 most similar issues
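# Usage sketch (`issues` is the list of GitHub issue dicts returned by
# `fetch_issues`; each element needs at least a 'title' key):
#
#     for issue, score in find_related_issues("Login button unresponsive", issues):
#         print(issue['title'], float(score))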
### Function to handle chat responses
async def respond(
    command: str,
    history: List[Tuple[str, str]],
    system_message: str,
    max_tokens: int,
    temperature: float,
    top_p: float,
    github_api_token: str,
    github_username: str,
    github_repository: str,
    selected_model: str,
    severity: str,
    programming_language: str,
    *args
):
    # Per-call state: the GitHub client and issue list live for one invocation.
    github_client = None
    issues: List[Dict] = []
    messages = [{"role": "system", "content": system_message}]
    logging.info("System message: {}".format(system_message))
    for user_msg, assistant_msg in history:
        if user_msg:
            messages.append({"role": "user", "content": user_msg})
            logging.info("User message: {}".format(user_msg))
        if assistant_msg:
            messages.append({"role": "assistant", "content": assistant_msg})
            logging.info("Assistant message: {}".format(assistant_msg))
    logging.info("Command received: {}".format(command))
    # Split the command word from its arguments; `str.split` cannot raise here,
    # so no try/except is needed.
    command, *rest = command.split(' ', 1)
    args = rest[0] if rest else ''
    if command == "/github":
        try:
            if not args:
                if github_client:
                    yield f"ℹ️ Current GitHub connection: {github_client.config.username}/{github_client.config.repository}"
                else:
                    yield "ℹ️ Not connected to GitHub"
                return
            parts = args.split(maxsplit=2)  # Allow spaces in the token
            if len(parts) < 3:
                raise ValueError("Format: /github <username> <repo> <token>")
            github_client = GitHubIntegration(GitHubConfig(
                username=parts[0],
                repository=parts[1],
                api_token=SecretStr(parts[2])
            ))
            issues = await github_client.fetch_issues()  # Fetch issues after a successful connection
            yield "✅ GitHub configured successfully"
        except Exception as e:
            github_client = None
            yield f"❌ Error: {str(e)}"
elif command == "/help":
help_message = """Available commands:
- `/github <username> <repo> <token>`: Connect to a GitHub repository.
- `/help`: Show this help message.
- `/generate_code [code description]`: Generate code based on the description.
- `/explain_concept [concept]`: Explain a concept.
- `/write_documentation [topic]`: Write documentation for a given topic.
- `/translate_code [code] to [target language]`: Translate code to another language.
- `/analyze [issue number]`: Analyze a GitHub issue.
- `/list_issues`: List all issues in the connected repository.
"""
yield help_message
    elif command.isdigit() and issues:
        try:
            issue_number = int(command) - 1
            issue = issues[issue_number]
            issue_text = issue['title'] + "\n\n" + (issue['body'] or "")
            resolution = analyze_issues(issue_text, selected_model, severity, programming_language)
            related_issues = find_related_issues(issue_text, issues)
            related_issue_text = "\n".join(
                ["- {} (Similarity: {:.2f})".format(related['title'], similarity) for related, similarity in related_issues]
            )
            yield "Resolution for Issue '{}':\n{}\n\nRelated Issues:\n{}".format(issue['title'], resolution, related_issue_text)
        except Exception as e:
            logging.error("Error analyzing issue: {}".format(e))
            yield "Error analyzing issue: {}".format(e)
elif command.startswith("/generate_code"):
code_description = command.replace("/generate_code", "").strip()
if not code_description:
yield "Please provide a description of the code you want to generate."
else:
prompt = "Generate code for the following: {}\nProgramming Language: {}".format(code_description, programming_language)
try:
generated_code = analyze_issues(prompt, selected_model)
code_output = "<pre>{}</pre>".format(generated_code)
yield code_output
except Exception as e:
logging.error("Error generating code: {}".format(e))
yield "Error generating code: {}".format(e)
elif command.startswith("/explain_concept"):
concept = command.replace("/explain_concept", "").strip()
if not concept:
yield "Please provide a concept to explain."
else:
prompt = "Explain the concept of {} in detail.".format(concept)
try:
explanation = analyze_issues(prompt, selected_model)
yield "<pre>{}</pre>".format(explanation)
except Exception as e:
logging.error("Error explaining concept: {}".format(e))
yield "Error explaining concept: {}".format(e)
elif command.startswith("/write_documentation"):
topic = command.replace("/write_documentation", "").strip()
if not topic:
yield "Please provide a topic for documentation."
else:
prompt = "Write documentation for the topic: {}".format(topic)
try:
documentation = analyze_issues(prompt, selected_model)
yield "<pre>{}</pre>".format(documentation)
except Exception as e:
logging.error("Error writing documentation: {}".format(e))
yield "Error writing documentation: {}".format(e)
elif command.startswith("/translate_code"):
try:
code, _, target_language = command.replace("/translate_code", "").strip().partition(" to ")
if not code or not target_language:
yield "Please provide code and target language in the format: `/translate_code [code] to [target language]`"
else:
prompt = f"Translate the following code to {target_language}:\n```\n{code}\n```"
try:
translated_code = analyze_issues(prompt, selected_model)
yield "<pre>{}</pre>".format(translated_code)
except Exception as e:
logging.error("Error translating code: {}".format(e))
yield "Error translating code: {}".format(e)
except Exception as e:
logging.error("Error parsing translate_code command: {}".format(e))
yield "Error parsing translate_code command: {}".format(e)
elif command.startswith("/analyze"):
try:
if not github_client:
yield "❌ You need to connect to a GitHub repository first using `/github <username> <repo> <token>`."
issue_number = int(command.replace("/analyze", "").strip()) - 1
if 0 <= issue_number < len(issues):
issue = issues[issue_number]
issue_text = issue['title'] + "\n\n" + issue['body']
resolution = analyze_issues(issue_text, selected_model, severity, programming_language)
related_issues = find_related_issues(issue_text, issues)
related_issue_text = "\n".join(
["- {} (Similarity: {:.2f})".format(issue['title'], similarity) for issue, similarity in related_issues]
)
yield "Resolution for Issue '{}':\n{}\n\nRelated Issues:\n{}".format(issue['title'], resolution, related_issue_text)
else:
yield "❌ Invalid issue number. Please enter a valid issue number from the list."
except Exception as e:
logging.error("Error analyzing issue: {}".format(e))
yield "Error analyzing issue: {}".format(e)
elif command == "/list_issues":
try:
if not github_client:
yield "❌ You need to connect to a GitHub repository first using `/github <username> <repo> <token>`."
if issues:
issue_list = "\n".join(
[f"- {issue['title']} (Issue #{issue['number']})" for issue in issues]
)
yield f"Issues in {github_client.config.username}/{github_client.config.repository}:\n{issue_list}"
else:
yield "❌ No issues found in the connected repository."
except Exception as e:
logging.error("Error listing issues: {}".format(e))
yield "Error listing issues: {}".format(e)
else:
yield "I'm not sure what you mean. Try using `/help` for a list of available commands."
def create_gradio_interface():
    def process_command(
        command: str,
        system_message: str,
        max_tokens: int,
        temperature: float,
        top_p: float,
        github_token: str,
        github_username: str,
        github_repo: str,
        model: str,
        severity: str,
        programming_language: str
    ):
        async def collect_response() -> str:
            # `respond` is an async generator; gather every yielded chunk
            # into a single string for the Textbox output.
            chunks = []
            async for chunk in respond(
                command=command,
                history=[],
                system_message=system_message,
                max_tokens=max_tokens,
                temperature=temperature,
                top_p=top_p,
                github_api_token=github_token,
                github_username=github_username,
                github_repository=github_repo,
                selected_model=model,
                severity=severity,
                programming_language=programming_language
            ):
                chunks.append(chunk)
            return "\n".join(chunks)
        try:
            return asyncio.run(collect_response())
        except Exception as e:
            return f"Error: {str(e)}"

    with gr.Blocks(title="AI Assistant") as demo:
        gr.Markdown("""
        # AI Assistant
        Ask me anything, or use commands to interact with GitHub.
        Available commands:
        - `/github <username> <repo> <token>`: Connect to GitHub
        - `/help`: Show help
        - `/generate_code`: Generate code
        - `/analyze`: Analyze issues
        - `/list_issues`: List repository issues
        """)
        with gr.Row():
            with gr.Column():
                command_input = gr.Textbox(
                    label="Command",
                    placeholder="Enter command (e.g., /help)",
                    lines=2
                )
                system_message = gr.Textbox(
                    label="System Message",
                    value="You are a helpful AI assistant.",
                    lines=2
                )
            with gr.Column():
                github_token = gr.Textbox(
                    label="GitHub Token",
                    type="password",
                    placeholder="Enter GitHub token"
                )
                github_username = gr.Textbox(
                    label="GitHub Username",
                    placeholder="Enter GitHub username"
                )
                github_repo = gr.Textbox(
                    label="GitHub Repository",
                    placeholder="Enter repository name"
                )
        with gr.Row():
            with gr.Column():
                # Use the full Hub repo id so `pipeline(model=...)` can resolve it.
                model = gr.Dropdown(
                    label="Model",
                    choices=["HuggingFaceH4/zephyr-7b-beta"],
                    value="HuggingFaceH4/zephyr-7b-beta"
                )
                severity = gr.Dropdown(
                    label="Severity",
                    choices=["Low", "Medium", "High"],
                    value="Medium"
                )
                programming_language = gr.Dropdown(
                    label="Programming Language",
                    choices=["Python", "JavaScript", "Java", "C++", "C#"],
                    value="Python"
                )
            with gr.Column():
                max_tokens = gr.Slider(
                    label="Max Tokens",
                    minimum=50,
                    maximum=1000,
                    value=500,
                    step=50
                )
                temperature = gr.Slider(
                    label="Temperature",
                    minimum=0.1,
                    maximum=1.0,
                    value=0.7,
                    step=0.1
                )
                top_p = gr.Slider(
                    label="Top-p",
                    minimum=0.1,
                    maximum=1.0,
                    value=0.9,
                    step=0.1
                )
        submit_btn = gr.Button("Submit")
        response_output = gr.Textbox(
            label="Response",
            lines=10,
            placeholder="Response will appear here..."
        )
        # Handle submit button click
        submit_btn.click(
            fn=process_command,
            inputs=[
                command_input,
                system_message,
                max_tokens,
                temperature,
                top_p,
                github_token,
                github_username,
                github_repo,
                model,
                severity,
                programming_language
            ],
            outputs=response_output
        )
        # Add example commands
        gr.Examples(
            examples=[
                ["/help", "You are a helpful AI assistant.", 500, 0.7, 0.9, "", "", "", "HuggingFaceH4/zephyr-7b-beta", "Medium", "Python"],
                ["/github octocat hello-world YOUR_TOKEN", "You are a helpful AI assistant.", 500, 0.7, 0.9, "", "", "", "HuggingFaceH4/zephyr-7b-beta", "Medium", "Python"],
                ["/generate_code Create a FastAPI REST API", "You are a helpful AI assistant.", 500, 0.7, 0.9, "", "", "", "HuggingFaceH4/zephyr-7b-beta", "Medium", "Python"],
            ],
            inputs=[
                command_input,
                system_message,
                max_tokens,
                temperature,
                top_p,
                github_token,
                github_username,
                github_repo,
                model,
                severity,
                programming_language
            ]
        )
    return demo
# Launch the interface
if __name__ == "__main__":
    demo = create_gradio_interface()
    demo.launch(
        server_name="0.0.0.0",
        server_port=7860,
        share=False
    )