File size: 3,132 Bytes
854f61d
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
from statistics import mode
from langchain_community.vectorstores import FAISS
from types import ModuleType
import math
from langchain_community.llms import OpenAI
import requests
import requests.models
# from decouple import config


def classify_text(
    user_input: str,
    load_vector_store: "FAISS",
) -> dict:
    """Classify user input as extremist by similarity against a vector store.

    Args:
        user_input: Raw text to classify.
        load_vector_store: A FAISS vector store (the previous ``ModuleType``
            annotation was incorrect) exposing
            ``similarity_search_with_relevance_scores``.

    Returns:
        A dict with keys ``extremism_detected`` (bool), ``ideology`` and
        ``type_label`` — the latter two are the most common metadata values
        among the retrieved documents, or ``None`` when nothing was detected.
    """
    results = load_vector_store.similarity_search_with_relevance_scores(user_input)
    # Guard the empty case: the original averaged over len(results) and would
    # raise ZeroDivisionError when the store returned no hits.
    if not results:
        return {"extremism_detected": False, "ideology": None, "type_label": None}
    avg_similarity_score = sum(score for _, score in results) / len(results)
    if avg_similarity_score > 0.7:
        print(f"Extremism {avg_similarity_score} detected, initiating countermeasures protocol... ")
        print(results)
        # Majority vote over the metadata of the retrieved documents.
        label = mode([doc.metadata.get("label", None) for doc, _ in results])
        ideology = mode([doc.metadata.get("ideology", None) for doc, _ in results])
        return {"extremism_detected": True, "ideology": ideology, "type_label": label}
    # Keep the schema consistent with the positive branch ("ideology" was
    # previously missing here).
    return {"extremism_detected": False, "ideology": None, "type_label": None}
def analyze_affect(
    user_input: str,
    classify_text: dict,
    mistral_public_url: str,
) -> dict:
    """Annotate a classification result with the input's emotional tone.

    Calls a remote Mistral inference endpoint to label the affect of
    ``user_input`` as one of Positive / Negative / Neutral / Mixed.

    Args:
        user_input: The text whose affect should be analyzed.
        classify_text: The dict produced by ``classify_text``; mutated in
            place when extremism was detected.
        mistral_public_url: Base URL of the Mistral inference service.

    Returns:
        The same dict, with a ``sentiment`` key added when
        ``extremism_detected`` is set; otherwise returned unchanged.
    """
    # Only spend an inference call when extremism was actually detected.
    if not classify_text["extremism_detected"]:
        return classify_text

    input_text = {
        "context": f"User input text: {user_input}",
        "question": (
            "The above text's emotional tone (affect) is likely one of the following: "
            "Positive, Negative, Neutral, Mixed. Please classify it. "
            "Answer with only a single word, which is the classification label you give to the above text, nothing else:\n"
        ),
    }

    # NOTE(security): a previously commented-out OpenAI implementation here
    # contained a hard-coded API key. The dead code has been removed and the
    # key redacted — that key must be rotated, since it was committed.
    # TODO: fix the poetry PATH reference so `decouple` can be added to
    # pyproject.toml and secrets loaded from the environment instead.

    # Single non-streaming request (stream=False); the timeout prevents the
    # call from hanging indefinitely if the inference service is unreachable.
    response: requests.models.Response = requests.post(
        f"{mistral_public_url}/mistral-inference",
        json=input_text,
        stream=False,
        timeout=60,
    )
    classify_text["sentiment"] = response.text.strip()
    return classify_text