# (Stripped non-code artifacts from a file-viewer paste: size banner,
#  git blame hashes, and the line-number gutter.)
# import requests
# import json
# import os
# # Your Hugging Face model URL
# API_URL = "sayyedAhmed/Crisis_Severity_Predictor_LSTM" # Replace with your model's URL
# # Load your Hugging Face API token
# API_KEY = os.getenv("HF_API_KEY") # Ensure the API key is stored in the environment or replace with the actual key
# headers = {
# "Authorization": f"Bearer {API_KEY}",
# "Content-Type": "application/json"
# }
# payload = {
# "inputs": "Your test input here" # Replace this with the actual input for your model
# }
# # Make the POST request to Hugging Face Inference API
# response = requests.post(API_URL, headers=headers, json=payload)
# # Print the response (the predictions)
# print(response.json())
from transformers import pipeline

# Hugging Face Hub model ID for the crisis-severity classifier
# (same model the commented-out Inference API snippet above targets).
model_name = "sayyedAhmed/Crisis_Severity_Predictor_LSTM"


def _classify(text):
    """Build the text-classification pipeline and score *text*.

    Returns the pipeline's prediction output (typically a list like
    ``[{"label": ..., "score": ...}]``). Downloads the model from the
    Hub on first use, so this requires network access.
    """
    # framework="pt" selects PyTorch. The original inline comment claimed
    # 'tf' (TensorFlow), contradicting the code — "pt" is what actually runs
    # and is kept here.
    classifier = pipeline("text-classification", model=model_name, framework="pt")
    return classifier(text)


if __name__ == "__main__":
    # Guarded entry point: importing this module no longer triggers the
    # model download and inference as an import-time side effect.
    result = _classify("Example text for classification.")
    print(result)