# Inference with a Dedicated Endpoint
import requests
# Dedicated Hugging Face Inference Endpoint for this model.
API_URL = "https://ag765g6yhowax6vb.us-east4.gcp.endpoints.huggingface.cloud"
# Request headers: send and expect JSON.
# NOTE(review): dedicated endpoints are typically access-protected; an
# "Authorization": "Bearer hf_..." entry is likely required — confirm against
# the endpoint's security settings.
headers = {
"Accept" : "application/json",
"Content-Type": "application/json"
}
def query(payload):
    """POST *payload* as JSON to the inference endpoint and return the decoded JSON response.

    Args:
        payload: JSON-serializable dict, e.g. {"inputs": ..., "parameters": {...}}.

    Returns:
        The endpoint's response body parsed as JSON (dict or list).

    Raises:
        requests.exceptions.Timeout: if the endpoint does not respond in time.
    """
    # Fix: the original call had no timeout, so a stalled endpoint would
    # block the caller indefinitely.
    response = requests.post(API_URL, headers=headers, json=payload, timeout=30)
    return response.json()
# Task instruction prepended to every example.
prefix = "Determine whether the context is sufficient to answer the question:"
question = "name the unit of mass that is used in the same measurement system as gray per second?"
context = "('Gray per second', 'measurement_unit.absorbed_dose_rate_unit.measurement_system', 'International System of Units')"
# Assemble the prompt: instruction line, then the question, then the context,
# each on its own line.
input_ = "\n".join([prefix, f"Question: {question}", f"Context: {context}"])
# Build the request body and send it to the endpoint; `output` holds the
# JSON-decoded response.
payload = {"inputs": input_, "parameters": {}}
output = query(payload)
Downloads last month: 16
This model does not have enough activity to be deployed to Inference API (serverless) yet. Increase its social
visibility and check back later, or deploy to Inference Endpoints (dedicated)
instead.