RabbitRedux / evaluation / evaluation.py
from sklearn.metrics import accuracy_score, classification_report


def evaluate_model(model, X_test, y_test):
    """
    Evaluate the trained model on the test set.

    Args:
        model: Trained model.
        X_test (pd.DataFrame): Testing features.
        y_test (pd.Series): Testing labels.

    Returns:
        dict: Evaluation metrics.
    """
    y_pred = model.predict(X_test)
    accuracy = accuracy_score(y_test, y_pred)
    report = classification_report(y_test, y_pred)
    return {"accuracy": accuracy, "classification_report": report}