# xai_framework/utils/eval_users.py
import streamlit as st
import os
ROOT_FIG_DIR = f'{os.getcwd()}/figures'
def get_product_dev_page_layout():
    # row6_1, row6_2 = st.columns((1, 1))
    row6_1, row6_2, row6_3 = st.tabs(["Evaluation Metrics", "Performance Evaluation", "Issues and Limitations"])
    with row6_1:
        # st.write("**Performance Metrics**")
        st.subheader('Performance Metrics')
        st.write('The following metrics are used for evaluation:')
        st.image(f'{ROOT_FIG_DIR}/evaluation_template.png')
        list_test = """<ul>
        <li><strong>Accuracy: </strong>It is the ratio of correctly predicted observations to the total number of observations.</li>
        </ul>"""
        st.markdown(list_test, unsafe_allow_html=True)
        # st.latex(r''' Accuracy=\frac{TP + TN}{TP+TN+FP+FN}''')
        list_test = """<ul>
        <li><strong>Precision: </strong>It is the ratio of correctly predicted positive observations to the total predicted positive observations.</li>
        </ul>"""
        st.markdown(list_test, unsafe_allow_html=True)
        # st.latex(r''' Precision=\frac{TP}{TP+FP}''')
        list_test = """<ul>
        <li><strong>Recall: </strong>It is the ratio of correctly predicted positive observations to all observations in the actual class.</li>
        </ul>"""
        st.markdown(list_test, unsafe_allow_html=True)
        # st.latex(r''' Recall=\frac{TP}{TP+FN}''')
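        # Worked example (an illustrative assumption, mirroring the commented-out
        # st.latex formulas above): with TP=40, TN=50, FP=5, FN=5,
        #   accuracy  = (TP + TN) / (TP + TN + FP + FN) = 90 / 100 = 0.90
        #   precision = TP / (TP + FP)                  = 40 / 45  ≈ 0.889
        #   recall    = TP / (TP + FN)                  = 40 / 45  ≈ 0.889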
        # with st.expander('Test Set Confusion Matrix'):
        #     # st.caption('Test Set Results:')
        #     st.image('./figures/test_confmat_20210404.png')
    with row6_2:
        # st.write("**Prediction Samples**")
        # with st.expander('Test Set Confusion Matrix'):
        #     # st.caption('Test Set Results:')
        st.subheader('Test Set Confusion Matrix')
        st.image(f'{ROOT_FIG_DIR}/test_confmat_20210404.png')
        st.subheader('Prediction Samples')
        st.caption('Correctly classified sample predictions:')
        st.image(f'{ROOT_FIG_DIR}/pred_stats.png')
        st.caption('Misclassified sample predictions:')
        st.image(f'{ROOT_FIG_DIR}/pred_stats.png')
        st.subheader("Class-wise Prediction Distributions")
        st.image(f'{ROOT_FIG_DIR}/training_prob_stats.png')
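

# Usage sketch (an assumption, not part of the original module): the layout
# builder above is presumably imported and called from a page script elsewhere
# in the repo. The guard below would also let this module be served directly
# with `streamlit run xai_framework/utils/eval_users.py`; the page title and
# layout settings are hypothetical examples.
if __name__ == "__main__":
    st.set_page_config(page_title="Model Evaluation", layout="wide")  # assumed page settings
    get_product_dev_page_layout()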