# Streamlit demo: "Visualizing What Convnets Learn" — session state, UI strings, and constants.
import streamlit as st
# Session-state setup
def initialize_states():
    """Ensure every session-state key used by this app exists.

    Each key is created with a ``None`` placeholder on first run; keys that
    already exist (e.g. after a Streamlit rerun) are left untouched.
    """
    for key in ("model_name", "layer_name", "layer_list", "model", "feat_extract"):
        if key not in st.session_state:
            st.session_state[key] = None
# Strings
# Hint shown so users can reproduce the reference Keras-example output.
replicate = ":bulb: Choose **ResNet50V2** model and **conv3_block4_out** to get the results as in the example."
# Attribution link to the original Keras tutorial this demo is based on.
credits = ":memo: [Keras example](https://keras.io/examples/vision/visualizing_what_convnets_learn/) by [@fchollet](https://twitter.com/fchollet)."
# Pointer to a related Hugging Face Space for Vision Transformers.
vit_info = ":star: For Vision Transformers, check the excellent [probing-vits](https://huggingface.co/probing-vits) space."
# Page title.
title = "Visualizing What Convnets Learn"
# Introductory blurb describing the visualization technique.
info_text = """
Models in this demo are pre-trained on the ImageNet dataset.
The simple visualization process involves creation of input images that maximize the activation of specific filters in a target layer.
Such images represent a visualization of the pattern that the filter responds to.
"""
# Author credit line.
self_credit = "Space by Vrinda Prabhu"
# Constants and globals
# Spatial size (pixels) of the generated visualization images.
IMG_WIDTH = 180
IMG_HEIGHT = 180
# UI label -> number of filters to visualize; 0 presumably means "only the
# first filter" per its label — confirm against the consuming code.
VIS_OPTION = {"only the first filter": 0, "the first 64 filters": 64}
# Gradient-ascent settings for the filter-maximization loop.
ITERATIONS = 30
LEARNING_RATE = 10.0