Added explanations
- Gradio_app.ipynb +87 -9
- app.py +3 -1
Gradio_app.ipynb
CHANGED
@@ -17,14 +17,14 @@
  },
  {
   "cell_type": "code",
-  "execution_count":
+  "execution_count": 32,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
-     "Running on local URL: http://127.0.0.1:
+     "Running on local URL: http://127.0.0.1:7878\n",
      "\n",
      "To create a public link, set `share=True` in `launch()`.\n"
     ]
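The cell output above notes that a public link requires `share=True` in `launch()`. A minimal sketch of that call (assuming the Blocks app is bound to the name `demo`, as in app.py below, and with placeholder content for illustration):

```python
import gradio as gr

with gr.Blocks() as demo:
    gr.Markdown("placeholder UI for illustration")

# share=True asks Gradio to expose a temporary public URL in addition to
# the local http://127.0.0.1 address printed in the cell output above.
demo.launch(share=True)
```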
@@ -32,7 +32,7 @@
   {
    "data": {
     "text/html": [
-     "<div><iframe src=\"http://127.0.0.1:
+     "<div><iframe src=\"http://127.0.0.1:7878/\" width=\"100%\" height=\"500\" allow=\"autoplay; camera; microphone; clipboard-read; clipboard-write;\" frameborder=\"0\" allowfullscreen></iframe></div>"
     ],
     "text/plain": [
      "<IPython.core.display.HTML object>"
@@ -45,9 +45,84 @@
     "data": {
      "text/plain": []
     },
-    "execution_count":
+    "execution_count": 32,
     "metadata": {},
     "output_type": "execute_result"
+   },
+   {
+    "name": "stdout",
+    "output_type": "stream",
+    "text": [
+     "Starting to download inventory\n",
+     "Finished downloading inventory\n",
+     "Processing CI.CCC...\n",
+     "Downloading waveform\n",
+     "Skipping CI_CCC_2019-07-04T17:33:40.494920Z\n",
+     "Processing CI.CLC...\n",
+     "Processing CI.JRC2...\n",
+     "Reading cached waveform\n",
+     "Added CI.JRC2 to the list of waveforms\n",
+     "Processing CI.LRL...\n",
+     "Reading cached waveform\n",
+     "Added CI.LRL to the list of waveforms\n",
+     "Processing CI.MPM...\n",
+     "Reading cached waveform\n",
+     "Processing CI.Q0072...\n",
+     "Reading cached waveform\n",
+     "Processing CI.SLA...\n",
+     "Reading cached waveform\n",
+     "Added CI.SLA to the list of waveforms\n",
+     "Processing CI.SRT...\n",
+     "Reading cached waveform\n",
+     "Added CI.SRT to the list of waveforms\n",
+     "Processing CI.TOW2...\n",
+     "Reading cached waveform\n",
+     "Added CI.TOW2 to the list of waveforms\n",
+     "Processing CI.WBM...\n",
+     "Downloading waveform\n",
+     "Skipping CI_WBM_2019-07-04T17:33:40.063616Z\n",
+     "Processing CI.WCS2...\n",
+     "Downloading waveform\n",
+     "Skipping CI_WCS2_2019-07-04T17:33:40.200958Z\n",
+     "Processing CI.WMF...\n",
+     "Reading cached waveform\n",
+     "Added CI.WMF to the list of waveforms\n",
+     "Processing CI.WNM...\n",
+     "Reading cached waveform\n",
+     "Processing CI.WRC2...\n",
+     "Downloading waveform\n",
+     "Skipping CI_WRC2_2019-07-04T17:33:38.698099Z\n",
+     "Processing CI.WRV2...\n",
+     "Reading cached waveform\n",
+     "Processing CI.WVP2...\n",
+     "Downloading waveform\n",
+     "Skipping CI_WVP2_2019-07-04T17:33:39.650402Z\n",
+     "Processing NP.1809...\n",
+     "Reading cached waveform\n",
+     "Processing NP.5419...\n",
+     "Reading cached waveform\n",
+     "Processing PB.B916...\n",
+     "Reading cached waveform\n",
+     "Processing PB.B917...\n",
+     "Reading cached waveform\n",
+     "Processing PB.B918...\n",
+     "Reading cached waveform\n",
+     "Processing PB.B921...\n",
+     "Reading cached waveform\n",
+     "Starting to run predictions\n"
+    ]
+   },
+   {
+    "name": "stderr",
+    "output_type": "stream",
+    "text": [
+     "/var/folders/_g/3q5q8_dj0ydcpktxlwxb5vrh0000gq/T/ipykernel_27324/2440224661.py:224: FutureWarning: The input object of type 'Tensor' is an array-like implementing one of the corresponding protocols (`__array__`, `__array_interface__` or `__array_struct__`); but not a sequence (or 0-D). In the future, this object will be coerced as if it was first converted using `np.array(obj)`. To retain the old behaviour, you have to either modify the type 'Tensor', or assign to an empty array created with `np.empty(correct_shape, dtype=object)`.\n",
+     " waveforms = np.array(waveforms)[selection_indexes]\n",
+     "/var/folders/_g/3q5q8_dj0ydcpktxlwxb5vrh0000gq/T/ipykernel_27324/2440224661.py:224: VisibleDeprecationWarning: Creating an ndarray from ragged nested sequences (which is a list-or-tuple of lists-or-tuples-or ndarrays with different lengths or shapes) is deprecated. If you meant to do this, you must specify 'dtype=object' when creating the ndarray.\n",
+     " waveforms = np.array(waveforms)[selection_indexes]\n",
+     "/var/folders/_g/3q5q8_dj0ydcpktxlwxb5vrh0000gq/T/ipykernel_27324/2440224661.py:231: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.clone().detach() or sourceTensor.clone().detach().requires_grad_(True), rather than torch.tensor(sourceTensor).\n",
+     " waveforms = [torch.tensor(waveform) for waveform in waveforms]\n"
+    ]
    }
   ],
   "source": [
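The stderr output in the hunk above records NumPy warnings about building an ndarray from a ragged list of tensors and a PyTorch warning about re-wrapping existing tensors with `torch.tensor`. A minimal sketch of the pattern those warnings themselves suggest; the names `waveforms` and `selection_indexes` come from the traceback, while the toy data and shapes are assumptions:

```python
import torch

# toy stand-ins: a ragged list of three-component waveform tensors
waveforms = [torch.zeros(3, 6000), torch.zeros(3, 5500), torch.zeros(3, 6000)]
selection_indexes = [0, 2]  # stations selected for prediction

# Indexing the Python list directly avoids np.array(waveforms)[selection_indexes],
# which triggers the FutureWarning / VisibleDeprecationWarning for ragged sequences.
waveforms = [waveforms[i] for i in selection_indexes]

# clone().detach() is what the UserWarning recommends instead of calling
# torch.tensor(waveform) on objects that are already tensors.
waveforms = [w.clone().detach() for w in waveforms]
```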
@@ -446,11 +521,14 @@
  " button.click(mark_phases, inputs=[inputs, upload], outputs=outputs)\n",
  " \n",
  " with gr.Tab(\"Select earthquake from catalogue\"):\n",
- "
- "
- "
- "
- "
+ "\n",
+ " gr.HTML(\"\"\"\n",
+ " <div style=\"padding: 20px; border-radius: 10px; font-size: 16px;\">\n",
+ " <p style=\"font-weight: bold; font-size: 24px; margin-bottom: 20px;\">Using PhaseHunter to Analyze Seismic Waveforms</p>\n",
+ " <p>Select an earthquake from the global earthquake catalogue and the app will download the waveform from the FDSN client of your choice. The app will use a velocity model of your choice to select appropriate time windows for each station within a specified radius of the earthquake.</p>\n",
+ " <p>The app will then analyze the waveforms and mark the detected phases on the waveform. Pick data for each waveform is reported in seconds from the start of the waveform.</p>\n",
+ " <p>Velocities are derived from distance and travel time determined by PhaseHunter picks (<span style=\"font-style: italic;\">v = distance/predicted_pick_time</span>). The background of the velocity plot is colored by DEM.</p>\n",
+ " </div>\n",
  " \"\"\")\n",
  " with gr.Row(): \n",
  " client_inputs = gr.Dropdown(\n",
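The description added in the hunk above defines station velocity as v = distance/predicted_pick_time. A minimal sketch of that calculation; using ObsPy's `gps2dist_azimuth` for epicentral distance is an assumption, and the coordinates and pick value are made up for illustration rather than taken from the app:

```python
from obspy.geodetics import gps2dist_azimuth

# hypothetical event and station coordinates (degrees)
event_lat, event_lon = 35.77, -117.60
station_lat, station_lon = 35.53, -117.36

# gps2dist_azimuth returns (distance in metres, azimuth, back azimuth)
distance_m, _, _ = gps2dist_azimuth(event_lat, event_lon, station_lat, station_lon)

# hypothetical PhaseHunter pick, in seconds from the start of the waveform
predicted_pick_time = 6.2

# v = distance / predicted_pick_time, reported here in km/s
velocity_km_s = (distance_m / 1000.0) / predicted_pick_time
print(f"Apparent velocity: {velocity_km_s:.2f} km/s")
```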
app.py
CHANGED
@@ -394,7 +394,9 @@ with gr.Blocks() as demo:
 
     with gr.Tab("Select earthquake from catalogue"):
         gr.Markdown("""
-        Select an earthquake from the global earthquake catalogue and the app will download the waveform from the FDSN client of your choice.
+        Select an earthquake from the global earthquake catalogue and the app will download the waveform from the FDSN client of your choice.
+        The app will use a velocity model of your choice to select appropriate time windows for each station within a specified radius of the earthquake.
+        The app will then analyze the waveforms and mark the detected phases on the waveform.
         Pick data for each waveform is reported in seconds from the start of the waveform.
         Velocities are derived from distance and travel time determined by PhaseHunter picks ($v = \mathrm{distance}/\mathrm{predicted\_pick\_time}$).
         Background of the velocity plot is colored by DEM.
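Both descriptions state that the app downloads waveforms from an FDSN client of the user's choice, and the notebook output above shows an inventory download followed by per-station waveform requests. A minimal sketch of such a request using ObsPy's FDSN client; the data centre name, the CI.CCC station code, the channel pattern, and the time window are assumptions for illustration, not the app's actual parameters:

```python
from obspy import UTCDateTime
from obspy.clients.fdsn import Client

client = Client("IRIS")  # any FDSN data centre name ObsPy recognises

# hypothetical origin time, close to the CI.CCC timestamp in the notebook log
origin = UTCDateTime("2019-07-04T17:33:40")

# station metadata ("inventory") for the requested network/station
inventory = client.get_stations(network="CI", station="CCC", level="channel")

# a one-minute waveform window starting at the origin time
stream = client.get_waveforms(network="CI", station="CCC", location="*",
                              channel="HH?", starttime=origin, endtime=origin + 60)
print(inventory)
print(stream)
```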