admin committed
Commit 353de0b · 1 Parent(s): 4279937
Files changed (7)
  1. .gitattributes +11 -11
  2. .gitignore +6 -0
  3. README.md +1 -1
  4. app.py +436 -0
  5. model.py +145 -0
  6. requirements.txt +6 -0
  7. utils.py +67 -0
.gitattributes CHANGED
@@ -1,35 +1,35 @@
 *.7z filter=lfs diff=lfs merge=lfs -text
 *.arrow filter=lfs diff=lfs merge=lfs -text
 *.bin filter=lfs diff=lfs merge=lfs -text
+*.bin.* filter=lfs diff=lfs merge=lfs -text
 *.bz2 filter=lfs diff=lfs merge=lfs -text
-*.ckpt filter=lfs diff=lfs merge=lfs -text
 *.ftz filter=lfs diff=lfs merge=lfs -text
 *.gz filter=lfs diff=lfs merge=lfs -text
 *.h5 filter=lfs diff=lfs merge=lfs -text
 *.joblib filter=lfs diff=lfs merge=lfs -text
 *.lfs.* filter=lfs diff=lfs merge=lfs -text
-*.mlmodel filter=lfs diff=lfs merge=lfs -text
 *.model filter=lfs diff=lfs merge=lfs -text
 *.msgpack filter=lfs diff=lfs merge=lfs -text
-*.npy filter=lfs diff=lfs merge=lfs -text
-*.npz filter=lfs diff=lfs merge=lfs -text
 *.onnx filter=lfs diff=lfs merge=lfs -text
 *.ot filter=lfs diff=lfs merge=lfs -text
 *.parquet filter=lfs diff=lfs merge=lfs -text
 *.pb filter=lfs diff=lfs merge=lfs -text
-*.pickle filter=lfs diff=lfs merge=lfs -text
-*.pkl filter=lfs diff=lfs merge=lfs -text
 *.pt filter=lfs diff=lfs merge=lfs -text
 *.pth filter=lfs diff=lfs merge=lfs -text
 *.rar filter=lfs diff=lfs merge=lfs -text
-*.safetensors filter=lfs diff=lfs merge=lfs -text
 saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.tar.* filter=lfs diff=lfs merge=lfs -text
-*.tar filter=lfs diff=lfs merge=lfs -text
 *.tflite filter=lfs diff=lfs merge=lfs -text
 *.tgz filter=lfs diff=lfs merge=lfs -text
-*.wasm filter=lfs diff=lfs merge=lfs -text
 *.xz filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
-*.zst filter=lfs diff=lfs merge=lfs -text
-*tfevents* filter=lfs diff=lfs merge=lfs -text
+*.zstandard filter=lfs diff=lfs merge=lfs -text
+*.tfevents* filter=lfs diff=lfs merge=lfs -text
+*.db* filter=lfs diff=lfs merge=lfs -text
+*.ark* filter=lfs diff=lfs merge=lfs -text
+**/*ckpt*data* filter=lfs diff=lfs merge=lfs -text
+**/*ckpt*.meta filter=lfs diff=lfs merge=lfs -text
+**/*ckpt*.index filter=lfs diff=lfs merge=lfs -text
+*.safetensors filter=lfs diff=lfs merge=lfs -text
+*.ckpt filter=lfs diff=lfs merge=lfs -text
+*.wav filter=lfs diff=lfs merge=lfs -text
.gitignore ADDED
@@ -0,0 +1,6 @@
+*.pt
+__pycache__/*
+tmp/*
+flagged/*
+test.py
+rename.sh
README.md CHANGED
@@ -1,6 +1,6 @@
 ---
 title: CTIS
-emoji: 👀
+emoji: 🪕🎶
 colorFrom: indigo
 colorTo: blue
 sdk: gradio
app.py ADDED
@@ -0,0 +1,436 @@
+import os
+import torch
+import random
+import shutil
+import librosa
+import warnings
+import numpy as np
+import gradio as gr
+import librosa.display
+import matplotlib.pyplot as plt
+from utils import get_modelist, find_files, embed_img, TEMP_DIR
+from collections import Counter
+from model import EvalNet
+
+
+TRANSLATE = {
+    "C0090": ["大笒", "da4_cen2"],
+    "C0091": ["高音横笛", "Treble_heng2_di2"],
+    "C0092": ["低音横笛", "Bass_heng2_di2"],
+    "C0093": ["中音横笛", "Alto_heng2_di2"],
+    "C0094": ["唢呐", "suo3_na"],
+    "C0095": ["长唢呐", "chang2_suo3_na"],
+    "C0096": ["小筚篥", "Treble_bi4_li"],
+    "C0097": ["中音筚篥", "Alto_bi4_li"],
+    "C0098": ["低音筚篥", "Bass_bi4_li"],
+    "C0099": ["短箫", "duan3_xiao1"],
+    "C0100": ["短箫(传统)", "duan3_xiao1_(traditional)"],
+    "C0101": ["洞箫", "dong4_xiao1"],
+    "C0113": ["尖子号", "jian1_zi3_hao4"],
+    "C0114": ["尖子号2", "jian1_zi3_hao4_2"],
+    "C0117": ["南音洞箫", "nan2_yin1_dong4_xiao1"],
+    "C0123": ["南嗳仔", "nan2_ai1_zai3"],
+    "C0124": ["大吹", "da4_chui1"],
+    "C0182": ["长号", "chang2_hao4"],
+    "C0183": ["老长号", "lao3_chang2_hao4"],
+    "C0187": ["高音唢呐", "Treble_suo3_na"],
+    "C0188": ["低音唢呐", "Bass_suo3_na"],
+    "C0200": ["大芦笙", "da4_lu2_sheng1"],
+    "C0201": ["小芦笙", "xiao3_lu2_sheng1"],
+    "C0237": ["G调梆笛", "bang1_di2_in_G"],
+    "C0243": ["高音键笙", "Treble_jian4_sheng1"],
+    "C0244": ["传统笙", "traditional_sheng1"],
+    "C0257": ["低音加键唢呐", "Bass_jia1_jian4_suo3_na"],
+    "C0259": ["中音加键唢呐", "Alto_jia1_jian4_suo3_na"],
+    "C0263": ["中音笙", "Alto_sheng1"],
+    "C0264": ["低音笙", "Bass_sheng1"],
+    "C0265": ["管子", "guan3_zi"],
+    "C0280": ["A调曲笛", "qu3_di2_in_A"],
+    "C0281": ["G调新笛", "xin1_di2_in_G"],
+    "C0282": ["萧", "xiao1"],
+    "C0283": ["埙", "xun1"],
+    "C0296": ["唢呐2", "suo3_na_2"],
+    "C0303": ["小闷笛", "xiao3_men1_di2"],
+    "C0304": ["侗笛", "dong4_di2"],
+    "C0305": ["德", "de2"],
+    "C0306": ["拉祜族葫芦笙", "la1_hu2_zu2_hu2_lu2_sheng1"],
+    "C0308": ["吐良", "tu3_liang2"],
+    "C0309": ["葫芦丝", "hu2_lu2_si1"],
+    "C0310": ["F调巴乌", "ba1_wu1_in_F"],
+    "C0311": ["俄比", "e2_bi3"],
+    "C0316": ["侗巴", "dong4_ba1"],
+    "D0015": ["扬琴", "yang2_qin2"],
+    "D0048": ["低音大锣", "Bass_da4_luo2"],
+    "D0049": ["虎音锣", "hu3_yin1_luo2"],
+    "D0050": ["小钹", "xiao3_bo1"],
+    "D0051": ["钹", "bo1"],
+    "D0058": ["提手(板)", "ti2_shou3_(ban3)"],
+    "D0060": ["川小锣", "chuan1_xiao3_luo2"],
+    "D0061": ["大铛铛", "da4_cheng1_cheng1"],
+    "D0062": ["小铛铛", "xiao3_cheng1_cheng1"],
+    "D0063": ["二馨", "er4_xin1"],
+    "D0064": ["川大钵", "chuan1_da4_bo1"],
+    "D0065": ["苏钵", "su1_bo1"],
+    "D0066": ["川剧堂鼓", "chuan1_ju4_tang2_gu3"],
+    "D0067": ["川铰", "chuan1_jiao3"],
+    "D0068": ["川大锣", "chuan1_da4_luo2"],
+    "D0069": ["蛮锣", "man2_luo2"],
+    "D0070": ["包锣", "bao1_luo2"],
+    "D0071": ["引鼓", "yin3_gu3"],
+    "D0102": ["上杖鼓", "shang4_zhang4_gu3"],
+    "D0103": ["小锣", "xiao3_luo2"],
+    "D0104": ["圆锣", "yuan2_luo2"],
+    "D0105": ["杖鼓", "zhang4_gu3"],
+    "D0125": ["南鼓", "nan2_gu3"],
+    "D0126": ["压脚鼓", "ya1_jiao3_gu3"],
+    "D0127": ["钟", "zhong1"],
+    "D0128": ["草锣", "cao3_luo2"],
+    "D0129": ["锣仔", "luo2_zai3"],
+    "D0130": ["响盏", "xiang3_zhan3"],
+    "D0131": ["小叫", "xiao3_jiao4"],
+    "D0132": ["拍", "pai1"],
+    "D0137": ["渔鼓", "yu2_gu3"],
+    "D0138": ["简板", "jian3_ban3"],
+    "D0140": ["脚梆子", "jiao3_bang1_zi"],
+    "D0143": ["双铃", "shuang1_ling2"],
+    "D0144": ["小叫锣", "xiao3_jiao4_luo2"],
+    "D0145": ["拍板", "pai1_ban3"],
+    "D0146": ["四宝", "si4_bao3"],
+    "D0147": ["响盏2", "xiang3_zhan3_2"],
+    "D0172": ["碗碗", "wan3_wan3"],
+    "D0173": ["代子", "dai4_zi"],
+    "D0176": ["福(新)", "fu2_(reformed)"],
+    "D0177": ["禄(新)", "lu4_(reformed)"],
+    "D0178": ["寿(新)", "shou4_(reformed)"],
+    "D0179": ["宜春三星鼓福鼓老鼓", "yi2_chun1_san1_xing1_gu3_fu2_gu3_(traditional)"],
+    "D0180": ["宜春三星鼓禄鼓老鼓", "yi2_chun1_san1_xing1_gu3_lu4_gu3_(traditional)"],
+    "D0181": ["宜春三星鼓寿鼓老鼓", "yi2_chun1_san1_xing1_gu3_shou4_gu3_(traditional)"],
+    "D0184": ["宜春三星鼓双铛", "yi2_chun1_san1_xing1_gu3_shuang1_ding1"],
+    "D0185": ["宜春三星鼓单铛", "yi2_chun1_san1_xing1_gu3_dan1_ding1"],
+    "D0186": ["宜春三星鼓镲", "yi2_chun1_san1_xing1_gu3_chao3"],
+    "D0241": ["编钟", "bian1_zhong1"],
+    "D0242": ["编磬", "bian1_qing4"],
+    "D0245": ["南梆子", "nan2_bang1_zi"],
+    "D0246": ["北梆子", "bei3_bang1_zi"],
+    "D0247": ["碰铃", "peng4_ling2"],
+    "D0248": ["中国大鼓", "Chinese_da4_gu3"],
+    "D0249": ["花盆鼓", "hua1_pen2_gu3"],
+    "D0250": ["小堂鼓", "xiao3_tang2_gu3"],
+    "D0251": ["扁鼓", "bian3_gu3"],
+    "D0252": ["五音排鼓", "wu3_yin1_pai2_gu3"],
+    "D0268": ["草帽镲", "cao3_mao4_chao3"],
+    "D0269": ["铙", "nao2"],
+    "D0270": ["铙钹", "nao2_bo1"],
+    "D0271": ["小镲", "xiao3_chao3"],
+    "D0272": ["抄锣", "chao1_luo2"],
+    "D0273": ["中虎", "zhong1_hu3"],
+    "D0274": ["武锣", "wu3_luo2"],
+    "D0275": ["小锣2", "xiao3_luo2_2"],
+    "D0276": ["马锣", "ma3_luo2"],
+    "D0277": ["木鱼", "mu4_yu2"],
+    "D0278": ["板鼓", "ban3_gu3"],
+    "D0279": ["云锣", "yun2_luo2"],
+    "D0284": ["斗锣", "dou3_luo2"],
+    "D0286": ["曲锣", "qu3_luo2"],
+    "D0287": ["深波", "shen1_bo1"],
+    "D0290": ["大镲", "da4_chao3"],
+    "D0298": ["编铓", "bian1_zhang1"],
+    "D0299": ["牛铃", "niu2_ling2"],
+    "D0315": ["竹排琴", "zhu2_pai2_qin2"],
+    "D0325": ["那格拉", "na4_ge2_la1"],
+    "D0326": ["库休克", "ku4_xiu1_ke4"],
+    "D0327": ["萨巴依", "sa4_ba1_yi1"],
+    "D0328": ["手鼓", "shou3_gu3"],
+    "L0044": ["锡剧主胡", "xi1_ju4_zhu3_hu2"],
+    "L0045": ["扬剧主胡", "yang2_ju4_zhu3_hu2"],
+    "L0046": ["扬剧主胡F调", "yang2_ju4_zhu3_hu2_in_F"],
+    "L0047": ["扬剧主胡(小西皮)", "yang2_ju4_zhu3_hu2_(xiao3_xi1_pi2)"],
+    "L0053": ["广西彩调主胡", "guang3_xi1_cai3_diao4_zhu3_hu2"],
+    "L0055": ["牛腿琴", "niu2_tui3_qin2"],
+    "L0056": ["壮剧马骨胡D调", "zhuang4_ju4_ma3_gu3_hu2_in_D"],
+    "L0072": ["盖板(新)D调", "gai4_ban3_(reformed)_in_D"],
+    "L0073": ["盖板(传统)", "gai4_ban3_(traditional)"],
+    "L0074": ["壮剧土胡", "zhuang4_ju4_tu3_hu2"],
+    "L0075": ["晋剧晋胡", "jin4_ju4_jin4_hu2"],
+    "L0076": ["壮剧土胡2", "zhuang4_ju4_tu3_hu2_2"],
+    "L0077": ["晋剧二股弦", "jin4_ju4_er4_gu3_xian2"],
+    "L0080": ["吕剧坠琴", "lv3_ju4_zhui4_qin2"],
+    "L0084": ["奚琴(传统)", "xi1_qin2_(traditional)"],
+    "L0085": ["奚琴(改良)", "xi1_qin2_(reformed)"],
+    "L0086": ["中音奚琴(改良)", "Alto_xi1_qin2_(reformed)"],
+    "L0115": ["莱芜梆子-梆胡", "lai2_wu2_bang1_zi-bang1_hu2"],
+    "L0121": ["六角弦", "liu4_jiao3_xian2"],
+    "L0122": ["壳仔弦", "ke2_zai3_xian2"],
+    "L0133": ["陇剧陇胡(传统)", "long3_ju4_long3_hu2_(traditional)"],
+    "L0134": ["陇剧陇胡(改良)D调", "long3_ju4_long3_hu2_(reformed)_in_D"],
+    "L0135": ["齐琴", "qi2_qin2"],
+    "L0136": ["渔胡", "yu2_hu2"],
+    "L0139": ["坠胡", "zhui4_hu2"],
+    "L0141": ["越胡", "yue4_hu2"],
+    "L0148": ["板胡", "ban3_hu2"],
+    "L0149": ["绍剧板胡", "shao4_ju4_ban3_hu2"],
+    "L0150": ["宛梆子梆胡", "yuan1_bang1_zi_bang1_hu2"],
+    "L0151": ["四弦", "si4_xian2"],
+    "L0152": ["滇葫(小二胡)", "dian1_hu2_(xiao3_er4_hu2)"],
+    "L0153": ["云南花灯丝弦", "yun2_nan2_hua1_deng1_si1_xian2"],
+    "L0154": ["仕胡", "shi4_hu2"],
+    "L0155": ["伬胡", "chi4_hu2"],
+    "L0156": ["工胡", "gong1_hu2"],
+    "L0157": ["大胡", "da4_hu2"],
+    "L0158": ["低音伬胡", "Bass_chi4_hu2"],
+    "L0160": ["丝弦", "si1_xian2"],
+    "L0161": ["滇胡", "dian1_hu2"],
+    "L0162": ["襄阳专用胡琴", "xiang1_yang2_zhuan1_yong4_hu2_qin2"],
+    "L0163": ["雷胡", "lei2_hu2"],
+    "L0164": ["赣胡", "gan4_hu2"],
+    "L0165": ["高腔赣胡", "gao1_qiang1_gan4_hu2"],
+    "L0166": ["高腔赣胡第2代", "gao1_qiang1_gan4_hu2_2nd_generation"],
+    "L0167": ["黔胡", "qian2_hu2"],
+    "L0168": ["花胡", "hua1_hu2"],
+    "L0169": ["花胡2", "hua1_hu2_2"],
+    "L0170": ["二股弦", "er4_gu3_xian2"],
+    "L0239": ["高音板胡", "Treble_ban3_hu2"],
+    "L0240": ["中音板胡", "Alto_ban3_hu2"],
+    "L0256": ["雷琴", "lei2_qin2"],
+    "L0266": ["二胡", "er4_hu2"],
+    "L0285": ["二弦", "er4_xian2"],
+    "L0288": ["椰胡", "ye1_hu2"],
+    "L0291": ["扁八角高胡", "bian3_ba1_jiao3_gao1_hu2"],
+    "L0292": ["六角高胡", "liu4_jiao3_gao1_hu2"],
+    "L0297": ["中胡", "zhong1_hu2"],
+    "L0307": ["芦笙", "lu2_sheng1"],
+    "L0312": ["牛角胡", "niu2_jiao3_hu2"],
+    "L0313": ["佤族独弦琴", "wa3_zu2_du2_xian2_qin2"],
+    "L0314": ["葫芦琴", "hu2_lu2_qin2"],
+    "T0006": ["陶布舒尔", "tao2_bu4_shu1_er3"],
+    "T0007": ["雅托嘎", "ya3_tuo1_ga2"],
+    "T0078": ["四股弦", "si4_gu3_xian2"],
+    "T0081": ["玄琴", "xuan2_qin2"],
+    "T0082": ["伽倻琴(改良)", "jia1_ye2_qin2_(reformed)"],
+    "T0083": ["伽倻琴", "jia1_ye2_qin2"],
+    "T0087": ["雅筝", "ya3_zheng1"],
+    "T0088": ["扬琴2", "yang2_qin2_2"],
+    "T0089": ["扬琴3", "yang2_qin2_3"],
+    "T0111": ["三弦", "san1_xian2"],
+    "T0116": ["八角月琴", "ba1_jiao3_yue4_qin2"],
+    "T0159": ["双清", "shuang1_qing1"],
+    "T0171": ["月琴", "yue4_qin2"],
+    "T0238": ["大阮", "da4_ruan3"],
+    "T0254": ["箜篌", "kong1_hou2"],
+    "T0255": ["古筝", "gu3_zheng1"],
+    "T0260": ["中阮", "zhong1_ruan3"],
+    "T0261": ["柳琴", "liu3_qin2"],
+    "T0262": ["琵琶", "pi2_pa2"],
+    "T0267": ["扬琴4", "yang2_qin2_4"],
+    "T0289": ["三弦2", "san1_xian2_2"],
+    "T0294": ["南音琵琶", "nan2_yin1_pi2_pa2"],
+    "T0295": ["南音三弦", "nan2_yin1_san1_xian2"],
+    "T0300": ["澜沧小三弦", "lan2_cang1_xiao3_san1_xian2"],
+    "T0301": ["玎", "ding1"],
+    "T0302": ["傈傈族奇奔", "li4_li4_zu2_qi2_ben1"],
+    "T0317": ["独弦琴", "du2_xian2_qin2"],
+    "T0318": ["弹拨尔", "dan4_bo1_er3"],
+    "T0319": ["低音热瓦普", "Bass_re4_wa3_pu3"],
+    "T0320": ["民间热瓦普", "folk_re4_wa3_pu3"],
+    "T0323": ["都它尔", "du1_ta1_er3"],
+}
+CLASSES = list(TRANSLATE.keys())
+SAMPLE_RATE = 44100
+
+
+def circular_padding(spec: np.ndarray, end: int):
+    size = len(spec)
+    if end <= size:
+        return spec
+
+    num_padding = end - size
+    num_repeat = num_padding // size + int(num_padding % size != 0)
+    padding = np.tile(spec, num_repeat)
+    return np.concatenate((spec, padding))[:end]
+
+
+def wav2mel(audio_path: str, width=2, top_db=40):
+    os.makedirs(TEMP_DIR, exist_ok=True)
+    try:
+        y, sr = librosa.load(audio_path, sr=SAMPLE_RATE)
+        non_silents = librosa.effects.split(y, top_db=top_db)
+        y = np.concatenate([y[start:end] for start, end in non_silents])
+        total_frames = len(y)
+        if total_frames % (width * sr) != 0:
+            count = total_frames // (width * sr) + 1
+            y = circular_padding(y, count * width * sr)
+
+        mel_spec = librosa.feature.melspectrogram(y=y, sr=sr)
+        log_mel_spec = librosa.power_to_db(mel_spec, ref=np.max)
+        dur = librosa.get_duration(y=y, sr=sr)
+        total_frames = log_mel_spec.shape[1]
+        step = int(width * total_frames / dur)
+        count = int(total_frames / step)
+        begin = int(0.5 * (total_frames - count * step))
+        end = begin + step * count
+        for i in range(begin, end, step):
+            librosa.display.specshow(log_mel_spec[:, i : i + step])
+            plt.axis("off")
+            plt.savefig(
+                f"{TEMP_DIR}/{i}.jpg",
+                bbox_inches="tight",
+                pad_inches=0.0,
+            )
+            plt.close()
+
+    except Exception as e:
+        print(f"Error converting {audio_path} : {e}")
+
+
+def wav2cqt(audio_path: str, width=2, top_db=40):
+    os.makedirs(TEMP_DIR, exist_ok=True)
+    try:
+        y, sr = librosa.load(audio_path, sr=SAMPLE_RATE)
+        non_silents = librosa.effects.split(y, top_db=top_db)
+        y = np.concatenate([y[start:end] for start, end in non_silents])
+        total_frames = len(y)
+        if total_frames % (width * sr) != 0:
+            count = total_frames // (width * sr) + 1
+            y = circular_padding(y, count * width * sr)
+
+        cqt_spec = librosa.cqt(y=y, sr=sr)
+        log_cqt_spec = librosa.power_to_db(np.abs(cqt_spec) ** 2, ref=np.max)
+        dur = librosa.get_duration(y=y, sr=sr)
+        total_frames = log_cqt_spec.shape[1]
+        step = int(width * total_frames / dur)
+        count = int(total_frames / step)
+        begin = int(0.5 * (total_frames - count * step))
+        end = begin + step * count
+        for i in range(begin, end, step):
+            librosa.display.specshow(log_cqt_spec[:, i : i + step])
+            plt.axis("off")
+            plt.savefig(
+                f"{TEMP_DIR}/{i}.jpg",
+                bbox_inches="tight",
+                pad_inches=0.0,
+            )
+            plt.close()
+
+    except Exception as e:
+        print(f"Error converting {audio_path} : {e}")
+
+
+def wav2chroma(audio_path: str, width=2, top_db=40):
+    os.makedirs(TEMP_DIR, exist_ok=True)
+    try:
+        y, sr = librosa.load(audio_path, sr=SAMPLE_RATE)
+        non_silents = librosa.effects.split(y, top_db=top_db)
+        y = np.concatenate([y[start:end] for start, end in non_silents])
+        total_frames = len(y)
+        if total_frames % (width * sr) != 0:
+            count = total_frames // (width * sr) + 1
+            y = circular_padding(y, count * width * sr)
+
+        chroma_spec = librosa.feature.chroma_stft(y=y, sr=sr)
+        log_chroma_spec = librosa.power_to_db(np.abs(chroma_spec) ** 2, ref=np.max)
+        dur = librosa.get_duration(y=y, sr=sr)
+        total_frames = log_chroma_spec.shape[1]
+        step = int(width * total_frames / dur)
+        count = int(total_frames / step)
+        begin = int(0.5 * (total_frames - count * step))
+        end = begin + step * count
+        for i in range(begin, end, step):
+            librosa.display.specshow(log_chroma_spec[:, i : i + step])
+            plt.axis("off")
+            plt.savefig(
+                f"{TEMP_DIR}/{i}.jpg",
+                bbox_inches="tight",
+                pad_inches=0.0,
+            )
+            plt.close()
+
+    except Exception as e:
+        print(f"Error converting {audio_path} : {e}")
+
+
+def most_frequent_value(lst: list):
+    counter = Counter(lst)
+    max_count = max(counter.values())
+    for element, count in counter.items():
+        if count == max_count:
+            return element
+
+    return None
+
+
+def infer(wav_path: str, log_name: str, folder_path=TEMP_DIR):
+    if os.path.exists(folder_path):
+        shutil.rmtree(folder_path)
+
+    if not wav_path:
+        return None, "请输入音频 Please input an audio!"
+
+    try:
+        model = EvalNet(log_name, len(TRANSLATE)).model
+    except Exception as e:
+        return None, f"{e}"
+
+    spec = log_name.split("_")[-3]
+    eval("wav2%s" % spec)(wav_path)
+    jpgs = find_files(folder_path, ".jpg")
+    preds = []
+    for jpg in jpgs:
+        input = embed_img(jpg)
+        output: torch.Tensor = model(input)
+        preds.append(torch.max(output.data, 1)[1])
+
+    pred_id = most_frequent_value(preds)
+    return (
+        os.path.basename(wav_path),
+        f"{TRANSLATE[CLASSES[pred_id]][0]} ({TRANSLATE[CLASSES[pred_id]][1].capitalize()})",
+    )
+
+
+if __name__ == "__main__":
+    warnings.filterwarnings("ignore")
+    models = get_modelist()
+    examples = []
+    example_wavs = find_files()
+    model_num = len(models)
+    for wav in example_wavs:
+        examples.append([wav, models[random.randint(0, model_num - 1)]])
+
+    with gr.Blocks() as demo:
+        gr.Interface(
+            fn=infer,
+            inputs=[
+                gr.Audio(label="上传录音 Upload a recording", type="filepath"),
+                gr.Dropdown(
+                    choices=models, label="选择模型 Select a model", value=models[0]
+                ),
+            ],
+            outputs=[
+                gr.Textbox(label="音频文件名 Audio filename", show_copy_button=True),
+                gr.Textbox(
+                    label="中国乐器识别 Chinese instrument recognition",
+                    show_copy_button=True,
+                ),
+            ],
+            examples=examples,
+            cache_examples=False,
+            flagging_mode="never",
+            title="建议录音时长保持在 3s 左右<br>It is recommended to keep the recording length around 3s.",
+        )
+
+        gr.Markdown(
+            """
+# 引用 Cite
+```bibtex
+@dataset{zhaorui_liu_2021_5676893,
+  author = {Monan Zhou, Shenyang Xu, Zhaorui Liu, Zhaowen Wang, Feng Yu, Wei Li and Baoqiang Han},
+  title = {CCMusic: an Open and Diverse Database for Chinese Music Information Retrieval Research},
+  month = {mar},
+  year = {2024},
+  publisher = {HuggingFace},
+  version = {1.2},
+  url = {https://huggingface.co/ccmusic-database}
+}
+```"""
+        )
+
+    demo.launch()
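The selected model name doubles as a dispatch key in `infer`: the spectrogram type is taken from the third-from-last underscore-separated field of the log name and the matching `wav2*` converter is invoked via `eval`. For reference, a minimal sketch of the same dispatch done with an explicit lookup table instead of `eval` (not part of the commit; it assumes it sits alongside the `wav2*` helpers in app.py):

```python
# Hypothetical alternative to eval("wav2%s" % spec): an explicit mapping
# from spectrogram name to converter function.
SPEC_FNS = {"mel": wav2mel, "cqt": wav2cqt, "chroma": wav2chroma}


def convert(wav_path: str, log_name: str):
    spec = log_name.split("_")[-3]  # same field that infer() parses
    SPEC_FNS[spec](wav_path)  # raises KeyError on an unknown spectrogram type
```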
model.py ADDED
@@ -0,0 +1,145 @@
+import torch
+import torch.nn as nn
+import torchvision.models as models
+from modelscope.msdatasets import MsDataset
+from utils import MODEL_DIR
+
+
+class EvalNet:
+    model: nn.Module = None
+    m_type = "squeezenet"
+    input_size = 224
+    output_size = 512
+
+    def __init__(self, log_name: str, cls_num: int):
+        saved_model_path = f"{MODEL_DIR}/{log_name}/save.pt"
+        m_ver = "_".join(log_name.split("_")[:-3])
+        self.m_type, self.input_size = self._model_info(m_ver)
+
+        if not hasattr(models, m_ver):
+            raise Exception("Unsupported model.")
+
+        self.model = eval("models.%s()" % m_ver)
+        linear_output = self._set_outsize()
+        self._set_classifier(cls_num, linear_output)
+        checkpoint = torch.load(saved_model_path, map_location="cpu")
+        if torch.cuda.is_available():
+            checkpoint = torch.load(saved_model_path)
+
+        self.model.load_state_dict(checkpoint, False)
+        self.model.eval()
+
+    def _get_backbone(self, ver: str, backbone_list: list):
+        for bb in backbone_list:
+            if ver == bb["ver"]:
+                return bb
+
+        print("Backbone name not found, using default option - alexnet.")
+        return backbone_list[0]
+
+    def _model_info(self, m_ver: str):
+        backbone_list = MsDataset.load(
+            "monetjoe/cv_backbones",
+            split="v1",
+        )
+        backbone = self._get_backbone(m_ver, backbone_list)
+        m_type = str(backbone["type"])
+        input_size = int(backbone["input_size"])
+        return m_type, input_size
+
+    def _classifier(self, cls_num: int, output_size: int, linear_output: bool):
+        q = (1.0 * output_size / cls_num) ** 0.25
+        l1 = int(q * cls_num)
+        l2 = int(q * l1)
+        l3 = int(q * l2)
+        if linear_output:
+            return torch.nn.Sequential(
+                nn.Dropout(),
+                nn.Linear(output_size, l3),
+                nn.ReLU(inplace=True),
+                nn.Dropout(),
+                nn.Linear(l3, l2),
+                nn.ReLU(inplace=True),
+                nn.Dropout(),
+                nn.Linear(l2, l1),
+                nn.ReLU(inplace=True),
+                nn.Linear(l1, cls_num),
+            )
+
+        else:
+            return torch.nn.Sequential(
+                nn.Dropout(),
+                nn.Conv2d(output_size, l3, kernel_size=(1, 1), stride=(1, 1)),
+                nn.ReLU(inplace=True),
+                nn.AdaptiveAvgPool2d(output_size=(1, 1)),
+                nn.Flatten(),
+                nn.Linear(l3, l2),
+                nn.ReLU(inplace=True),
+                nn.Dropout(),
+                nn.Linear(l2, l1),
+                nn.ReLU(inplace=True),
+                nn.Linear(l1, cls_num),
+            )
+
+    def _set_outsize(self):
+        for name, module in self.model.named_modules():
+            if (
+                str(name).__contains__("classifier")
+                or str(name).__eq__("fc")
+                or str(name).__contains__("head")
+                or hasattr(module, "classifier")
+            ):
+                if isinstance(module, torch.nn.Linear):
+                    self.output_size = module.in_features
+                    return True
+
+                if isinstance(module, torch.nn.Conv2d):
+                    self.output_size = module.in_channels
+                    return False
+
+        return False
+
+    def _set_classifier(self, cls_num: int, linear_output: bool):
+        if self.m_type == "convnext":
+            del self.model.classifier[2]
+            self.model.classifier = nn.Sequential(
+                *list(self.model.classifier)
+                + list(self._classifier(cls_num, self.output_size, linear_output))
+            )
+            return
+
+        elif self.m_type == "maxvit":
+            del self.model.classifier[5]
+            self.model.classifier = nn.Sequential(
+                *list(self.model.classifier)
+                + list(self._classifier(cls_num, self.output_size, linear_output))
+            )
+            return
+
+        if hasattr(self.model, "classifier"):
+            self.model.classifier = self._classifier(
+                cls_num, self.output_size, linear_output
+            )
+            return
+
+        elif hasattr(self.model, "fc"):
+            self.model.fc = self._classifier(cls_num, self.output_size, linear_output)
+            return
+
+        elif hasattr(self.model, "head"):
+            self.model.head = self._classifier(cls_num, self.output_size, linear_output)
+            return
+
+        self.model.heads.head = self._classifier(
+            cls_num, self.output_size, linear_output
+        )
+
+    def forward(self, x: torch.Tensor):
+        if torch.cuda.is_available():
+            x = x.cuda()
+            self.model = self.model.cuda()
+
+        if self.m_type == "googlenet":
+            return self.model(x)[0]
+        else:
+            return self.model(x)
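`EvalNet._classifier` sizes its hidden layers with a geometric taper: `q = (output_size / cls_num) ** 0.25`, so the widths `l3 > l2 > l1` step down from the backbone's feature size toward the class count by roughly the same ratio at each layer. A standalone illustration with made-up sizes (not part of the commit):

```python
# Illustration of the width taper used by EvalNet._classifier, with made-up sizes.
def taper(output_size: int, cls_num: int):
    q = (output_size / cls_num) ** 0.25
    l1 = int(q * cls_num)
    l2 = int(q * l1)
    l3 = int(q * l2)
    return output_size, l3, l2, l1, cls_num


print(taper(512, 2))  # (512, 128, 32, 8, 2): each step shrinks by the same factor q = 4
```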
requirements.txt ADDED
@@ -0,0 +1,6 @@
+librosa
+torch
+matplotlib
+torchvision
+pillow
+modelscope
utils.py ADDED
@@ -0,0 +1,67 @@
+import os
+import torch
+import torchvision.transforms as transforms
+from modelscope import snapshot_download
+from PIL import Image
+
+MODEL_DIR = snapshot_download(
+    f"ccmusic-database/CTIS",
+    cache_dir=f"{os.getcwd()}/__pycache__",
+)
+TEMP_DIR = f"{os.getcwd()}/flagged"
+
+
+def toCUDA(x):
+    if hasattr(x, "cuda"):
+        if torch.cuda.is_available():
+            return x.cuda()
+
+    return x
+
+
+def find_files(folder_path=f"{MODEL_DIR}/examples", ext=".wav"):
+    wav_files = []
+    for root, _, files in os.walk(folder_path):
+        for file in files:
+            if file.endswith(ext):
+                file_path = os.path.join(root, file)
+                wav_files.append(file_path)
+
+    return wav_files
+
+
+def get_modelist(model_dir=MODEL_DIR):
+    try:
+        entries = os.listdir(model_dir)
+    except OSError as e:
+        print(f"无法访问 {model_dir}: {e}")
+        return
+
+    # Iterate over all entries
+    output = []
+    for entry in entries:
+        # Build the full path
+        full_path = os.path.join(model_dir, entry)
+        # Skip the '.git' and 'examples' folders
+        if entry == ".git" or entry == "examples":
+            print(f"跳过 .git 或 examples 文件夹: {full_path}")
+            continue
+
+        # Keep only directories: each model lives in its own sub-folder
+        if os.path.isdir(full_path):
+            # Record the directory name
+            output.append(os.path.basename(full_path))
+
+    return output
+
+
+def embed_img(img_path: str, input_size=224):
+    transform = transforms.Compose(
+        [
+            transforms.Resize([input_size, input_size]),
+            transforms.ToTensor(),
+            transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
+        ]
+    )
+    img = Image.open(img_path).convert("RGB")
+    return transform(img).unsqueeze(0)
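For reference, a minimal smoke test of the helpers above; it is not part of the commit and assumes the ModelScope snapshot downloads successfully when utils.py is imported from the repository root:

```python
# Hypothetical local check of utils.py (not part of the commit).
from utils import get_modelist, find_files

if __name__ == "__main__":
    print("model folders:", get_modelist())  # snapshot sub-directories, minus .git/examples
    wavs = find_files()  # bundled example .wav files
    print("example count:", len(wavs))
    # embed_img() expects a spectrogram JPEG produced by app.py's wav2* helpers,
    # so it is exercised indirectly through infer().
```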