Noumida committed on
Commit
28dfe00
·
verified ·
1 Parent(s): dd3975a

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +27 -31
app.py CHANGED
@@ -4,66 +4,62 @@ import torchaudio
4
  import gradio as gr
5
  import spaces
6
  from transformers import AutoModel
7
-
8
DESCRIPTION = "IndicConformer-600M Multilingual ASR (CTC + RNNT)"

# Supported languages: display name -> language code expected by the model.
LANGUAGE_NAME_TO_CODE = {
    "Assamese": "as",
    "Bengali": "bn",
    "Bodo": "brx",
    "Dogri": "doi",
    "Gujarati": "gu",
    "Hindi": "hi",
    "Kannada": "kn",
    "Kashmiri": "ks",
    "Konkani": "kok",
    "Maithili": "mai",
    "Malayalam": "ml",
    "Manipuri": "mni",
    "Marathi": "mr",
    "Nepali": "ne",
    "Odia": "or",
    "Punjabi": "pa",
    "Sanskrit": "sa",
    "Santali": "sat",
    "Sindhi": "sd",
    "Tamil": "ta",
    "Telugu": "te",
    "Urdu": "ur",
}
14
 
15
# Run on GPU when one is available; input tensors must be moved to the same device.
device = "cuda" if torch.cuda.is_available() else "cpu"

# Conformer checkpoint with a custom forward (trust_remote_code) that accepts
# (waveform, lang_code, decoding_strategy).
model = AutoModel.from_pretrained(
    "ai4bharat/indic-conformer-600m-multilingual",
    trust_remote_code=True,
).to(device)
model.eval()
18
 
19
def chunk_waveform(waveform, sr, max_length_sec=60):
    """Split a (channels, samples) waveform into consecutive time chunks.

    Each chunk spans at most ``max_length_sec`` seconds at sample rate ``sr``;
    the final chunk may be shorter. Returns a list of tensor views.
    """
    samples_per_chunk = int(max_length_sec * sr)
    total_samples = waveform.shape[1]
    # Slicing past the end clamps automatically, so no explicit min() is needed.
    return [
        waveform[:, offset:offset + samples_per_chunk]
        for offset in range(0, total_samples, samples_per_chunk)
    ]
27
-
28
@spaces.GPU
def transcribe_ctc_and_rnnt(audio_path, language_name):
    """Transcribe an audio file with both CTC and RNNT decoding.

    Returns a ``(ctc_text, rnnt_text)`` tuple; on failure the first element
    carries the error message and the second is empty.
    """
    lang_code = LANGUAGE_NAME_TO_CODE[language_name]

    # Load, downmix to mono when multi-channel, resample to the model's 16 kHz.
    waveform, sr = torchaudio.load(audio_path)
    if waveform.shape[0] > 1:
        waveform = waveform.mean(dim=0, keepdim=True)
    waveform = torchaudio.functional.resample(waveform, sr, 16000).to(device)
    sr = 16000

    try:
        ctc_parts = []
        rnnt_parts = []
        with torch.no_grad():
            for chunk in chunk_waveform(waveform, sr, max_length_sec=60):  # 60s chunks
                # Segments shorter than one second are skipped entirely.
                if chunk.shape[1] < sr:
                    continue
                ctc_parts.append(str(model(chunk, lang_code, "ctc")).strip())
                rnnt_parts.append(str(model(chunk, lang_code, "rnnt")).strip())

        return " ".join(ctc_parts), " ".join(rnnt_parts)
    except Exception as e:
        return f"Error: {str(e)}", ""
53
 
 
 
 
54
# Gradio front end: audio + language in, one textbox per decoding strategy out.
with gr.Blocks() as demo:
    gr.Markdown(f"## {DESCRIPTION}")
    with gr.Row():
        with gr.Column():
            audio = gr.Audio(label="Upload or Record Audio", type="filepath")
            lang = gr.Dropdown(
                label="Select Language",
                choices=list(LANGUAGE_NAME_TO_CODE.keys()),
                value="Hindi",
            )
            transcribe_btn = gr.Button("Transcribe (CTC + RNNT)")
        with gr.Column():
            gr.Markdown("### CTC Transcription")
            ctc_output = gr.Textbox(lines=5)
            gr.Markdown("### RNNT Transcription")
            rnnt_output = gr.Textbox(lines=5)

    transcribe_btn.click(
        fn=transcribe_ctc_and_rnnt,
        inputs=[audio, lang],
        outputs=[ctc_output, rnnt_output],
        api_name="transcribe",
    )

if __name__ == "__main__":
    demo.queue().launch()
 
4
  import gradio as gr
5
  import spaces
6
  from transformers import AutoModel
7
# indicconformer
DESCRIPTION = "IndicConformer-600M Multilingual ASR (CTC + RNNT)"

# Supported languages: display name -> language code expected by the model.
LANGUAGE_NAME_TO_CODE = {
    "Assamese": "as",
    "Bengali": "bn",
    "Bodo": "brx",
    "Dogri": "doi",
    "Gujarati": "gu",
    "Hindi": "hi",
    "Kannada": "kn",
    "Kashmiri": "ks",
    "Konkani": "kok",
    "Maithili": "mai",
    "Malayalam": "ml",
    "Manipuri": "mni",
    "Marathi": "mr",
    "Nepali": "ne",
    "Odia": "or",
    "Punjabi": "pa",
    "Sanskrit": "sa",
    "Santali": "sat",
    "Sindhi": "sd",
    "Tamil": "ta",
    "Telugu": "te",
    "Urdu": "ur",
}
18
 
19
# Run on GPU when one is available; input tensors must be moved to the same device.
device = "cuda" if torch.cuda.is_available() else "cpu"

# Load Indic Conformer model (assumes custom forward handles decoding strategy)
model = AutoModel.from_pretrained(
    "ai4bharat/indic-conformer-600m-multilingual",
    trust_remote_code=True,
).to(device)
model.eval()
24
 
 
 
 
 
 
 
 
 
 
25
@spaces.GPU
def transcribe_ctc_and_rnnt(audio_path, language_name):
    """Transcribe an audio file with both CTC and RNNT decoding.

    Parameters
    ----------
    audio_path : str
        Filesystem path to the uploaded/recorded audio.
    language_name : str
        Display name; must be a key of ``LANGUAGE_NAME_TO_CODE``.

    Returns
    -------
    tuple[str, str]
        ``(ctc_text, rnnt_text)`` on success, ``("Error: ...", "")`` on failure.
    """
    lang_code = LANGUAGE_NAME_TO_CODE[language_name]

    # Load and preprocess audio: downmix to mono, resample to the model's 16 kHz.
    waveform, sr = torchaudio.load(audio_path)
    if waveform.shape[0] > 1:
        waveform = waveform.mean(dim=0, keepdim=True)
    if sr != 16000:  # skip the no-op resample when the source is already 16 kHz
        waveform = torchaudio.functional.resample(waveform, sr, 16000)
    waveform = waveform.to(device)

    try:
        # Assume model's forward method takes waveform, language code, and decoding type.
        # NOTE(review): the whole clip is fed in one pass (chunking was removed);
        # very long recordings may exhaust GPU memory — confirm acceptable lengths.
        with torch.no_grad():
            transcription_ctc = model(waveform, lang_code, "ctc")
            transcription_rnnt = model(waveform, lang_code, "rnnt")
    except Exception as e:
        return f"Error: {str(e)}", ""

    # str(...) guards against non-string model outputs: previously .strip() was
    # called outside the try block, so such a failure escaped the handler above.
    return str(transcription_ctc).strip(), str(transcription_rnnt).strip()
43
+
44
# Gradio UI
with gr.Blocks() as demo:
    gr.Markdown(f"## {DESCRIPTION}")
    with gr.Row():
        with gr.Column():
            # Input side: audio source plus target language.
            audio = gr.Audio(label="Upload or Record Audio", type="filepath")
            lang = gr.Dropdown(
                label="Select Language",
                choices=list(LANGUAGE_NAME_TO_CODE.keys()),
                value="Hindi",
            )
            transcribe_btn = gr.Button("Transcribe (CTC + RNNT)")
        with gr.Column():
            # Output side: one textbox per decoding strategy.
            gr.Markdown("### CTC Transcription")
            ctc_output = gr.Textbox(lines=3)
            gr.Markdown("### RNNT Transcription")
            rnnt_output = gr.Textbox(lines=3)

    transcribe_btn.click(
        fn=transcribe_ctc_and_rnnt,
        inputs=[audio, lang],
        outputs=[ctc_output, rnnt_output],
        api_name="transcribe",
    )

if __name__ == "__main__":
    demo.queue().launch()