Add gradio files
Files changed:
- README.md (+13 -13)
- app.py (+49 -0)
- packages.txt (+2 -0)
- requirements.txt (+6 -0)
README.md CHANGED

@@ -1,8 +1,8 @@
 ---
-title: Luganda
-emoji:
+title: Luganda ASR
+emoji: π
 colorFrom: blue
-colorTo:
+colorTo: yellow
 sdk: gradio
 app_file: app.py
 pinned: false
@@ -10,28 +10,28 @@ pinned: false
 
 # Configuration
 
-`title`: _string_
+`title`: _string_
 Display title for the Space
 
-`emoji`: _string_
+`emoji`: _string_
 Space emoji (emoji-only character allowed)
 
-`colorFrom`: _string_
+`colorFrom`: _string_
 Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray)
 
-`colorTo`: _string_
+`colorTo`: _string_
 Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray)
 
-`sdk`: _string_
-Can be either `gradio
+`sdk`: _string_
+Can be either `gradio` or `streamlit`
 
-`sdk_version` : _string_
+`sdk_version` : _string_
 Only applicable for `streamlit` SDK.
 See [doc](https://hf.co/docs/hub/spaces) for more info on supported versions.
 
-`app_file`: _string_
-Path to your main application file (which contains either `gradio` or `streamlit` Python code
+`app_file`: _string_
+Path to your main application file (which contains either `gradio` or `streamlit` Python code).
 Path is relative to the root of the repository.
 
-`pinned`: _boolean_
+`pinned`: _boolean_
 Whether the Space stays on top of your list.
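The block between the `---` markers is the Space's YAML configuration: the commit fills in the display title, emoji, and thumbnail gradient colors that the Hub shows on the Space card. The `# Configuration` section below it is the stock template text documenting those keys, which this commit only tidies up.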
app.py ADDED

@@ -0,0 +1,49 @@
+import soundfile as sf
+import torch
+from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor
+import gradio as gr
+import sox
+import os
+
+
+def convert(inputfile, outfile):
+    sox_tfm = sox.Transformer()
+    sox_tfm.set_output_format(
+        file_type="wav", channels=1, encoding="signed-integer", rate=16000, bits=16
+    )
+    sox_tfm.build(inputfile, outfile)
+
+
+api_token = os.getenv("API_TOKEN")
+model_name = "indonesian-nlp/wav2vec2-luganda"
+processor = Wav2Vec2Processor.from_pretrained(model_name, use_auth_token=api_token)
+model = Wav2Vec2ForCTC.from_pretrained(model_name, use_auth_token=api_token)
+
+
+def parse_transcription(wav_file):
+    filename = wav_file.name.split('.')[0]
+    convert(wav_file.name, filename + "16k.wav")
+    speech, _ = sf.read(filename + "16k.wav")
+    input_values = processor(speech, sampling_rate=16_000, return_tensors="pt").input_values
+    logits = model(input_values).logits
+    predicted_ids = torch.argmax(logits, dim=-1)
+    transcription = processor.decode(predicted_ids[0], skip_special_tokens=True)
+    return transcription
+
+
+output = gr.outputs.Textbox(label="The transcript")
+
+input_ = gr.inputs.Audio(source="microphone", type="file")
+
+gr.Interface(parse_transcription, inputs=input_, outputs=[output],
+             analytics_enabled=False,
+             show_tips=False,
+             theme='huggingface',
+             layout='vertical',
+             title="Automatic Speech Recognition for Luganda",
+             description="Speech Recognition Live Demo for Luganda",
+             article="This demo was built for the "
+                     "<a href='https://zindi.africa/competitions/mozilla-luganda-automatic-speech-recognition' target='_blank'>Mozilla Luganda Automatic Speech Recognition Competition</a>. "
+                     "It uses the <a href='https://huggingface.co/indonesian-nlp/wav2vec2-luganda' target='_blank'>indonesian-nlp/wav2vec2-luganda</a> model "
+                     "which was fine-tuned on Luganda Common Voice speech datasets.",
+             enable_queue=True).launch( inline=False)
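The app records audio from the microphone, resamples it to mono 16 kHz with sox, and runs it through the wav2vec2 CTC model (the `gr.inputs`/`gr.outputs` API and the `theme`, `layout`, and `show_tips` arguments reflect the Gradio 2.x releases current at the time of this commit). For reference, a minimal offline sketch of the same inference path is shown below; the audio path is a hypothetical placeholder, the file is assumed to already be mono 16 kHz (as produced by `convert()` above), and the checkpoint is assumed to be reachable without the `API_TOKEN` used in app.py.

# Minimal offline sketch (not part of the commit): transcribe one local WAV file.
import soundfile as sf
import torch
from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor

model_name = "indonesian-nlp/wav2vec2-luganda"
processor = Wav2Vec2Processor.from_pretrained(model_name)
model = Wav2Vec2ForCTC.from_pretrained(model_name)

# "sample_16k.wav" is a hypothetical path; the audio must be mono, 16 kHz PCM.
speech, _ = sf.read("sample_16k.wav")
inputs = processor(speech, sampling_rate=16_000, return_tensors="pt")

with torch.no_grad():  # inference only, no gradients needed
    logits = model(inputs.input_values).logits

predicted_ids = torch.argmax(logits, dim=-1)
print(processor.decode(predicted_ids[0], skip_special_tokens=True))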
packages.txt ADDED

@@ -0,0 +1,2 @@
+libsndfile1
+sox
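packages.txt lists Debian packages that the Spaces build installs with apt before the Python dependencies: libsndfile1 is the native library behind the soundfile package, and sox provides the command-line audio converter driven by the pysox wrapper in app.py.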
requirements.txt ADDED

@@ -0,0 +1,6 @@
+gradio
+soundfile
+torch
+transformers
+sox
+sentencepiece
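requirements.txt is installed with pip at build time. Note that `sox` here is the pysox Python wrapper around the sox binary supplied by packages.txt, not the binary itself.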