init
- attach_speaker_embedding_s2s.py +3 -102
- speaker_embedding_metavoice.py +104 -0
- test_s2s.sh +1 -1
- tokenize_dataset_s2s.py +1 -1
- encodec_audio_tokenizer.py → tokenizer_encodec.py +0 -0
attach_speaker_embedding_s2s.py
CHANGED
@@ -1,21 +1,13 @@
-import subprocess
-from os.path import join as p_join
-from typing import Optional
-
-import librosa
-from librosa import feature
-import numpy as np
-from torch import nn
-
-
 import os
 from os.path import expanduser
 
 import shutil
-import torch
 from soundfile import LibsndfileError
 from datasets import load_dataset, DatasetDict, Audio
 
+from speaker_embedding_metavoice import MetaVoiceSE
+
+
 direction = os.getenv("DIRECTION", "enA-jaA")
 sides = set(direction.split("-"))
 dataset_id = os.getenv("DATASET_ID", 0)
@@ -24,97 +16,6 @@ hf_org = os.getenv("HF_ORG", "asahi417")
 hf_dataset = os.getenv("HF_DATASET", f"seamless-align-{direction}")
 dataset = load_dataset(f"{hf_org}/{hf_dataset}", f"subset_{dataset_id}", split="train")
 audio_loader = Audio()
-
-
-checkpoint_url = "https://huggingface.co/datasets/asahi417/experiment-speaker-embedding/resolve/main/meta_voice_speaker_encoder.pt"
-model_weight = p_join(os.path.expanduser('~'), ".cache", "experiment_speaker_embedding", "meta_voice_speaker_encoder.pt")
-
-
-def wget(url: str, output_file: Optional[str] = None):
-    os.makedirs(os.path.dirname(output_file), exist_ok=True)
-    subprocess.run(["wget", url, "-O", output_file])
-    if not os.path.exists(output_file):
-        raise ValueError(f"failed to download {url}")
-
-
-class MetaVoiceSE(nn.Module):
-
-    mel_window_length = 25
-    mel_window_step = 10
-    mel_n_channels = 40
-    sampling_rate = 16000
-    partials_n_frames = 160
-    model_hidden_size = 256
-    model_embedding_size = 256
-    model_num_layers = 3
-
-    def __init__(self):
-        super().__init__()
-        if not os.path.exists(model_weight):
-            wget(checkpoint_url, model_weight)
-        # Define the network
-        self.lstm = nn.LSTM(self.mel_n_channels, self.model_hidden_size, self.model_num_layers, batch_first=True)
-        self.linear = nn.Linear(self.model_hidden_size, self.model_embedding_size)
-        self.relu = nn.ReLU()
-        # Load weight
-        self.load_state_dict(torch.load(model_weight, map_location="cpu")["model_state"], strict=False)
-        # Get the target device
-        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
-        self.to(self.device)
-        self.eval()
-
-    def compute_partial_slices(self, n_samples: int, rate, min_coverage):
-        # Compute how many frames separate two partial utterances
-        samples_per_frame = int((self.sampling_rate * self.mel_window_step / 1000))
-        n_frames = int(np.ceil((n_samples + 1) / samples_per_frame))
-        frame_step = int(np.round((self.sampling_rate / rate) / samples_per_frame))
-        # Compute the slices
-        wav_slices, mel_slices = [], []
-        steps = max(1, n_frames - self.partials_n_frames + frame_step + 1)
-        for i in range(0, steps, frame_step):
-            mel_range = np.array([i, i + self.partials_n_frames])
-            wav_range = mel_range * samples_per_frame
-            mel_slices.append(slice(*mel_range))
-            wav_slices.append(slice(*wav_range))
-        # Evaluate whether extra padding is warranted or not
-        last_wav_range = wav_slices[-1]
-        coverage = (n_samples - last_wav_range.start) / (last_wav_range.stop - last_wav_range.start)
-        if coverage < min_coverage and len(mel_slices) > 1:
-            return wav_slices[:-1], mel_slices[:-1]
-        return wav_slices, mel_slices
-
-    def get_speaker_embedding(self,
-                              wav: np.ndarray,
-                              sampling_rate: Optional[int] = None,
-                              rate: float = 1.3,
-                              min_coverage: float = 0.75) -> np.ndarray:
-        if sampling_rate != self.sampling_rate:
-            wav = librosa.resample(wav, orig_sr=sampling_rate, target_sr=self.sampling_rate)
-        wav, _ = librosa.effects.trim(wav, top_db=20)
-        wav_slices, mel_slices = self.compute_partial_slices(len(wav), rate, min_coverage)
-        max_wave_length = wav_slices[-1].stop
-        if max_wave_length >= len(wav):
-            wav = np.pad(wav, (0, max_wave_length - len(wav)), "constant")
-        # Wav -> Mel spectrogram
-        frames = feature.melspectrogram(
-            y=wav,
-            sr=self.sampling_rate,
-            n_fft=int(self.sampling_rate * self.mel_window_length / 1000),
-            hop_length=int(self.sampling_rate * self.mel_window_step / 1000),
-            n_mels=self.mel_n_channels,
-        )
-        mel = frames.astype(np.float32).T
-        mel = np.array([mel[s] for s in mel_slices])
-        # inference
-        with torch.no_grad():
-            mel = torch.from_numpy(mel).to(self.device)
-            _, (hidden, _) = self.lstm(mel)
-            embeds_raw = self.relu(self.linear(hidden[-1]))
-            partial_embeds = embeds_raw / torch.norm(embeds_raw, dim=1, keepdim=True)
-            partial_embeds = partial_embeds.cpu().numpy()
-            raw_embed = np.mean(partial_embeds, axis=0)
-            return raw_embed / np.linalg.norm(raw_embed, 2)
-
-
 speaker_embedder = MetaVoiceSE()
 
 
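For context, a sketch of how the refactored script could attach a speaker embedding to each row with the datasets Audio loader; the actual mapping code of attach_speaker_embedding_s2s.py is not part of this diff, so the function and column names below are hypothetical illustrations only.

# Hypothetical usage sketch (not in this commit): attach one embedding per side of the pair.
def attach_speaker_embedding(example, side: str = "enA"):
    audio = audio_loader.decode_example(example[f"{side}.audio"])  # assumed audio column name
    example[f"{side}.speaker_embedding"] = speaker_embedder.get_speaker_embedding(
        audio["array"], sampling_rate=audio["sampling_rate"]
    )
    return example

dataset = dataset.map(attach_speaker_embedding)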
speaker_embedding_metavoice.py
ADDED
@@ -0,0 +1,104 @@
+"""Speaker embedding obtained via speaker verification training.
+- feature dimension: 256
+- source: https://github.com/metavoiceio/metavoice-src
+"""
+import os
+import subprocess
+from os.path import join as p_join
+from typing import Optional
+
+import librosa
+from librosa import feature
+import numpy as np
+import torch
+from torch import nn
+
+
+checkpoint_url = "https://huggingface.co/datasets/asahi417/experiment-speaker-embedding/resolve/main/meta_voice_speaker_encoder.pt"
+model_weight = p_join(os.path.expanduser('~'), ".cache", "experiment_speaker_embedding", "meta_voice_speaker_encoder.pt")
+
+
+def wget(url: str, output_file: Optional[str] = None):
+    os.makedirs(os.path.dirname(output_file), exist_ok=True)
+    subprocess.run(["wget", url, "-O", output_file])
+    if not os.path.exists(output_file):
+        raise ValueError(f"failed to download {url}")
+
+
+class MetaVoiceSE(nn.Module):
+
+    mel_window_length = 25
+    mel_window_step = 10
+    mel_n_channels = 40
+    sampling_rate = 16000
+    partials_n_frames = 160
+    model_hidden_size = 256
+    model_embedding_size = 256
+    model_num_layers = 3
+
+    def __init__(self):
+        super().__init__()
+        if not os.path.exists(model_weight):
+            wget(checkpoint_url, model_weight)
+        # Define the network
+        self.lstm = nn.LSTM(self.mel_n_channels, self.model_hidden_size, self.model_num_layers, batch_first=True)
+        self.linear = nn.Linear(self.model_hidden_size, self.model_embedding_size)
+        self.relu = nn.ReLU()
+        # Load weight
+        self.load_state_dict(torch.load(model_weight, map_location="cpu")["model_state"], strict=False)
+        # Get the target device
+        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+        self.to(self.device)
+        self.eval()
+
+    def compute_partial_slices(self, n_samples: int, rate, min_coverage):
+        # Compute how many frames separate two partial utterances
+        samples_per_frame = int((self.sampling_rate * self.mel_window_step / 1000))
+        n_frames = int(np.ceil((n_samples + 1) / samples_per_frame))
+        frame_step = int(np.round((self.sampling_rate / rate) / samples_per_frame))
+        # Compute the slices
+        wav_slices, mel_slices = [], []
+        steps = max(1, n_frames - self.partials_n_frames + frame_step + 1)
+        for i in range(0, steps, frame_step):
+            mel_range = np.array([i, i + self.partials_n_frames])
+            wav_range = mel_range * samples_per_frame
+            mel_slices.append(slice(*mel_range))
+            wav_slices.append(slice(*wav_range))
+        # Evaluate whether extra padding is warranted or not
+        last_wav_range = wav_slices[-1]
+        coverage = (n_samples - last_wav_range.start) / (last_wav_range.stop - last_wav_range.start)
+        if coverage < min_coverage and len(mel_slices) > 1:
+            return wav_slices[:-1], mel_slices[:-1]
+        return wav_slices, mel_slices
+
+    def get_speaker_embedding(self,
+                              wav: np.ndarray,
+                              sampling_rate: Optional[int] = None,
+                              rate: float = 1.3,
+                              min_coverage: float = 0.75) -> np.ndarray:
+        if sampling_rate != self.sampling_rate:
+            wav = librosa.resample(wav, orig_sr=sampling_rate, target_sr=self.sampling_rate)
+        wav, _ = librosa.effects.trim(wav, top_db=20)
+        wav_slices, mel_slices = self.compute_partial_slices(len(wav), rate, min_coverage)
+        max_wave_length = wav_slices[-1].stop
+        if max_wave_length >= len(wav):
+            wav = np.pad(wav, (0, max_wave_length - len(wav)), "constant")
+        # Wav -> Mel spectrogram
+        frames = feature.melspectrogram(
+            y=wav,
+            sr=self.sampling_rate,
+            n_fft=int(self.sampling_rate * self.mel_window_length / 1000),
+            hop_length=int(self.sampling_rate * self.mel_window_step / 1000),
+            n_mels=self.mel_n_channels,
+        )
+        mel = frames.astype(np.float32).T
+        mel = np.array([mel[s] for s in mel_slices])
+        # inference
+        with torch.no_grad():
+            mel = torch.from_numpy(mel).to(self.device)
+            _, (hidden, _) = self.lstm(mel)
+            embeds_raw = self.relu(self.linear(hidden[-1]))
+            partial_embeds = embeds_raw / torch.norm(embeds_raw, dim=1, keepdim=True)
+            partial_embeds = partial_embeds.cpu().numpy()
+            raw_embed = np.mean(partial_embeds, axis=0)
+            return raw_embed / np.linalg.norm(raw_embed, 2)
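A quick standalone usage sketch for the new module (the wav path and the final shape check are illustrative, not from this commit):

import librosa
from speaker_embedding_metavoice import MetaVoiceSE

embedder = MetaVoiceSE()
wav, sr = librosa.load("sample.wav", sr=None)  # "sample.wav" is a placeholder path
embedding = embedder.get_speaker_embedding(wav, sampling_rate=sr)
print(embedding.shape)  # expected (256,), i.e. model_embedding_size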
test_s2s.sh
CHANGED
@@ -6,7 +6,7 @@ export LINE_NO_END=10
 export HF_DATASET="experiment-process-seamless-align"
 python fetch_dataset_s2s.py
 # tokenize
-export DATASET_ID=
+export DATASET_ID=test_tokenized
 export DIRECTION="enA-jaA"
 export HF_DATASET="experiment-process-seamless-align"
 python tokenize_dataset_s2s.py
tokenize_dataset_s2s.py
CHANGED
@@ -5,7 +5,7 @@ import shutil
 import torch
 from soundfile import LibsndfileError
 from datasets import load_dataset, DatasetDict, Audio
-from
+from tokenizer_encodec import EncodecTokenizer
 
 
 direction = os.getenv("DIRECTION", "enA-jaA")
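EncodecTokenizer itself lives in tokenizer_encodec.py (renamed below, contents not shown in this view), so its exact interface is not visible here. As background only, raw Encodec tokenization with the standalone encodec package looks roughly like the following sketch; it is not the repo's wrapper.

# Background sketch using facebookresearch/encodec directly; EncodecTokenizer's real API may differ.
import torch
from encodec import EncodecModel

model = EncodecModel.encodec_model_24khz()
model.set_target_bandwidth(6.0)
wav = torch.randn(1, 1, 24000)  # dummy (batch, channels, samples) clip at 24 kHz
with torch.no_grad():
    encoded_frames = model.encode(wav)
codes = torch.cat([codes for codes, _ in encoded_frames], dim=-1)  # (batch, n_codebooks, n_frames)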
encodec_audio_tokenizer.py → tokenizer_encodec.py
RENAMED
File without changes