Commit

bugfix: beatrice load
w-okada committed Nov 8, 2023
1 parent 3512bbb commit d03132d
Showing 12 changed files with 525 additions and 310 deletions.
1 change: 0 additions & 1 deletion client/demo/build-voice-changer-js.sh
@@ -4,7 +4,6 @@
# cp -r ~/git-work/voice-changer-js/lib/package.json node_modules/@dannadori/voice-changer-js/
# cp -r ~/git-work/voice-changer-js/lib/dist node_modules/@dannadori/voice-changer-js/


cd ~/git-work/voice-changer-js/lib/ ; npm run build:prod; cd -
rm -rf node_modules/@dannadori/voice-changer-js
mkdir -p node_modules/@dannadori/voice-changer-js/dist
37 changes: 24 additions & 13 deletions client/demo/src/001_provider/001_AppStateProvider.tsx
@@ -1,12 +1,11 @@
import { ClientState } from "@dannadori/voice-changer-client-js";
import { VoiceChangerJSClient } from "@dannadori/voice-changer-js";
import React, { useContext, useEffect, useRef } from "react";
import { ReactNode } from "react";
import { useVCClient } from "../001_globalHooks/001_useVCClient";
import { useAppRoot } from "./001_AppRootProvider";
import { useMessageBuilder } from "../hooks/useMessageBuilder";

import { VoiceChangerJSClient } from "./VoiceChangerJSClient";

type Props = {
children: ReactNode;
};
@@ -58,22 +57,34 @@ export const AppStateProvider = ({ children }: Props) => {

// useEffect(() => {
// if (clientState.clientState.initialized) {
// const baseUrl = "https://192.168.0.247:18888";
// // const modelUrl = `${baseUrl}/models/rvc2v_40k_f0_24000.bin`;
// // const modelUrl = `${baseUrl}/models/rvc2v_40k_nof0_24000.bin`;
// // const modelUrl = `${baseUrl}/models/rvc2v_16k_f0_24000.bin`;
// // const modelUrl = `${baseUrl}/models/rvcv2_amitaro_v2_40k_f0_24000.bin`;
// // const modelUrl = `${baseUrl}/models/rvcv2_amitaro_v2_40k_nof0_24000.bin`;
// // const modelUrl = `${baseUrl}/models/rvcv2_amitaro_v2_32k_f0_24000.bin`;
// // const modelUrl = `${baseUrl}/models/rvcv2_amitaro_v2_32k_nof0_24000.bin`;

// // const modelUrl = `${baseUrl}/models/rvcv1_amitaro_v1_32k_f0_24000.bin`;
// const modelUrl = `${baseUrl}/models/rvcv1_amitaro_v1_32k_nof0_24000.bin`;
// // const modelUrl = `${baseUrl}/models/rvcv1_amitaro_v1_40k_f0_24000.bin`;
// // const modelUrl = `${baseUrl}/models/rvcv1_amitaro_v1_40k_nof0_24000.bin`;

// voiceChangerJSClient.current = new VoiceChangerJSClient();
// voiceChangerJSClient.current.initialize();
// voiceChangerJSClient.current.initialize(
// {
// baseUrl: baseUrl,
// inputSamplingRate: 48000,
// outputSamplingRate: 48000,
// },
// modelUrl,
// );
// clientState.clientState.setInternalAudioProcessCallback({
// processAudio: async (data: Uint8Array) => {
// console.log("[CLIENTJS] start --------------------------------------");
// const audioF32 = new Float32Array(data.buffer);
// const converted = await voiceChangerJSClient.current!.convert(audioF32);

// let audio_int16_out = new Int16Array(converted.length);
// for (let i = 0; i < converted.length; i++) {
// audio_int16_out[i] = converted[i] * 32768.0;
// }
// const res = new Uint8Array(audio_int16_out.buffer);
// console.log("AUDIO::::audio_int16_out", audio_int16_out);

// console.log("[CLIENTJS] end --------------------------------------");
// const res = new Uint8Array(converted.buffer);
// return res;
// },
// });
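The commented-out callback above also changes how converted audio is handed back: the removed loop scaled each float sample by 32768 into an Int16Array before taking its bytes, while the new version returns the converted buffer's bytes directly (presumably still float32 samples). A minimal TypeScript sketch of the two conversions, for reference only (the helper names are illustrative, not from this repository):

// Old path: scale normalized float samples into 16-bit PCM and return the raw bytes.
const float32ToInt16Bytes = (samples: Float32Array): Uint8Array => {
    const int16 = new Int16Array(samples.length);
    for (let i = 0; i < samples.length; i++) {
        // mirrors the removed loop; production code would usually clamp to [-32768, 32767] first
        int16[i] = samples[i] * 32768.0;
    }
    return new Uint8Array(int16.buffer);
};

// New path: reinterpret the float32 samples as raw bytes without rescaling.
const float32Bytes = (samples: Float32Array): Uint8Array => new Uint8Array(samples.buffer);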
149 changes: 0 additions & 149 deletions client/demo/src/001_provider/VoiceChangerJSClient.ts

This file was deleted.

32 changes: 28 additions & 4 deletions client/demo/webpack.common.js
@@ -55,20 +55,44 @@ module.exports = {
patterns: [{ from: "public/favicon.ico", to: "favicon.ico" }],
}),

new CopyPlugin({
patterns: [{ from: "./node_modules/@dannadori/voice-changer-js/dist/ort-wasm-simd.wasm", to: "ort-wasm-simd.wasm" }],
}),
// new CopyPlugin({
// patterns: [{ from: "./node_modules/@dannadori/voice-changer-js/dist/ort-wasm-simd.wasm", to: "ort-wasm-simd.wasm" }],
// patterns: [{ from: "./node_modules/@dannadori/voice-changer-js/dist/tfjs-backend-wasm-simd.wasm", to: "tfjs-backend-wasm-simd.wasm" }],
// }),
// new CopyPlugin({
// patterns: [{ from: "./node_modules/@dannadori/voice-changer-js/dist/process.js", to: "process.js" }],
// }),
// new CopyPlugin({
// patterns: [{ from: "public/models/emb_pit_24000.bin", to: "models/emb_pit_24000.bin" }],
// patterns: [{ from: "public/models/rvcv2_emb_pit_24000.bin", to: "models/rvcv2_emb_pit_24000.bin" }],
// }),
// new CopyPlugin({
// patterns: [{ from: "public/models/rvcv2_amitaro_v2_32k_f0_24000.bin", to: "models/rvcv2_amitaro_v2_32k_f0_24000.bin" }],
// }),
// new CopyPlugin({
// patterns: [{ from: "public/models/rvcv2_amitaro_v2_32k_nof0_24000.bin", to: "models/rvcv2_amitaro_v2_32k_nof0_24000.bin" }],
// }),
// new CopyPlugin({
// patterns: [{ from: "public/models/rvcv2_amitaro_v2_40k_f0_24000.bin", to: "models/rvcv2_amitaro_v2_40k_f0_24000.bin" }],
// }),
// new CopyPlugin({
// patterns: [{ from: "public/models/rvcv2_amitaro_v2_40k_nof0_24000.bin", to: "models/rvcv2_amitaro_v2_40k_nof0_24000.bin" }],
// }),
// new CopyPlugin({
// patterns: [{ from: "public/models/rvcv1_emb_pit_24000.bin", to: "models/rvcv1_emb_pit_24000.bin" }],
// }),
// new CopyPlugin({
// patterns: [{ from: "public/models/rvcv1_amitaro_v1_32k_f0_24000.bin", to: "models/rvcv1_amitaro_v1_32k_f0_24000.bin" }],
// }),
// new CopyPlugin({
// patterns: [{ from: "public/models/rvcv1_amitaro_v1_32k_nof0_24000.bin", to: "models/rvcv1_amitaro_v1_32k_nof0_24000.bin" }],
// }),
// new CopyPlugin({
// patterns: [{ from: "public/models/rvc2v_24000.bin", to: "models/rvc2v_24000.bin" }],
// patterns: [{ from: "public/models/rvcv1_amitaro_v1_40k_f0_24000.bin", to: "models/rvcv1_amitaro_v1_40k_f0_24000.bin" }],
// }),
// new CopyPlugin({
// patterns: [{ from: "public/models/rvc2vnof0_24000.bin", to: "models/rvc2vnof0_24000.bin" }],
// patterns: [{ from: "public/models/rvcv1_amitaro_v1_40k_nof0_24000.bin", to: "models/rvcv1_amitaro_v1_40k_nof0_24000.bin" }],
// }),
],
};
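The change in this file adds an active CopyPlugin entry that ships ort-wasm-simd.wasm (the SIMD WebAssembly runtime used by onnxruntime-web) from the @dannadori/voice-changer-js package into the build output, while the model-copying entries below it stay commented out. A hedged sketch of that usage in isolation, written as a TypeScript webpack config (the surrounding fields are placeholders, not taken from the repository):

import CopyPlugin from "copy-webpack-plugin";
import type { Configuration } from "webpack";

const config: Configuration = {
    // entry, output and loaders omitted for brevity
    plugins: [
        // ship the wasm binary next to the bundle so it can be fetched at runtime
        new CopyPlugin({
            patterns: [{ from: "./node_modules/@dannadori/voice-changer-js/dist/ort-wasm-simd.wasm", to: "ort-wasm-simd.wasm" }],
        }),
    ],
};

export default config;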
43 changes: 32 additions & 11 deletions server/voice_changer/DDSP_SVC/DDSP_SVC.py
@@ -20,7 +20,7 @@

from .models.diffusion.infer_gt_mel import DiffGtMel

from voice_changer.utils.VoiceChangerModel import AudioInOut
from voice_changer.utils.VoiceChangerModel import AudioInOut, VoiceChangerModel
from voice_changer.utils.VoiceChangerParams import VoiceChangerParams
from voice_changer.DDSP_SVC.DDSP_SVCSetting import DDSP_SVCSettings
from voice_changer.RVC.embedder.EmbedderManager import EmbedderManager
@@ -44,15 +44,20 @@ def phase_vocoder(a, b, fade_out, fade_in):
deltaphase = deltaphase - 2 * np.pi * torch.floor(deltaphase / 2 / np.pi + 0.5)
w = 2 * np.pi * torch.arange(n // 2 + 1).to(a) + deltaphase
t = torch.arange(n).unsqueeze(-1).to(a) / n
result = a * (fade_out**2) + b * (fade_in**2) + torch.sum(absab * torch.cos(w * t + phia), -1) * fade_out * fade_in / n
result = (
a * (fade_out**2)
+ b * (fade_in**2)
+ torch.sum(absab * torch.cos(w * t + phia), -1) * fade_out * fade_in / n
)
return result


class DDSP_SVC:
class DDSP_SVC(VoiceChangerModel):
initialLoad: bool = True

def __init__(self, params: VoiceChangerParams, slotInfo: DDSPSVCModelSlot):
print("[Voice Changer] [DDSP-SVC] Creating instance ")
self.voiceChangerType = "DDSP-SVC"
self.deviceManager = DeviceManager.get_instance()
self.gpu_num = torch.cuda.device_count()
self.params = params
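The phase_vocoder helper itself is only reformatted by this commit (the single-line result expression is wrapped across lines), not changed. For reference, the value it returns corresponds to

$$ \text{result}[m] = a[m]\,f_{\text{out}}[m]^2 + b[m]\,f_{\text{in}}[m]^2 + \frac{f_{\text{out}}[m]\,f_{\text{in}}[m]}{n} \sum_{k=0}^{n/2} \text{absab}_k \cos\!\Big( (2\pi k + \Delta\varphi_k)\,\tfrac{m}{n} + \text{phia}_k \Big) $$

where $\Delta\varphi_k$ is the per-bin phase difference wrapped to $(-\pi, \pi]$ by the floor expression on the first visible line of the hunk, and absab and phia are computed in unchanged lines above this hunk (by their names, presumably the cross-magnitude and the phase of a's spectrum; that reading is an assumption, not shown in the diff).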
@@ -71,8 +76,18 @@ def __init__(self, params: VoiceChangerParams, slotInfo: DDSPSVCModelSlot):
def initialize(self):
self.device = self.deviceManager.getDevice(self.settings.gpu)
vcparams = VoiceChangerParamsManager.get_instance().params
modelPath = os.path.join(vcparams.model_dir, str(self.slotInfo.slotIndex), "model", self.slotInfo.modelFile)
diffPath = os.path.join(vcparams.model_dir, str(self.slotInfo.slotIndex), "diff", self.slotInfo.diffModelFile)
modelPath = os.path.join(
vcparams.model_dir,
str(self.slotInfo.slotIndex),
"model",
self.slotInfo.modelFile,
)
diffPath = os.path.join(
vcparams.model_dir,
str(self.slotInfo.slotIndex),
"diff",
self.slotInfo.diffModelFile,
)

self.svc_model = SvcDDSP()
self.svc_model.setVCParams(self.params)
@@ -112,11 +127,15 @@ def generate_input(
# newData = newData.astype(np.float32)

if self.audio_buffer is not None:
self.audio_buffer = np.concatenate([self.audio_buffer, newData], 0) # concatenate with past data
self.audio_buffer = np.concatenate(
[self.audio_buffer, newData], 0
) # concatenate with past data
else:
self.audio_buffer = newData

convertSize = inputSize + crossfadeSize + solaSearchFrame + self.settings.extraConvertSize
convertSize = (
inputSize + crossfadeSize + solaSearchFrame + self.settings.extraConvertSize
)

# if convertSize % self.hop_size != 0: # compensate because the model output's hop size causes truncation.
# convertSize = convertSize + (self.hop_size - (convertSize % self.hop_size))
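This hunk, too, is only reformatted; the conversion window is simply

$$ \text{convertSize} = \text{inputSize} + \text{crossfadeSize} + \text{solaSearchFrame} + \text{extraConvertSize} $$

i.e. the newly received audio plus the crossfade region, the SOLA search margin, and the extra context configured in the settings, with the commented-out rounding up to a multiple of hop_size left disabled.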
@@ -147,15 +166,18 @@ def _pyTorch_inference(self, data):
f0_min=50,
f0_max=1100,
# safe_prefix_pad_length=0, # TBD: what is this?
safe_prefix_pad_length=self.settings.extraConvertSize / self.svc_model.args.data.sampling_rate,
safe_prefix_pad_length=self.settings.extraConvertSize
/ self.svc_model.args.data.sampling_rate,
diff_model=self.diff_model,
diff_acc=self.settings.diffAcc, # TBD: what is this?
diff_spk_id=self.settings.diffSpkId,
diff_use=True if self.settings.useDiff == 1 else False,
# diff_use_dpm=True if self.settings.useDiffDpm == 1 else False, # TBD: what is this?
method=self.settings.diffMethod,
k_step=self.settings.kStep, # TBD: what is this?
diff_silence=True if self.settings.useDiffSilence == 1 else False, # TBD: what is this?
diff_silence=True
if self.settings.useDiffSilence == 1
else False, # TBD: what is this?
)

return _audio.cpu().numpy() * 32768.0
@@ -182,5 +204,4 @@ def __del__(self):
pass

def get_model_current(self):
return [
]
return []
(Diffs for the remaining 7 changed files were not loaded in this view.)
