I use the javascript speech SDK of azure and I modified the speechSynthesisLanguage
property to change the output language but it does not work. The output language is still english. I followed the documentation and use the demo code available on their repo.
(function() {
// <code>
"use strict";
// pull in the required packages.
var sdk = require("microsoft-cognitiveservices-speech-sdk");
var readline = require("readline");
const { exec } = require("child_process");
// replace with your own subscription key,
// service region (e.g., "westus"), and
// the name of the file you save the synthesized audio.
var subscriptionKey = "xxx";
var serviceRegion = "francecentral"; // e.g., "westus"
var filename = "test1.wav";
// we are done with the setup
// now create the audio-config pointing to our stream and
// the speech config specifying the language.
var audioConfig = sdk.AudioConfig.fromAudioFileOutput(filename);
var speechConfig = sdk.SpeechConfig.fromSubscription(subscriptionKey, serviceRegion);
speechConfig.setProperty('speechSynthesisLanguage', 'fr-FR')
speechConfig.setProperty('SpeechServiceConnection_RecoLanguage', 'fr-FR')
speechConfig.setProperty('speechSynthesisVoiceName', 'fr-FR-AlainNeural')
console.log(speechConfig)
//create the speech synthesizer.
var synthesizer = new sdk.SpeechSynthesizer(speechConfig, audioConfig);
var rl = readline.createInterface({
input: process.stdin,
output: process.stdout
});
rl.question("Test à saisir :\n", function (text) {
rl.close();
// start the synthesizer and wait for a result.
synthesizer.speakTextAsync(text,
function (result) {
if (result.reason === sdk.ResultReason.SynthesizingAudioCompleted) {
console.log("synthesis finished.");
} else {
console.error("Speech synthesis canceled, " result.errorDetails
"\nDid you update the subscription info?");
}
synthesizer.close();
synthesizer = undefined;
},
function (err) {
console.trace("err - " err);
synthesizer.close();
synthesizer = undefined;
});
console.log("Now synthesizing to: " filename);
exec(`vlc ${filename} vlc://quit`);
});
// </code>
}());
The output language is corretly set to fr-FR
according to the doc.
console.log(speechConfig.getProperty('speechSynthesisLanguage')) //fr-FR
I tried to change the language by german or use the setServiceProperty(string, string, ServicePropertyChannel)
method but it didn't work.
Do you have a solution? Thank you
CodePudding user response:
I am assuming you want to convert French text into speech which will be French. Now to do this we need to set two setting i.e
speechSynthesisLanguage
andspeechSynthesisVoiceName
these settings would represent name of the language you are passing and name of the speech.Here I was created a function which will ask for French as text and will add the data to the
YourAudioFile.wav
file.
code:
(function() {
"use strict";
var sdk = require("microsoft-cognitiveservices-speech-sdk");
var readline = require("readline");
var audioFile = "YourAudioFile.wav";
const speechConfig = sdk.SpeechConfig.fromSubscription(process.env.SPEECH_KEY, process.env.SPEECH_REGION);
const audioConfig = sdk.AudioConfig.fromAudioFileOutput(audioFile);
// the speech settings for french
speechConfig.speechSynthesisLanguage = "fr-FR";
speechConfig.speechSynthesisVoiceName = "fr-FR-BrigitteNeural";
var synthesizer = new sdk.SpeechSynthesizer(speechConfig, audioConfig);
var rl = readline.createInterface({
input: process.stdin,
output: process.stdout
});
rl.question("Enter some text that you want to speak >\n> ",
function (text)
{
rl.close();
synthesizer.speakTextAsync(text,
function (result)
{
if (result.reason === sdk.ResultReason.SynthesizingAudioCompleted)
{
console.log("synthesis finished.");
}
else
{
console.error("Speech synthesis canceled, " result.errorDetails "\nDid you set the speech resource key and region values?");
}
synthesizer.close();
synthesizer = null;
},
function (err)
{
console.trace("err - " err);
synthesizer.close();
synthesizer = null;
}
);
console.log("Now synthesizing to: " audioFile);
}
);
}());
The majority of the above code is from this MSDOC on text to speech.