Started Google Cloud tests

This commit is contained in:
Pax1601 2024-11-28 20:08:43 +01:00
parent dd641fc2aa
commit 42e62be0f5
6 changed files with 52 additions and 46 deletions

View File

@ -158,8 +158,6 @@ export class AudioManager {
this.#devices = devices;
AudioManagerDevicesChangedEvent.dispatch(devices);
});
this.#startSpeechRecognition();
}
stop() {
@ -330,30 +328,4 @@ export class AudioManager {
if (this.#socket?.readyState == 1) this.#socket?.send(new Uint8Array([AudioMessageType.settings, ...Buffer.from(JSON.stringify(message), "utf-8")]));
}
#startSpeechRecognition() {
const grammar =
"#JSGF V1.0; grammar colors; public <color> = aqua | azure | beige | bisque | black | blue | brown | chocolate | coral | crimson | cyan | fuchsia | ghostwhite | gold | goldenrod | gray | green | indigo | ivory | khaki | lavender | lime | linen | magenta | maroon | moccasin | navy | olive | orange | orchid | peru | pink | plum | purple | red | salmon | sienna | silver | snow | tan | teal | thistle | tomato | turquoise | violet | white | yellow ;";
//@ts-ignore
const recognition = new window.webkitSpeechRecognition();
//@ts-ignore
const speechRecognitionList = new window.webkitSpeechGrammarList();
speechRecognitionList.addFromString(grammar, 1);
recognition.grammars = speechRecognitionList;
recognition.continuous = true;
recognition.lang = "en-US";
recognition.interimResults = true;
//recognition.maxAlternatives = 1;
const diagnostic = document.querySelector(".output");
const bg = document.querySelector("html");
recognition.start();
recognition.onresult = (event) => {
const color = event.results[0][0].transcript;
diagnostic.textContent = `Result received: ${color}`;
bg.style.backgroundColor = color;
};
}
}

View File

@ -26,7 +26,7 @@ export class TextToSpeechSource extends AudioSource {
body: JSON.stringify({ text }), // Send the data in JSON format
};
fetch(getApp().getExpressAddress() + `/api/texttospeech/generate`, requestOptions)
fetch(getApp().getExpressAddress() + `/api/speech/generate`, requestOptions)
.then((response) => {
if (response.status === 200) {
console.log(`Text to speech generate correctly`);

View File

@ -10,6 +10,7 @@
},
"private": true,
"dependencies": {
"@google-cloud/speech": "^6.7.0",
"appjs": "^0.0.20",
"appjs-win32": "^0.0.19",
"body-parser": "^1.20.2",

View File

@ -27,7 +27,7 @@ module.exports = function (configLocation, viteProxy) {
"databases"
)
);
const textToSpeechRouter = require("./routes/api/texttospeech")();
const speechRouter = require("./routes/api/speech")();
/* Load the config and create the express app */
let config = {};
@ -76,12 +76,12 @@ module.exports = function (configLocation, viteProxy) {
app.use("/api/airbases", airbasesRouter);
app.use("/api/elevation", elevationRouter);
app.use("/api/databases", databasesRouter);
app.use("/api/texttospeech", textToSpeechRouter);
app.use("/api/speech", speechRouter);
app.use("/resources", resourcesRouter);
app.use("/express/api/airbases", airbasesRouter);
app.use("/express/api/elevation", elevationRouter);
app.use("/express/api/databases", databasesRouter);
app.use("/express/api/texttospeech", textToSpeechRouter);
app.use("/express/api/speech", speechRouter);
app.use("/express/resources", resourcesRouter);
/* Set default index */

View File

@ -0,0 +1,47 @@
import express = require("express");
import fs = require("fs");
var gtts = require("node-gtts")("en");
const router = express.Router();
module.exports = function () {
router.put("/generate", (req, res, next) => {
res.set({ "Content-Type": "audio/mpeg" });
gtts.stream(req.body.text).pipe(res);
});
router.get("/recognize", (req, res, next) => {
//// Imports the Google Cloud client library
//const speech = require("@google-cloud/speech");
//
//// Creates a client
//const client = new speech.SpeechClient();
//
//// The path to the remote LINEAR16 file
//const gcsUri = "gs://cloud-samples-data/speech/brooklyn_bridge.raw";
//
//// The audio file's encoding, sample rate in hertz, and BCP-47 language code
//const audio = {
// uri: gcsUri,
//};
//const config = {
// encoding: "LINEAR16",
// sampleRateHertz: 16000,
// languageCode: "en-US",
//};
//const request = {
// audio: audio,
// config: config,
//};
//
//// Detects speech in the audio file
//client.recognize(request).then((response) => {
// const transcription = response.results
// .map((result) => result.alternatives[0].transcript)
// .join("\n");
// console.log(`Transcription: ${transcription}`);
//});
});
return router;
};

View File

@ -1,14 +0,0 @@
import express = require('express');
import fs = require('fs');
var gtts = require('node-gtts')('en');
const router = express.Router();
module.exports = function () {
router.put( "/generate", ( req, res, next ) => {
res.set({'Content-Type': 'audio/mpeg'});
gtts.stream(req.body.text).pipe(res);
});
return router;
}