2) Express backend code:
import express from 'express';
import multer from 'multer';
import speech from '@google-cloud/speech';
// Service-account key for the Google Cloud project; note that newer Node versions
// spell the JSON import attribute `with { type: "json" }` instead of `assert`.
import credentials from './my-project.json' assert { type: "json" };
import cors from 'cors';

const app = express();
const port = 3000;

app.use(cors());

// Keep uploads in memory so the buffer can be passed straight to the Speech API.
const upload = multer();

// Alternatively, omit the credentials option and point the
// GOOGLE_APPLICATION_CREDENTIALS environment variable at the key file.
const client = new speech.SpeechClient({
  credentials,
});

app.post('/upload', upload.single('audio'), async (req, res) => {
  try {
    if (!req.file) {
      return res.status(400).send('No file uploaded.');
    }
    const audioBytes = req.file.buffer;
    const audio = {
      content: audioBytes,
    };
    // MediaRecorder in the browser produces WebM/Opus, so the encoding must match.
    const config = {
      encoding: 'WEBM_OPUS',
      languageCode: 'ko-KR',
    };
    const request = {
      config: config,
      audio: audio,
    };
    const [response] = await client.recognize(request);
    const transcription = response.results
      .map(result => result.alternatives[0].transcript)
      .join('\n');
    console.log("===== transcription: ", transcription);
    res.send({ transcription });
  } catch (error) {
    console.error('Error during speech recognition:', error);
    res.status(500).send('Error during speech recognition.');
  }
});

app.listen(port, () => {
  console.log(`Server listening at localhost:${port}`);
});
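To try the endpoint without the React UI, a small Node script can POST a recording directly. This is a minimal sketch, not part of the video's code: it assumes Node 18+ (for the global fetch, FormData, and Blob), "type": "module" in package.json (which the JSON import above already requires), and a local WebM/Opus file whose name (sample.webm) is a placeholder.

// test-upload.js -- quick check of the /upload endpoint
import { readFile } from 'fs/promises';

const bytes = await readFile('./sample.webm'); // placeholder recording

const formData = new FormData();
formData.append('audio', new Blob([bytes], { type: 'audio/webm' }), 'sample.webm');

const response = await fetch('http://localhost:3000/upload', {
  method: 'POST',
  body: formData,
});
console.log(await response.json()); // { transcription: '...' }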
* Previous video: ruclips.net/video/2ZCLF2rarn8/видео.html
1) React frontend code:
import React, { useState, useRef } from 'react';

function App() {
  const [isRecording, setIsRecording] = useState(false);
  const [transcription, setTranscription] = useState('');
  const mediaRecorderRef = useRef(null);
  const audioChunksRef = useRef([]);

  const startRecording = async () => {
    // Ask for microphone access and start buffering audio chunks.
    const stream = await navigator.mediaDevices.getUserMedia({ audio: true });
    mediaRecorderRef.current = new MediaRecorder(stream);
    audioChunksRef.current = [];
    mediaRecorderRef.current.ondataavailable = (event) => {
      audioChunksRef.current.push(event.data);
    };
    mediaRecorderRef.current.onstop = async () => {
      if (audioChunksRef.current.length > 0) {
        // Bundle the recorded chunks and send them to the Express backend.
        const audioBlob = new Blob(audioChunksRef.current, { type: 'audio/webm' });
        const formData = new FormData();
        formData.append('audio', audioBlob);
        const response = await fetch('http://localhost:3000/upload', {
          method: 'POST',
          body: formData,
        });
        const result = await response.json();
        setTranscription(result.transcription);
      }
    };
    mediaRecorderRef.current.start();
    setIsRecording(true);
  };

  const stopRecording = () => {
    if (mediaRecorderRef.current) {
      mediaRecorderRef.current.stop();
    }
    setIsRecording(false);
  };
  return (
    <div>
      <h1>Audio Transcription</h1>
      <button onClick={isRecording ? stopRecording : startRecording}>
        {isRecording ? 'Stop Recording' : 'Start Recording'}
      </button>
      {transcription && (
        <div>
          <h2>Transcription:</h2>
          <p>{transcription}</p>
        </div>
      )}
    </div>
  );
}
export default App;
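One detail the component above leaves out: stopping the MediaRecorder does not release the microphone, so the browser's recording indicator stays on after the upload. A small extension of stopRecording, assuming the same mediaRecorderRef as above, ends the underlying tracks as well:

const stopRecording = () => {
  if (mediaRecorderRef.current) {
    mediaRecorderRef.current.stop();
    // MediaRecorder keeps a reference to the getUserMedia stream;
    // stopping its tracks turns the browser's mic indicator off.
    mediaRecorderRef.current.stream.getTracks().forEach((track) => track.stop());
  }
  setIsRecording(false);
};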