// Discord voice-recording bot: joins a voice channel on "!join", records each
// speaker to raw PCM, converts recordings to WAV with ffmpeg, and transcribes
// them with the OpenAI Whisper API.
// Core dependencies: voice encryption, Discord client, voice-connection
// helpers, filesystem access, the Opus codec, shell execution for ffmpeg,
// and the OpenAI SDK.
const sodium = require('libsodium-wrappers');
const { Client, GatewayIntentBits } = require('discord.js');
const { joinVoiceChannel, getVoiceConnection, VoiceConnectionStatus, EndBehaviorType } = require('@discordjs/voice');
const fs = require('fs');
const { OpusEncoder } = require('@discordjs/opus');
const { exec } = require('child_process');
const OpenAI = require('openai');

// Validate the OpenAI API key up front — the bot cannot transcribe without it.
const apiKey = process.env.OPENAI_API_KEY || 'your-openai-api-key';
if (!apiKey || apiKey === 'your-openai-api-key') {
  console.error('Error: OpenAI API key is missing or invalid. Set the API key in the environment variables.');
  process.exit(1); // exit with error code 1
}

// OpenAI API client used by transcribeAudio().
const openai = new OpenAI({
  apiKey: apiKey,
});

// libsodium initializes asynchronously; voice encryption requires it.
// FIX: the original left this promise floating with no rejection handler,
// so an init failure surfaced as an unhandled rejection.
sodium.ready
  .then(() => {
    console.log('Sodium library loaded!');
  })
  .catch((err) => {
    console.error('Failed to load sodium library:', err);
  });
// Discord gateway client. The intents grant access to guild metadata,
// voice-state updates (needed to find the caller's channel), and the
// content of command messages.
const client = new Client({
  intents: [
    GatewayIntentBits.Guilds,
    GatewayIntentBits.GuildVoiceStates,
    GatewayIntentBits.GuildMessages,
    GatewayIntentBits.MessageContent,
  ],
});

// One-time startup notification once the gateway session is established.
client.once('ready', () => {
  console.log('Bot is ready!');
});
|
client.on('messageCreate', async message => {
|
|
if (message.content === '!join') {
|
|
if (message.member.voice.channel) {
|
|
const connection = joinVoiceChannel({
|
|
channelId: message.member.voice.channel.id,
|
|
guildId: message.guild.id,
|
|
adapterCreator: message.guild.voiceAdapterCreator,
|
|
selfDeaf: false,
|
|
selfMute: false,
|
|
});
|
|
|
|
connection.on(VoiceConnectionStatus.Ready, () => {
|
|
console.log('Bot has connected to the channel!');
|
|
startRecording(connection, message.member.voice.channel);
|
|
});
|
|
} else {
|
|
message.reply('ボイスチャンネルに接続してください。');
|
|
}
|
|
}
|
|
|
|
if (message.content === '!leave') {
|
|
const connection = getVoiceConnection(message.guild.id);
|
|
if (connection) {
|
|
connection.destroy();
|
|
message.reply('ボイスチャンネルを離れました。');
|
|
} else {
|
|
message.reply('ボイスチャンネルに接続していません。');
|
|
}
|
|
}
|
|
});
|
|
|
|
function startRecording(connection, voiceChannel) {
|
|
const receiver = connection.receiver;
|
|
const activeStreams = new Map(); // ユーザーごとの音声ストリーム管理
|
|
|
|
receiver.speaking.on('start', (userId) => {
|
|
const user = voiceChannel.members.get(userId);
|
|
if (!user) {
|
|
console.error('User is undefined');
|
|
return;
|
|
}
|
|
|
|
console.log(`Started receiving audio from user: ${user.id}`);
|
|
|
|
if (activeStreams.has(user.id)) {
|
|
return;
|
|
}
|
|
|
|
const audioStream = receiver.subscribe(user.id, {
|
|
end: EndBehaviorType.Manual
|
|
});
|
|
|
|
const filePath = `./recordings/${user.id}-${Date.now()}.pcm`;
|
|
const writableStream = fs.createWriteStream(filePath);
|
|
activeStreams.set(user.id, { audioStream, writableStream, filePath });
|
|
|
|
const opusDecoder = new OpusEncoder(48000, 2); // Opusデコーダーを使用
|
|
audioStream.on('data', (chunk) => {
|
|
try {
|
|
const pcmData = opusDecoder.decode(chunk);
|
|
writableStream.write(pcmData);
|
|
console.log(`Received ${chunk.length} bytes of audio data from user ${user.id}`);
|
|
} catch (err) {
|
|
console.error('Error decoding audio chunk:', err);
|
|
}
|
|
});
|
|
|
|
audioStream.on('error', (err) => {
|
|
console.error('Error in the audio stream:', err);
|
|
});
|
|
});
|
|
|
|
receiver.speaking.on('end', (userId) => {
|
|
const streamData = activeStreams.get(userId);
|
|
if (!streamData) {
|
|
return;
|
|
}
|
|
|
|
const { writableStream, filePath } = streamData;
|
|
|
|
writableStream.end(() => {
|
|
console.log(`Recording saved to ${filePath}`);
|
|
activeStreams.delete(userId);
|
|
|
|
fs.stat(filePath, (err, stats) => {
|
|
if (err) {
|
|
console.error('Error checking file:', err);
|
|
return;
|
|
}
|
|
if (stats.size > 0) {
|
|
convertToWavAndTranscribe(filePath); // バックグラウンドで変換と文字起こしを実行
|
|
} else {
|
|
console.log(`File ${filePath} is empty, skipping conversion.`);
|
|
fs.unlinkSync(filePath); // 空のファイルを削除
|
|
}
|
|
});
|
|
});
|
|
});
|
|
}
|
|
|
|
function convertToWavAndTranscribe(pcmPath) {
|
|
const wavPath = pcmPath.replace('.pcm', '.wav');
|
|
console.log(`Converting ${pcmPath} to ${wavPath} and transcribing in the background`);
|
|
|
|
const ffmpeg = exec(`ffmpeg -f s16le -ar 48000 -ac 2 -i ${pcmPath} ${wavPath}`, (error, stdout, stderr) => {
|
|
if (error) {
|
|
console.error(`Error during conversion: ${error.message}`);
|
|
return;
|
|
}
|
|
if (stderr) {
|
|
console.error(`FFmpeg stderr: ${stderr}`);
|
|
return;
|
|
}
|
|
});
|
|
|
|
ffmpeg.on('close', (code) => {
|
|
if (code === 0) {
|
|
console.log(`Successfully converted ${pcmPath} to ${wavPath}`);
|
|
fs.unlinkSync(pcmPath); // PCMファイルを削除
|
|
|
|
// Whisper APIを使って文字起こしを実行
|
|
transcribeAudio(wavPath);
|
|
} else {
|
|
console.error(`FFmpeg exited with code ${code}, conversion failed.`);
|
|
}
|
|
});
|
|
}
|
|
|
|
// Whisper APIを使って音声ファイルを文字起こしする関数
|
|
async function transcribeAudio(filePath) {
|
|
try {
|
|
console.log(`Transcribing file: ${filePath}`);
|
|
|
|
const response = await openai.audio.transcriptions.create({
|
|
file: fs.createReadStream(filePath),
|
|
model: 'whisper-1',
|
|
response_format: 'json' // レスポンス形式をJSONに指定
|
|
});
|
|
|
|
// 文字起こし結果を表示
|
|
if (response && response.text) {
|
|
console.log('Transcription:', response.text);
|
|
} else {
|
|
console.error('Error: Transcription response is missing "text" field.');
|
|
}
|
|
} catch (error) {
|
|
console.error('Error during transcription:', error.response ? error.response.data : error.message);
|
|
}
|
|
}
|
|
|
|
client.login(process.env.BOT_TOKEN);
|