const sodium = require('libsodium-wrappers');
const { Client, GatewayIntentBits } = require('discord.js');
const { joinVoiceChannel, getVoiceConnection, VoiceConnectionStatus, EndBehaviorType, createAudioPlayer, createAudioResource, AudioPlayerStatus } = require('@discordjs/voice');
const fs = require('fs');
const { OpusEncoder } = require('@discordjs/opus');
const { spawn } = require('child_process');
const OpenAI = require('openai');
const axios = require('axios');
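
// Voice-chat companion bot "セカイ": it joins a voice channel on !cs_join,
// records each speaker's Opus audio to PCM, converts the recording to WAV with
// ffmpeg, transcribes it with the Whisper API, asks ChatGPT whether the
// character should react, generates a short in-character reply, synthesizes the
// reply on a local TTS server and plays it back in the voice channel.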

let isProcessing = false; // True while a recording is being transcribed and answered
let isPlaying = false; // True while a generated voice file is being played back

// Check that an OpenAI API key is configured
const apiKey = process.env.OPENAI_API_KEY || 'your-openai-api-key';
if (!apiKey || apiKey === 'your-openai-api-key') {
  console.error('Error: OpenAI API key is missing or invalid. Set the API key in the environment variables.');
  process.exit(1); // Exit with error code 1
}

// ChatGPT model to use
const chatGptModel = process.env.CHAT_GPT_MODEL || 'gpt-4o';

const openai = new OpenAI({ apiKey });

// libsodium-wrappers loads asynchronously; log once it is ready
sodium.ready.then(() => {
  console.log('Sodium library loaded!');
});
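
// Added safeguard: the recorder and the TTS step write into ./recordings and
// ./voices, so create both directories up front if they do not already exist.
fs.mkdirSync('./recordings', { recursive: true });
fs.mkdirSync('./voices', { recursive: true });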

const client = new Client({
  intents: [
    GatewayIntentBits.Guilds,
    GatewayIntentBits.GuildVoiceStates,
    GatewayIntentBits.GuildMessages,
    GatewayIntentBits.MessageContent,
  ],
});

client.once('ready', () => {
  console.log('Bot is ready!');
});

client.on('messageCreate', async message => {
  if (isProcessing) {
    message.reply('現在処理中です。少々お待ちください。');
    return; // Ignore new commands while a recording is being processed
  }

  if (message.content === '!cs_join') {
    if (message.member.voice.channel) {
      const connection = joinVoiceChannel({
        channelId: message.member.voice.channel.id,
        guildId: message.guild.id,
        adapterCreator: message.guild.voiceAdapterCreator,
        selfDeaf: false,
        selfMute: false,
      });

      connection.on(VoiceConnectionStatus.Ready, () => {
        console.log('Bot has connected to the channel!');
        startRecording(connection, message.member.voice.channel);
      });
    } else {
      message.reply('ボイスチャンネルに接続してください。');
    }
  }

  if (message.content === '!cs_leave') {
    const connection = getVoiceConnection(message.guild.id);
    if (connection) {
      connection.destroy(); // Disconnect from the voice channel
      message.reply('ボイスチャンネルを離れました。');
    } else {
      message.reply('ボイスチャンネルに接続していません。');
    }
  }
});
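
// Subscribes to every member who starts speaking, decodes their Opus packets to
// 48 kHz stereo PCM and appends them to a per-user file under ./recordings;
// when the user stops speaking the file is closed and, if it is not empty,
// handed to convertToWavAndTranscribe.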
async function startRecording(connection, voiceChannel) {
  const receiver = connection.receiver;
  const activeStreams = new Map();

  receiver.speaking.on('start', (userId) => {
    const user = voiceChannel.members.get(userId);
    if (!user) {
      console.error('User is undefined');
      return;
    }

    console.log(`Started receiving audio from user: ${user.id}`);

    if (activeStreams.has(user.id)) {
      return;
    }

    const audioStream = receiver.subscribe(user.id, {
      end: EndBehaviorType.Manual
    });

    const filePath = `./recordings/${user.id}-${Date.now()}.pcm`;
    const writableStream = fs.createWriteStream(filePath);
    activeStreams.set(user.id, { audioStream, writableStream, filePath });

    // OpusEncoder from @discordjs/opus also decodes: 48 kHz, 2 channels
    const opusDecoder = new OpusEncoder(48000, 2);
    audioStream.on('data', (chunk) => {
      try {
        const pcmData = opusDecoder.decode(chunk);
        writableStream.write(pcmData);
      } catch (err) {
        console.error('Error decoding audio chunk:', err);
      }
    });

    audioStream.on('error', (err) => {
      console.error('Error in the audio stream:', err);
    });
  });

  receiver.speaking.on('end', (userId) => {
    const streamData = activeStreams.get(userId);
    if (!streamData) {
      return;
    }

    const { writableStream, filePath } = streamData;

    writableStream.end(() => {
      console.log(`Recording saved to ${filePath}`);
      activeStreams.delete(userId);

      fs.stat(filePath, (err, stats) => {
        if (err) {
          console.error('Error checking file:', err);
          return;
        }
        if (stats.size > 0) {
          convertToWavAndTranscribe(filePath, connection);
        } else {
          console.log(`File ${filePath} is empty, skipping conversion.`);
          fs.unlinkSync(filePath);
        }
      });
    });
  });
}
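
// Converts a raw PCM recording to WAV with ffmpeg (-f s16le -ar 48000 -ac 2,
// matching the Opus decoder output above), then hands the WAV file to the
// Whisper transcription step. A 10-second timeout guards against a hung ffmpeg
// process.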
async function convertToWavAndTranscribe(pcmPath, connection) {
  if (isProcessing) return; // Skip if another recording is already being processed

  isProcessing = true; // Mark the start of processing
  const wavPath = pcmPath.replace('.pcm', '.wav');
  console.log(`Converting ${pcmPath} to ${wavPath} and transcribing in the background`);

  const ffmpeg = spawn('ffmpeg', ['-f', 's16le', '-ar', '48000', '-ac', '2', '-i', pcmPath, wavPath]);

  const ffmpegTimeout = setTimeout(() => {
    ffmpeg.kill('SIGKILL');
    console.error('FFmpeg process timed out and was killed');
    isProcessing = false; // Stop processing on timeout
  }, 10000); // 10-second timeout

  ffmpeg.stdout.on('data', (data) => {
    console.log(`FFmpeg stdout: ${data}`);
  });

  ffmpeg.stderr.on('data', (data) => {
    console.error(`FFmpeg stderr: ${data}`);
  });

  ffmpeg.on('close', (code) => {
    clearTimeout(ffmpegTimeout);

    if (code === 0) {
      console.log(`Successfully converted ${pcmPath} to ${wavPath}`);
      fs.unlinkSync(pcmPath); // Delete the raw PCM file

      // Transcribe with the Whisper API and continue the pipeline
      transcribeAudio(wavPath, connection);
    } else {
      console.error(`FFmpeg exited with code ${code}, conversion failed.`);
      isProcessing = false; // Stop processing on error
    }
  });

  ffmpeg.on('error', (err) => {
    console.error('FFmpeg process error:', err);
    isProcessing = false; // Stop processing on error
  });
}
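
// Transcribes the WAV file with the Whisper API (Japanese), asks ChatGPT
// whether the character should react, and if so generates a reply, turns it
// into speech and plays it in the voice channel. The recording is deleted
// afterwards and isProcessing is reset in the finally block.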
async function transcribeAudio(filePath, connection) {
  try {
    console.log(`Transcribing file: ${filePath}`);
    const response = await openai.audio.transcriptions.create({
      file: fs.createReadStream(filePath),
      model: 'whisper-1',
      response_format: 'json',
      language: 'ja'
    });

    if (response && response.text) {
      const transcription = response.text;
      console.log('Transcription:', transcription);

      const shouldReact = await shouldReactToTranscription(transcription);
      if (!shouldReact) {
        console.log('ChatGPT decided not to react to this transcription.');
        fs.unlink(filePath, (err) => {
          if (err) console.error(`Error deleting recording file: ${err}`);
          else console.log(`Deleted recording file: ${filePath}`);
        });
        return;
      }

      const reply = await generateChatResponse(transcription);
      console.log('ChatGPT response:', reply);

      if (reply) {
        const voiceFilePath = await generateVoiceFromText(reply);
        if (voiceFilePath) {
          await playVoiceInChannel(connection, voiceFilePath); // Wait until the voice playback step completes
        }
      }

      fs.unlink(filePath, (err) => {
        if (err) console.error(`Error deleting recording file: ${err}`);
        else console.log(`Deleted recording file: ${filePath}`);
      });
    } else {
      console.error('Error: Transcription response is missing "text" field.');
    }
  } catch (error) {
    console.error('Error during transcription:', error.response ? error.response.data : error.message);
  } finally {
    isProcessing = false; // Always reset the processing flag at the end
  }
}
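
// Asks ChatGPT, in character as セカイ, whether the transcribed utterance
// deserves a reaction; only an answer of はい is treated as yes.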
async function shouldReactToTranscription(transcription) {
  try {
    const systemPrompt = 'あなたは「セカイ」というキャラクターです。次のユーザーの発言に対して反応すべきかどうかを判断してください。反応すべき場合は「はい」、反応しない場合は「いいえ」と答えてください。';

    const response = await openai.chat.completions.create({
      model: chatGptModel,
      messages: [
        { role: 'system', content: systemPrompt },
        { role: 'user', content: transcription }
      ]
    });

    if (response && response.choices && response.choices.length > 0) {
      const result = response.choices[0].message.content.trim();
      return result.toLowerCase() === 'はい';
    } else {
      console.error('Error: ChatGPT response is missing choices.');
      return false;
    }
  } catch (error) {
    console.error('Error during shouldReactToTranscription generation:', error.response ? error.response.data : error.message);
    return false;
  }
}
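
// Generates the in-character reply; the system prompt limits the answer to at
// most 80 characters.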
async function generateChatResponse(transcription) {
  try {
    const systemPrompt = 'あなたはこれから「セカイ」という名前の少女を演じてユーザーとの会話を楽しんでください。回答メッセージは最大でも80字になるようにしてください。';

    const chatResponse = await openai.chat.completions.create({
      model: chatGptModel,
      messages: [
        { role: 'system', content: systemPrompt },
        { role: 'user', content: transcription }
      ]
    });

    console.log('ChatGPT full response:', chatResponse);

    if (chatResponse && chatResponse.choices && chatResponse.choices.length > 0) {
      return chatResponse.choices[0].message.content;
    } else {
      console.error('Error: ChatGPT response is missing choices.');
      return null;
    }
  } catch (error) {
    console.error('Error during ChatGPT response generation:', error.response ? error.response.data : error.message);
    return null;
  } finally {
    isProcessing = false; // Always reset the flag once this step is done
  }
}
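
// Sends the reply text to the local text-to-speech server (narrator "SEKAI"
// with fixed emotion parameters) and saves the returned WAV under ./voices.
// Retries up to ten times, one second apart, if the request fails.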
async function generateVoiceFromText(text, retryCount = 10) {
  try {
    const response = await axios.post('http://192.168.100.252:5000/generate_voice', {
      text: text,
      narrator: "SEKAI",
      emotion_happy: "80",
      emotion_sad: "20",
      emotion_angry: "10",
      emotion_fun: "50",
      pitch: "1"
    }, {
      responseType: 'arraybuffer'
    });

    const outputFilePath = `./voices/output-${Date.now()}.wav`;
    await fs.promises.writeFile(outputFilePath, response.data);
    console.log(`Voice file saved to ${outputFilePath}`);
    return outputFilePath;
  } catch (error) {
    console.error('Error generating voice:', error.message);

    if (retryCount > 0) {
      console.log(`Retrying to generate the voice... (${retryCount} retries left)`);
      await new Promise(resolve => setTimeout(resolve, 1000));
      return generateVoiceFromText(text, retryCount - 1);
    } else {
      console.error('Max retries reached. Could not generate the voice.');
      return null;
    }
  }
}
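
// Plays the generated WAV through an AudioPlayer on the current voice
// connection, deletes the file shortly after playback ends, and retries
// playback up to three times on error.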
async function playVoiceInChannel(connection, filePath, retryCount = 3) {
  if (isPlaying) {
    console.log('Another audio is still playing, waiting...');
    return;
  }

  isPlaying = true;
  const player = createAudioPlayer();
  const resource = createAudioResource(filePath);

  player.play(resource);
  connection.subscribe(player);

  player.on(AudioPlayerStatus.Playing, () => {
    console.log('Audio is playing...');
  });

  player.on(AudioPlayerStatus.Idle, () => {
    console.log('Finished playing voice.');
    setTimeout(() => {
      // Delete the WAV file once playback has finished
      fs.unlink(filePath, (err) => {
        if (err) {
          console.error(`Error deleting file: ${err}`);
        } else {
          console.log(`Deleted file: ${filePath}`);
        }
      });
    }, 1000);
    isPlaying = false; // Playback finished
    isProcessing = false; // Processing finished
  });

  player.on('error', error => {
    console.error('Error playing audio:', error.message);

    // Retry playback on error
    if (retryCount > 0) {
      console.log(`Retrying to play the voice... (${retryCount} retries left)`);
      isPlaying = false; // Clear the flag so the retry is not skipped by the guard above
      setTimeout(() => {
        playVoiceInChannel(connection, filePath, retryCount - 1);
      }, 1000); // Wait one second before retrying
    } else {
      console.error('Max retries reached. Could not play the audio.');
      isPlaying = false; // Playback failed
      isProcessing = false; // Processing finished
    }
  });
}

client.login(process.env.BOT_TOKEN);
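
// Required environment variables: BOT_TOKEN (Discord bot token), OPENAI_API_KEY,
// and optionally CHAT_GPT_MODEL (defaults to gpt-4o). ffmpeg must be installed
// and on the PATH, and the TTS server configured above must be reachable.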