diff --git a/.gitignore b/.gitignore index 9fd0febdb..e101f0053 100644 --- a/.gitignore +++ b/.gitignore @@ -66,3 +66,6 @@ credentials/ .claude/pai_updates/ .claude/pai_backups/ .claude/.pai-sync-history + +# Local backups (user customizations preserved across updates) +.local-backup/ diff --git a/Releases/v4.0.3/.claude/VoiceServer/install.sh b/Releases/v4.0.3/.claude/VoiceServer/install.sh index 15b6c83e0..816524696 100755 --- a/Releases/v4.0.3/.claude/VoiceServer/install.sh +++ b/Releases/v4.0.3/.claude/VoiceServer/install.sh @@ -189,6 +189,21 @@ echo " -d '{\"message\": \"Hello from PAI\"}'" echo echo -e "${GREEN}The voice server will now start automatically when you log in.${NC}" +# Install voice CLI to /usr/local/bin +VOICE_CLI="$SCRIPT_DIR/voice" +if [ -f "$VOICE_CLI" ]; then + chmod +x "$VOICE_CLI" + echo -e "${YELLOW}> Installing 'voice' CLI...${NC}" + if ln -sf "$VOICE_CLI" /usr/local/bin/voice 2>/dev/null; then + echo -e "${GREEN}OK Installed 'voice' CLI to /usr/local/bin/voice${NC}" + echo " Usage: voice say | voice 11labs | voice status | voice test" + else + echo -e "${YELLOW}! Could not symlink to /usr/local/bin (permission denied)${NC}" + echo " To install manually:" + echo " sudo ln -sf $VOICE_CLI /usr/local/bin/voice" + fi +fi + # Ask about menu bar indicator echo read -p "Would you like to install a menu bar indicator? 
(y/n): " -n 1 -r diff --git a/Releases/v4.0.3/.claude/VoiceServer/server.ts b/Releases/v4.0.3/.claude/VoiceServer/server.ts index 9f5dec95c..c034edff7 100644 --- a/Releases/v4.0.3/.claude/VoiceServer/server.ts +++ b/Releases/v4.0.3/.claude/VoiceServer/server.ts @@ -35,9 +35,25 @@ if (existsSync(envPath)) { const PORT = parseInt(process.env.PORT || "8888"); const ELEVENLABS_API_KEY = process.env.ELEVENLABS_API_KEY; -if (!ELEVENLABS_API_KEY) { - console.error('⚠️ ELEVENLABS_API_KEY not found in ~/.env'); - console.error('Add: ELEVENLABS_API_KEY=your_key_here'); +// TTS engine selection: "elevenlabs" or "macos-say" +// Set via TTS_ENGINE env var, or settings.json voice.tts_engine +type TtsEngine = "elevenlabs" | "macos-say"; +let TTS_ENGINE: TtsEngine = (process.env.TTS_ENGINE as TtsEngine) || "elevenlabs"; + +// macOS say defaults +interface MacOsSayConfig { + voice: string; // macOS voice name (e.g., "Samantha", "Daniel", "Ava") + rate: number; // words per minute (default ~175-200) +} + +let macOsSayConfig: MacOsSayConfig = { + voice: "Samantha", + rate: 200, +}; + +if (TTS_ENGINE === "elevenlabs" && !ELEVENLABS_API_KEY) { + console.warn('⚠️ ELEVENLABS_API_KEY not found — falling back to macOS say'); + TTS_ENGINE = "macos-say"; } // ========================================================================== @@ -136,6 +152,8 @@ interface LoadedVoiceConfig { voices: Record; // keyed by name ("main", "algorithm") voicesByVoiceId: Record; // keyed by voiceId for lookup desktopNotifications: boolean; // whether to show macOS notification banners + ttsEngine?: TtsEngine; // override from settings.json + macOsSay?: Partial; // macOS say config from settings.json } // Last-resort defaults if settings.json is entirely missing or unparseable @@ -195,7 +213,11 @@ function loadVoiceConfig(): LoadedVoiceConfig { console.log(` ${name}: ${entry.voiceName || entry.voiceId} (speed: ${entry.speed}, stability: ${entry.stability})`); } - return { defaultVoiceId, voices, 
voicesByVoiceId, desktopNotifications }; + // TTS engine and macOS say config from settings.json + const ttsEngine = settings.voice?.tts_engine as TtsEngine | undefined; + const macOsSay = settings.voice?.macos_say as Partial | undefined; + + return { defaultVoiceId, voices, voicesByVoiceId, desktopNotifications, ttsEngine, macOsSay }; } catch (error) { console.error('⚠️ Failed to load settings.json voice config:', error); return { defaultVoiceId: '', voices: {}, voicesByVoiceId: {}, desktopNotifications: true }; @@ -204,6 +226,15 @@ function loadVoiceConfig(): LoadedVoiceConfig { // Load config at startup const voiceConfig = loadVoiceConfig(); + +// Apply settings.json TTS engine override (env var takes priority) +if (!process.env.TTS_ENGINE && voiceConfig.ttsEngine) { + TTS_ENGINE = voiceConfig.ttsEngine; +} +if (voiceConfig.macOsSay) { + if (voiceConfig.macOsSay.voice) macOsSayConfig.voice = voiceConfig.macOsSay.voice; + if (voiceConfig.macOsSay.rate) macOsSayConfig.rate = voiceConfig.macOsSay.rate; +} const DEFAULT_VOICE_ID = voiceConfig.defaultVoiceId || process.env.ELEVENLABS_VOICE_ID || "s3TPKV1kjDlVtZbl4Ksh"; // Look up a voice entry by voice ID @@ -394,6 +425,44 @@ async function playAudio(audioBuffer: ArrayBuffer, volume: number = FALLBACK_VOL }); } +// Speak text using macOS say command +async function speakWithSay(text: string, volume?: number): Promise { + // Apply pronunciation replacements before speaking + const pronouncedText = applyPronunciations(text); + if (pronouncedText !== text) { + console.log(`📖 Pronunciation: "${text}" → "${pronouncedText}"`); + } + + const args = ['-v', macOsSayConfig.voice, '-r', macOsSayConfig.rate.toString()]; + + // macOS say doesn't have a volume flag, so use afplay with a generated AIFF + if (volume !== undefined && volume !== 1.0) { + const tempFile = `/tmp/voice-${Date.now()}.aiff`; + args.push('-o', tempFile); + + await new Promise((resolve, reject) => { + const proc = spawn('/usr/bin/say', [...args, 
pronouncedText]); + proc.on('error', reject); + proc.on('exit', (code) => code === 0 ? resolve() : reject(new Error(`say exited with code ${code}`))); + }); + + await new Promise((resolve, reject) => { + const proc = spawn('/usr/bin/afplay', ['-v', volume.toString(), tempFile]); + proc.on('error', reject); + proc.on('exit', (code) => { + spawn('/bin/rm', [tempFile]); + code === 0 ? resolve() : reject(new Error(`afplay exited with code ${code}`)); + }); + }); + } else { + await new Promise((resolve, reject) => { + const proc = spawn('/usr/bin/say', [...args, pronouncedText]); + proc.on('error', reject); + proc.on('exit', (code) => code === 0 ? resolve() : reject(new Error(`say exited with code ${code}`))); + }); + } +} + // Spawn a process safely function spawnSafe(command: string, args: string[]): Promise { return new Promise((resolve, reject) => { @@ -414,6 +483,12 @@ function spawnSafe(command: string, args: string[]): Promise { }); } +// ========================================================================== +// Mute state — toggled via /mute and /unmute endpoints, no restart needed +// ========================================================================== + +let voiceMuted = false; + // ========================================================================== // Core: Send notification with 3-tier voice settings resolution // ========================================================================== @@ -454,58 +529,71 @@ async function sendNotification( const { cleaned, emotion } = extractEmotionalMarker(safeMessage); safeMessage = cleaned; - // Generate and play voice using ElevenLabs + // Generate and play voice let voicePlayed = false; let voiceError: string | undefined; - if (voiceEnabled && ELEVENLABS_API_KEY) { + if (voiceMuted) { + console.log(`🔇 Voice muted — skipping TTS`); + } else if (voiceEnabled) { try { - const voice = voiceId || DEFAULT_VOICE_ID; - - // 3-tier voice settings resolution - let resolvedSettings: ElevenLabsVoiceSettings; - 
let resolvedVolume: number; - - if (callerVoiceSettings && Object.keys(callerVoiceSettings).length > 0) { - // Tier 1: Caller provided explicit voice_settings → pass through - resolvedSettings = { - stability: callerVoiceSettings.stability ?? FALLBACK_VOICE_SETTINGS.stability, - similarity_boost: callerVoiceSettings.similarity_boost ?? FALLBACK_VOICE_SETTINGS.similarity_boost, - style: callerVoiceSettings.style ?? FALLBACK_VOICE_SETTINGS.style, - speed: callerVoiceSettings.speed ?? FALLBACK_VOICE_SETTINGS.speed, - use_speaker_boost: callerVoiceSettings.use_speaker_boost ?? FALLBACK_VOICE_SETTINGS.use_speaker_boost, - }; - resolvedVolume = callerVolume ?? FALLBACK_VOLUME; - console.log(`🔗 Voice settings: pass-through from caller`); - } else { - // Tier 2/3: Look up by voiceId, fall back to main - const voiceEntry = lookupVoiceByVoiceId(voice) || voiceConfig.voices.main; - if (voiceEntry) { - resolvedSettings = voiceEntryToSettings(voiceEntry); - resolvedVolume = callerVolume ?? voiceEntry.volume ?? FALLBACK_VOLUME; - console.log(`📋 Voice settings: from settings.json (${voiceEntry.voiceName || voice})`); - } else { - resolvedSettings = { ...FALLBACK_VOICE_SETTINGS }; + if (TTS_ENGINE === "macos-say") { + // macOS say — no API key needed + const resolvedVolume = callerVolume ?? FALLBACK_VOLUME; + console.log(`🎙️ Speaking with macOS say (voice: ${macOsSayConfig.voice}, rate: ${macOsSayConfig.rate}, volume: ${resolvedVolume})`); + await speakWithSay(safeMessage, resolvedVolume); + voicePlayed = true; + } else if (ELEVENLABS_API_KEY) { + // ElevenLabs TTS + const voice = voiceId || DEFAULT_VOICE_ID; + + // 3-tier voice settings resolution + let resolvedSettings: ElevenLabsVoiceSettings; + let resolvedVolume: number; + + if (callerVoiceSettings && Object.keys(callerVoiceSettings).length > 0) { + // Tier 1: Caller provided explicit voice_settings → pass through + resolvedSettings = { + stability: callerVoiceSettings.stability ?? 
FALLBACK_VOICE_SETTINGS.stability, + similarity_boost: callerVoiceSettings.similarity_boost ?? FALLBACK_VOICE_SETTINGS.similarity_boost, + style: callerVoiceSettings.style ?? FALLBACK_VOICE_SETTINGS.style, + speed: callerVoiceSettings.speed ?? FALLBACK_VOICE_SETTINGS.speed, + use_speaker_boost: callerVoiceSettings.use_speaker_boost ?? FALLBACK_VOICE_SETTINGS.use_speaker_boost, + }; resolvedVolume = callerVolume ?? FALLBACK_VOLUME; - console.log(`⚠️ Voice settings: fallback defaults (no config found for ${voice})`); + console.log(`🔗 Voice settings: pass-through from caller`); + } else { + // Tier 2/3: Look up by voiceId, fall back to main + const voiceEntry = lookupVoiceByVoiceId(voice) || voiceConfig.voices.main; + if (voiceEntry) { + resolvedSettings = voiceEntryToSettings(voiceEntry); + resolvedVolume = callerVolume ?? voiceEntry.volume ?? FALLBACK_VOLUME; + console.log(`📋 Voice settings: from settings.json (${voiceEntry.voiceName || voice})`); + } else { + resolvedSettings = { ...FALLBACK_VOICE_SETTINGS }; + resolvedVolume = callerVolume ?? 
FALLBACK_VOLUME; + console.log(`⚠️ Voice settings: fallback defaults (no config found for ${voice})`); + } } - } - // Emotional preset overlay — modifies stability + similarity_boost only - if (emotion && EMOTIONAL_PRESETS[emotion]) { - resolvedSettings = { - ...resolvedSettings, - stability: EMOTIONAL_PRESETS[emotion].stability, - similarity_boost: EMOTIONAL_PRESETS[emotion].similarity_boost, - }; - console.log(`🎭 Emotion overlay: ${emotion}`); - } + // Emotional preset overlay — modifies stability + similarity_boost only + if (emotion && EMOTIONAL_PRESETS[emotion]) { + resolvedSettings = { + ...resolvedSettings, + stability: EMOTIONAL_PRESETS[emotion].stability, + similarity_boost: EMOTIONAL_PRESETS[emotion].similarity_boost, + }; + console.log(`🎭 Emotion overlay: ${emotion}`); + } - console.log(`🎙️ Generating speech (voice: ${voice}, speed: ${resolvedSettings.speed}, stability: ${resolvedSettings.stability}, boost: ${resolvedSettings.similarity_boost}, style: ${resolvedSettings.style}, volume: ${resolvedVolume})`); + console.log(`🎙️ Generating speech (voice: ${voice}, speed: ${resolvedSettings.speed}, stability: ${resolvedSettings.stability}, boost: ${resolvedSettings.similarity_boost}, style: ${resolvedSettings.style}, volume: ${resolvedVolume})`); - const audioBuffer = await generateSpeech(safeMessage, voice, resolvedSettings); - await playAudio(audioBuffer, resolvedVolume); - voicePlayed = true; + const audioBuffer = await generateSpeech(safeMessage, voice, resolvedSettings); + await playAudio(audioBuffer, resolvedVolume); + voicePlayed = true; + } else { + voiceError = "No TTS engine available (no API key and not using macos-say)"; + } } catch (error: any) { console.error("Failed to generate/play speech:", error); voiceError = error.message || "TTS generation failed"; @@ -683,14 +771,35 @@ const server = serve({ } } + if (url.pathname === "/mute" && req.method === "POST") { + voiceMuted = true; + console.log(`🔇 Voice muted`); + return new Response( + 
JSON.stringify({ status: "success", muted: true }), + { headers: { ...corsHeaders, "Content-Type": "application/json" }, status: 200 } + ); + } + + if (url.pathname === "/unmute" && req.method === "POST") { + voiceMuted = false; + console.log(`🔊 Voice unmuted`); + return new Response( + JSON.stringify({ status: "success", muted: false }), + { headers: { ...corsHeaders, "Content-Type": "application/json" }, status: 200 } + ); + } + if (url.pathname === "/health") { return new Response( JSON.stringify({ status: "healthy", port: PORT, - voice_system: "ElevenLabs", - default_voice_id: DEFAULT_VOICE_ID, - api_key_configured: !!ELEVENLABS_API_KEY, + muted: voiceMuted, + tts_engine: TTS_ENGINE, + voice_system: TTS_ENGINE === "macos-say" ? "macOS say" : "ElevenLabs", + ...(TTS_ENGINE === "macos-say" + ? { macos_voice: macOsSayConfig.voice, macos_rate: macOsSayConfig.rate } + : { default_voice_id: DEFAULT_VOICE_ID, api_key_configured: !!ELEVENLABS_API_KEY }), pronunciation_rules: pronunciationRules.length, configured_voices: Object.keys(voiceConfig.voices), }), @@ -709,8 +818,12 @@ const server = serve({ }); console.log(`🚀 Voice Server running on port ${PORT}`); -console.log(`🎙️ Using ElevenLabs TTS (default voice: ${DEFAULT_VOICE_ID})`); +if (TTS_ENGINE === "macos-say") { + console.log(`🎙️ Using macOS say (voice: ${macOsSayConfig.voice}, rate: ${macOsSayConfig.rate} wpm)`); +} else { + console.log(`🎙️ Using ElevenLabs TTS (default voice: ${DEFAULT_VOICE_ID})`); + console.log(`🔑 API Key: ${ELEVENLABS_API_KEY ? '✅ Configured' : '❌ Missing'}`); +} console.log(`📡 POST to http://localhost:${PORT}/notify`); console.log(`🔒 Security: CORS restricted to localhost, rate limiting enabled`); -console.log(`🔑 API Key: ${ELEVENLABS_API_KEY ? 
'✅ Configured' : '❌ Missing'}`); console.log(`📖 Pronunciations: ${pronunciationRules.length} rules loaded`); diff --git a/Releases/v4.0.3/.claude/VoiceServer/uninstall.sh b/Releases/v4.0.3/.claude/VoiceServer/uninstall.sh index 1f4cc8dcb..fc53ea008 100755 --- a/Releases/v4.0.3/.claude/VoiceServer/uninstall.sh +++ b/Releases/v4.0.3/.claude/VoiceServer/uninstall.sh @@ -58,6 +58,12 @@ if lsof -i :8888 > /dev/null 2>&1; then echo -e "${GREEN}OK Port 8888 cleared${NC}" fi +# Remove voice CLI symlink +if [ -L /usr/local/bin/voice ]; then + rm /usr/local/bin/voice 2>/dev/null || sudo rm /usr/local/bin/voice 2>/dev/null + echo -e "${GREEN}OK Removed voice CLI from /usr/local/bin${NC}" +fi + # Ask about logs echo read -p "Do you want to remove log files? (y/n): " -n 1 -r diff --git a/Releases/v4.0.3/.claude/VoiceServer/voice b/Releases/v4.0.3/.claude/VoiceServer/voice new file mode 100755 index 000000000..8ac409151 --- /dev/null +++ b/Releases/v4.0.3/.claude/VoiceServer/voice @@ -0,0 +1,269 @@ +#!/bin/bash +# voice — Switch PAI voice server between ElevenLabs and macOS say +# +# Usage: +# voice say Switch to macOS say +# voice say Ava Switch to macOS say with voice "Ava" +# voice say Ava 180 Switch to macOS say with voice "Ava" at 180 wpm +# voice 11labs Switch to ElevenLabs +# voice status Show current engine and server health +# voice voices List available macOS say voices +# voice restart Restart the voice server +# voice test [message] Send a test notification +# voice log Tail the voice server log + +set -euo pipefail + +SETTINGS="$HOME/.claude/settings.json" +VOICE_SERVER_DIR="$HOME/.claude/VoiceServer" +LOG="$HOME/Library/Logs/pai-voice-server.log" + +RED='\033[0;31m' +GREEN='\033[0;32m' +YELLOW='\033[1;33m' +BLUE='\033[0;34m' +NC='\033[0m' + +# ── Helpers ────────────────────────────────────────────────────── + +ensure_jq() { + if ! command -v jq &>/dev/null; then + echo -e "${RED}jq is required. 
Install: brew install jq${NC}" + exit 1 + fi +} + +get_engine() { + ensure_jq + if [ -f "$SETTINGS" ]; then + jq -r '.voice.tts_engine // "elevenlabs"' "$SETTINGS" + else + echo "elevenlabs" + fi +} + +get_say_voice() { + ensure_jq + if [ -f "$SETTINGS" ]; then + jq -r '.voice.macos_say.voice // "Samantha"' "$SETTINGS" + else + echo "Samantha" + fi +} + +get_say_rate() { + ensure_jq + if [ -f "$SETTINGS" ]; then + jq -r '.voice.macos_say.rate // 200' "$SETTINGS" + else + echo "200" + fi +} + +set_engine() { + ensure_jq + local engine="$1" + local tmp + tmp=$(mktemp) + + if [ ! -f "$SETTINGS" ]; then + echo '{}' > "$SETTINGS" + fi + + jq --arg engine "$engine" ' + .voice.tts_engine = $engine + ' "$SETTINGS" > "$tmp" && mv "$tmp" "$SETTINGS" +} + +set_say_config() { + ensure_jq + local voice="$1" + local rate="$2" + local tmp + tmp=$(mktemp) + + jq --arg voice "$voice" --argjson rate "$rate" ' + .voice.macos_say.voice = $voice | + .voice.macos_say.rate = $rate + ' "$SETTINGS" > "$tmp" && mv "$tmp" "$SETTINGS" +} + +restart_server() { + echo -e "${YELLOW}Restarting voice server...${NC}" + if [ -x "$VOICE_SERVER_DIR/restart.sh" ]; then + "$VOICE_SERVER_DIR/restart.sh" + else + # Manual restart + lsof -ti :8888 | xargs kill -9 2>/dev/null || true + sleep 1 + launchctl unload "$HOME/Library/LaunchAgents/com.pai.voice-server.plist" 2>/dev/null || true + launchctl load "$HOME/Library/LaunchAgents/com.pai.voice-server.plist" 2>/dev/null + sleep 2 + fi +} + +health_check() { + curl -s http://localhost:8888/health 2>/dev/null +} + +# ── Commands ───────────────────────────────────────────────────── + +cmd_say() { + local voice="${1:-$(get_say_voice)}" + local rate="${2:-$(get_say_rate)}" + + # Validate voice exists + if ! say -v '?' | grep -qi "^${voice} "; then + echo -e "${RED}Voice '${voice}' not found. 
Run 'voice voices' to list available voices.${NC}" + exit 1 + fi + + set_engine "macos-say" + set_say_config "$voice" "$rate" + echo -e "${GREEN}Switched to macOS say${NC}" + echo -e " Voice: ${BLUE}${voice}${NC}" + echo -e " Rate: ${BLUE}${rate} wpm${NC}" + restart_server + echo + echo -e "${GREEN}Done.${NC} Test with: ${BLUE}voice test${NC}" +} + +cmd_11labs() { + set_engine "elevenlabs" + echo -e "${GREEN}Switched to ElevenLabs${NC}" + restart_server + echo + echo -e "${GREEN}Done.${NC} Test with: ${BLUE}voice test${NC}" +} + +cmd_status() { + local engine + engine=$(get_engine) + + echo -e "${BLUE}PAI Voice Server Status${NC}" + echo -e " Engine: ${GREEN}${engine}${NC}" + + if [ "$engine" = "macos-say" ]; then + echo -e " Voice: $(get_say_voice)" + echo -e " Rate: $(get_say_rate) wpm" + fi + + echo + + local health + health=$(health_check || true) + if [ -n "$health" ]; then + echo -e " Server: ${GREEN}running${NC}" + local muted + muted=$(echo "$health" | jq -r '.muted // false' 2>/dev/null || true) + if [ "$muted" = "true" ]; then + echo -e " Muted: ${YELLOW}yes${NC}" + fi + echo "$health" | jq . 2>/dev/null || echo " $health" + else + echo -e " Server: ${RED}not responding${NC}" + fi +} + +cmd_voices() { + echo -e "${BLUE}Available macOS voices:${NC}" + echo -e "${YELLOW}(Voices marked with (premium) sound better but may need downloading in System Settings > Accessibility > Spoken Content)${NC}" + echo + say -v '?' 
| while IFS= read -r line; do + name=$(echo "$line" | awk '{print $1}') + lang=$(echo "$line" | awk '{print $2}') + echo -e " ${GREEN}${name}${NC} ${lang}" + done + echo + echo -e "Preview a voice: ${BLUE}say -v Ava 'Hello, I am Ava'${NC}" + echo -e "Use a voice: ${BLUE}voice say Ava${NC}" +} + +cmd_test() { + local msg="${1:-Hello from the PAI voice server}" + echo -e "${YELLOW}Sending test: ${NC}\"${msg}\"" + local result + result=$(curl -s -X POST http://localhost:8888/notify \ + -H "Content-Type: application/json" \ + -d "$(jq -n --arg m "$msg" '{message: $m}')" 2>&1 || true) + + if echo "$result" | jq -e '.status == "success"' &>/dev/null; then + echo -e "${GREEN}OK${NC}" + else + echo -e "${RED}Failed:${NC} $result" + fi +} + +cmd_mute() { + local result + result=$(curl -s -X POST http://localhost:8888/mute 2>&1 || true) + if echo "$result" | jq -e '.muted == true' &>/dev/null; then + echo -e "${YELLOW}🔇 Voice muted${NC}" + else + echo -e "${RED}Failed to mute:${NC} $result" + fi +} + +cmd_unmute() { + local result + result=$(curl -s -X POST http://localhost:8888/unmute 2>&1 || true) + if echo "$result" | jq -e '.muted == false' &>/dev/null; then + echo -e "${GREEN}🔊 Voice unmuted${NC}" + else + echo -e "${RED}Failed to unmute:${NC} $result" + fi +} + +cmd_restart() { + restart_server +} + +cmd_log() { + if [ -f "$LOG" ]; then + tail -f "$LOG" + else + echo -e "${RED}No log file at ${LOG}${NC}" + fi +} + +cmd_help() { + echo "voice — PAI voice server manager" + echo + echo "Usage:" + echo " voice say [voice] [rate] Switch to macOS say (default: Samantha, 200 wpm)" + echo " voice 11labs Switch to ElevenLabs" + echo " voice mute Mute voice (server stays running)" + echo " voice unmute Unmute voice" + echo " voice status Show current engine and server health" + echo " voice voices List available macOS say voices" + echo " voice restart Restart the voice server" + echo " voice test [message] Send a test notification" + echo " voice log Tail the voice server log" + echo + echo "Examples:" + echo " 
voice say Use macOS say with current/default settings" + echo " voice say Ava Use macOS say with Ava voice" + echo " voice say Daniel 180 Use Daniel voice at 180 wpm" + echo " voice 11labs Switch back to ElevenLabs" + echo " voice mute Silence without stopping server" +} + +# ── Main ───────────────────────────────────────────────────────── + +case "${1:-help}" in + say) shift; cmd_say "$@" ;; + 11labs|elevenlabs) cmd_11labs ;; + mute) cmd_mute ;; + unmute) cmd_unmute ;; + status) cmd_status ;; + voices) cmd_voices ;; + restart) cmd_restart ;; + test) shift; cmd_test "$*" ;; + log) cmd_log ;; + help|-h|--help) cmd_help ;; + *) + echo -e "${RED}Unknown command: $1${NC}" + cmd_help + exit 1 + ;; +esac