wellnua-lite/utils/audioSession.ts
Commit 059bc29b6b (Sergei) - WIP: LiveKit voice call integration with Julia AI agent
NOT TESTED ON REAL DEVICE - simulator-only verification

Components:
- LiveKit Cloud agent deployment (julia-agent/julia-ai/)
- React Native LiveKit client (hooks/useLiveKitRoom.ts)
- Voice call screen with audio session management
- WellNuo voice_ask API integration in Python agent

Tech stack:
- LiveKit Cloud for agent hosting
- @livekit/react-native SDK
- Deepgram STT/TTS (via LiveKit Cloud)
- Silero VAD for voice activity detection

Known issues:
- Microphone permissions may need manual testing on real device
- LiveKit audio playback not verified on physical hardware
- Agent greeting audio not confirmed working end-to-end

Next steps:
- Test on physical iOS device
- Verify microphone capture works
- Confirm TTS audio playback
- Test full conversation loop
2026-01-18 20:16:25 -08:00

/**
 * iOS AudioSession Configuration Helpers
 *
 * CRITICAL: This must be configured BEFORE connecting to the LiveKit room!
 * Without proper AudioSession setup, the microphone won't work on iOS.
 */
import { Platform } from 'react-native';

// AudioSession module - use 'any' to avoid complex typing issues with @livekit/react-native.
// The actual AudioSession from LiveKit has specific enum types that are hard to match statically.
let audioSessionModule: any = null;

/**
 * Import the AudioSession module lazily.
 * This is needed because @livekit/react-native must be imported after registerGlobals().
 */
async function getAudioSession(): Promise<any | null> {
  if (Platform.OS !== 'ios') return null;
  if (!audioSessionModule) {
    const livekit = await import('@livekit/react-native');
    audioSessionModule = livekit.AudioSession;
  }
  return audioSessionModule;
}
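
/*
 * Illustrative sketch, not part of this module: registerGlobals() from
 * @livekit/react-native is expected to run once at app startup, before any
 * LiveKit API (including this lazy import) is touched. Where exactly that
 * happens in this project is an assumption - shown here as the app entry file.
 *
 *   // index.js (app entry - assumed location)
 *   import { registerGlobals } from '@livekit/react-native';
 *   registerGlobals();
 */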

/**
 * Configure iOS AudioSession for a bidirectional voice call.
 *
 * MUST be called BEFORE connecting to the LiveKit room!
 *
 * Configuration:
 * - Category: playAndRecord (both speaker and mic)
 * - Mode: voiceChat (optimized for voice calls)
 * - Options: Bluetooth, speaker, mix with others
 */
export async function configureAudioForVoiceCall(): Promise<void> {
  if (Platform.OS !== 'ios') {
    console.log('[AudioSession] Skipping on non-iOS platform');
    return;
  }

  console.log('[AudioSession] Configuring for voice call...');
  try {
    const AudioSession = await getAudioSession();
    if (!AudioSession) {
      console.error('[AudioSession] Failed to get AudioSession module');
      return;
    }

    // Step 1: Set Apple-specific audio configuration
    console.log('[AudioSession] Step 1: Setting Apple audio config...');
    await AudioSession.setAppleAudioConfiguration({
      audioCategory: 'playAndRecord',
      audioCategoryOptions: [
        'allowBluetooth',
        'allowBluetoothA2DP',
        'defaultToSpeaker',
        'mixWithOthers',
      ],
      audioMode: 'voiceChat',
    });

    // Step 2: Configure default output to speaker
    console.log('[AudioSession] Step 2: Setting default output...');
    await AudioSession.configureAudio({
      ios: {
        defaultOutput: 'speaker',
      },
    });

    // Step 3: Start the audio session
    console.log('[AudioSession] Step 3: Starting audio session...');
    await AudioSession.startAudioSession();
    console.log('[AudioSession] Configuration complete!');
  } catch (error) {
    console.error('[AudioSession] Configuration error:', error);
    throw error;
  }
}
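
/*
 * Illustrative sketch, not part of this module: the expected call order when
 * joining the voice call. Room here is the livekit-client Room class (assumed
 * to be usable in React Native after registerGlobals()); wsUrl and token are
 * assumed to come from the app's own token endpoint.
 *
 *   import { Room } from 'livekit-client';
 *
 *   async function joinVoiceCall(wsUrl: string, token: string): Promise<Room> {
 *     // 1. Configure and start the iOS audio session BEFORE connecting.
 *     await configureAudioForVoiceCall();
 *
 *     // 2. Connect to the LiveKit room.
 *     const room = new Room();
 *     await room.connect(wsUrl, token);
 *
 *     // 3. Publish the microphone so the agent can hear the user.
 *     await room.localParticipant.setMicrophoneEnabled(true);
 *     return room;
 *   }
 */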

/**
 * Stop iOS AudioSession
 *
 * Should be called when disconnecting from voice call
 */
export async function stopAudioSession(): Promise<void> {
  if (Platform.OS !== 'ios') {
    return;
  }

  console.log('[AudioSession] Stopping audio session...');
  try {
    const AudioSession = await getAudioSession();
    if (!AudioSession) {
      return;
    }
    await AudioSession.stopAudioSession();
    console.log('[AudioSession] Stopped');
  } catch (error) {
    console.error('[AudioSession] Error stopping:', error);
    // Don't throw - cleanup errors are not critical
  }
}
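
/*
 * Illustrative sketch, not part of this module: a rough teardown counterpart to
 * the join sketch above. The order (disconnect the room first, then stop the
 * audio session) is an assumption of this WIP, not something verified on device.
 *
 *   async function leaveVoiceCall(room: Room): Promise<void> {
 *     await room.disconnect();
 *     await stopAudioSession();
 *   }
 */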

/**
 * Reconfigure audio session after remote track arrives
 *
 * Sometimes iOS needs a kick to properly route audio after remote participant joins
 */
export async function reconfigureAudioForPlayback(): Promise<void> {
  if (Platform.OS !== 'ios') {
    return;
  }

  console.log('[AudioSession] Reconfiguring for playback...');
  try {
    const AudioSession = await getAudioSession();
    if (!AudioSession) {
      return;
    }
    // Just reconfigure the same settings - this "refreshes" the audio routing
    await AudioSession.setAppleAudioConfiguration({
      audioCategory: 'playAndRecord',
      audioCategoryOptions: [
        'allowBluetooth',
        'allowBluetoothA2DP',
        'defaultToSpeaker',
        'mixWithOthers',
      ],
      audioMode: 'voiceChat',
    });
    console.log('[AudioSession] Reconfigured successfully');
  } catch (error) {
    console.error('[AudioSession] Reconfigure error:', error);
    // Don't throw - this is a best-effort operation
  }
}
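
/*
 * Illustrative sketch, not part of this module: one place reconfigureAudioForPlayback()
 * could be wired up is a TrackSubscribed handler, so iOS audio routing is refreshed
 * once the agent's audio track arrives. RoomEvent and Track come from livekit-client;
 * whether hooks/useLiveKitRoom.ts already does this is not confirmed here.
 *
 *   import { Room, RoomEvent, Track } from 'livekit-client';
 *
 *   function watchForAgentAudio(room: Room): void {
 *     room.on(RoomEvent.TrackSubscribed, (track) => {
 *       if (track.kind === Track.Kind.Audio) {
 *         // Best-effort refresh of iOS audio routing for the new remote track.
 *         void reconfigureAudioForPlayback();
 *       }
 *     });
 *   }
 */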