Compare commits

10 Commits: 6a6c85f7c3 ... 4b97689dd3

| Author | SHA1 | Date |
|---|---|---|
| | 4b97689dd3 | |
| | 0d872a09b7 | |
| | e3192ead12 | |
| | 57577b42c9 | |
| | bbc59e61ce | |
| | 122f521af6 | |
| | 9b152bdf9d | |
| | 173c0a8262 | |
| | bc33230739 | |
| | cd9dddda34 | |
@@ -46,11 +46,14 @@ export default function TabLayout() {
          href: null,
        }}
      />
      {/* Chat hidden for now - testing via debug */}
      {/* Chat with Julia AI */}
      <Tabs.Screen
        name="chat"
        options={{
          href: null,
          title: 'Julia',
          tabBarIcon: ({ color, size }) => (
            <Feather name="message-circle" size={22} color={color} />
          ),
        }}
      />
      {/* Voice tab hidden - using Debug for testing */}
@@ -69,14 +72,11 @@ export default function TabLayout() {
          ),
        }}
      />
      {/* Debug tab for testing */}
      {/* Debug tab - hidden in production */}
      <Tabs.Screen
        name="debug"
        options={{
          title: 'Debug',
          tabBarIcon: ({ color, size }) => (
            <Feather name="code" size={22} color={color} />
          ),
          href: null,
        }}
      />
      {/* Hide explore tab */}
@@ -21,7 +21,6 @@ import {
} from 'react-native';
import { Ionicons } from '@expo/vector-icons';
import { SafeAreaView } from 'react-native-safe-area-context';
import * as SecureStore from 'expo-secure-store';
import { useRouter } from 'expo-router';
import { api } from '@/services/api';
import { useBeneficiary } from '@/contexts/BeneficiaryContext';
@@ -31,6 +30,73 @@ import type { Message, Beneficiary } from '@/types';

const API_URL = 'https://eluxnetworks.net/function/well-api/api';

// WellNuo API credentials (same as julia-agent)
const WELLNUO_USER = 'anandk';
const WELLNUO_PASSWORD = 'anandk_8';

// Keywords for question normalization (same as julia-agent/julia-ai/src/agent.py)
const STATUS_KEYWORDS = [
  /\bhow\s+is\b/i,
  /\bhow'?s\b/i,
  /\bhow\s+are\b/i,
  /\btell\s+me\s+about\b/i,
  /\bwhat'?s\s+up\s+with\b/i,
  /\bupdate\s+on\b/i,
  /\bstatus\b/i,
  /\bdoing\b/i,
  /\bfeeling\b/i,
  /\bcheck\s+on\b/i,
  /\bis\s+\w+\s+okay\b/i,
  /\bis\s+\w+\s+alright\b/i,
  /\bis\s+\w+\s+fine\b/i,
  /\bokay\?\b/i,
  /\balright\?\b/i,
];

const SUBJECT_KEYWORDS = [
  /\bdad\b/i,
  /\bfather\b/i,
  /\bferdinand\b/i,
  /\bhim\b/i,
  /\bhe\b/i,
  /\bmy\s+dad\b/i,
  /\bmy\s+father\b/i,
  /\bthe\s+patient\b/i,
  /\bloved\s+one\b/i,
  /\bparent\b/i,
  /\bgrandpa\b/i,
  /\bgrandfather\b/i,
];

/**
 * Transform user questions into format WellNuo API understands.
 * WellNuo API only responds with real sensor data for very specific phrases.
 * This function maps common user questions to those phrases.
 * (Same logic as julia-agent/julia-ai/src/agent.py normalize_question)
 */
function normalizeQuestion(userMessage: string): string {
  const msgLower = userMessage.toLowerCase().trim();

  const isStatusQuery = STATUS_KEYWORDS.some(pattern => pattern.test(msgLower));
  const isAboutRecipient = SUBJECT_KEYWORDS.some(pattern => pattern.test(msgLower));

  // If asking about the care recipient's general status
  if (isStatusQuery && isAboutRecipient) {
    console.log(`[Chat] Normalized '${userMessage}' -> 'how is dad doing'`);
    return 'how is dad doing';
  }

  // Generic status questions without clear subject - assume they mean the care recipient
  if (isStatusQuery && !isAboutRecipient) {
    console.log(`[Chat] Normalized '${userMessage}' -> 'how is dad doing' (assumed recipient)`);
    return 'how is dad doing';
  }

  // If no transformation needed, return original
  console.log(`[Chat] No normalization applied to: '${userMessage}'`);
  return userMessage;
}

export default function ChatScreen() {
  const router = useRouter();
  const { currentBeneficiary, setCurrentBeneficiary } = useBeneficiary();
@@ -110,6 +176,19 @@ export default function ChatScreen() {
    autoSelect();
  }, []);

  // Scroll to end when keyboard shows
  useEffect(() => {
    const keyboardShowListener = Keyboard.addListener(
      Platform.OS === 'ios' ? 'keyboardWillShow' : 'keyboardDidShow',
      () => {
        setTimeout(() => {
          flatListRef.current?.scrollToEnd({ animated: true });
        }, 100);
      }
    );
    return () => keyboardShowListener.remove();
  }, []);

  const openBeneficiaryPicker = useCallback(() => {
    setShowBeneficiaryPicker(true);
    loadBeneficiaries();
@@ -125,7 +204,38 @@ export default function ChatScreen() {
    router.push('/voice-call');
  }, [router]);

  // Text chat - send message via API
  // Cached API token for WellNuo
  const apiTokenRef = useRef<string | null>(null);

  // Get WellNuo API token (same credentials as julia-agent)
  const getWellNuoToken = useCallback(async (): Promise<string> => {
    if (apiTokenRef.current) {
      return apiTokenRef.current;
    }

    const nonce = Math.floor(Math.random() * 1000000).toString();
    const response = await fetch(API_URL, {
      method: 'POST',
      headers: { 'Content-Type': 'application/x-www-form-urlencoded' },
      body: new URLSearchParams({
        function: 'credentials',
        clientId: 'MA_001',
        user_name: WELLNUO_USER,
        ps: WELLNUO_PASSWORD,
        nonce: nonce,
      }).toString(),
    });

    const data = await response.json();
    if (data.status === '200 OK' && data.access_token) {
      apiTokenRef.current = data.access_token;
      console.log('[Chat] WellNuo token obtained');
      return data.access_token;
    }
    throw new Error('Failed to authenticate with WellNuo API');
  }, []);

  // Text chat - send message via API (same as julia-agent)
  const sendTextMessage = useCallback(async () => {
    const trimmedInput = input.trim();
    if (!trimmedInput || isSending) return;
@@ -143,38 +253,25 @@ export default function ChatScreen() {
    Keyboard.dismiss();

    try {
      const token = await SecureStore.getItemAsync('accessToken');
      const userName = await SecureStore.getItemAsync('userName');
      // Get WellNuo API token (uses anandk credentials like julia-agent)
      const token = await getWellNuoToken();

      if (!token || !userName) {
        throw new Error('Please log in');
      }
      // Normalize question to format WellNuo API understands
      // (same logic as julia-agent/julia-ai/src/agent.py)
      const normalizedQuestion = normalizeQuestion(trimmedInput);

      // Get beneficiary context
      let beneficiary = currentBeneficiary;
      if (!beneficiary?.id) {
        const loaded = await loadBeneficiaries();
        if (loaded.length > 0) {
          beneficiary = loaded[0];
          setCurrentBeneficiary(beneficiary);
        }
      }

      const beneficiaryName = beneficiary?.name || 'the patient';
      const deploymentId = beneficiary?.id?.toString() || '';

      // Call API
      // Call API with EXACT same params as voice agent
      // Using ask_wellnuo_ai instead of voice_ask (same params, same response format)
      const response = await fetch(API_URL, {
        method: 'POST',
        headers: { 'Content-Type': 'application/x-www-form-urlencoded' },
        body: new URLSearchParams({
          function: 'voice_ask',
          clientId: '001',
          user_name: userName,
          function: 'ask_wellnuo_ai',
          clientId: 'MA_001',
          user_name: WELLNUO_USER,
          token: token,
          question: `You are Julia, a caring assistant helping monitor ${beneficiaryName}'s wellbeing. Answer: ${trimmedInput}`,
          deployment_id: deploymentId,
          context: '',
          question: normalizedQuestion,
          deployment_id: '21',
        }).toString(),
      });

@@ -189,7 +286,12 @@ export default function ChatScreen() {
      };
      setMessages(prev => [...prev, assistantMessage]);
    } else {
      throw new Error(data.status === '401 Unauthorized' ? 'Session expired' : 'Could not get response');
      // Token might be expired, clear and retry once
      if (data.status === '401 Unauthorized') {
        apiTokenRef.current = null;
        throw new Error('Session expired, please try again');
      }
      throw new Error('Could not get response');
    }
  } catch (error) {
    const errorMessage: Message = {
@@ -202,7 +304,7 @@ export default function ChatScreen() {
    } finally {
      setIsSending(false);
    }
  }, [input, isSending, currentBeneficiary, loadBeneficiaries, setCurrentBeneficiary]);
  }, [input, isSending, getWellNuoToken]);

  // Render message bubble
  const renderMessage = ({ item }: { item: Message }) => {
@@ -249,7 +351,7 @@ export default function ChatScreen() {
  };

  return (
    <SafeAreaView style={styles.container} edges={['top']}>
    <SafeAreaView style={styles.container} edges={['top', 'bottom']}>
      {/* Header */}
      <View style={styles.header}>
        <TouchableOpacity style={styles.backButton} onPress={() => router.push('/(tabs)')}>
@@ -333,8 +435,8 @@ export default function ChatScreen() {
      {/* Messages */}
      <KeyboardAvoidingView
        style={styles.chatContainer}
        behavior={Platform.OS === 'ios' ? 'padding' : undefined}
        keyboardVerticalOffset={Platform.OS === 'ios' ? 90 : 0}
        behavior={Platform.OS === 'ios' ? 'padding' : 'height'}
        keyboardVerticalOffset={Platform.OS === 'ios' ? 0 : 0}
      >
        <FlatList
          ref={flatListRef}
@@ -3,8 +3,10 @@
 *
 * All-in-one screen for testing Julia AI voice:
 * - Start/End call buttons
 * - Speaker/Earpiece toggle with logging
 * - Real-time logs of all LiveKit events
 * - Copy logs button
 * - Works on both iOS and Android
 */

import React, { useState, useEffect, useRef, useCallback } from 'react';
@@ -26,6 +28,17 @@ import { activateKeepAwakeAsync, deactivateKeepAwake } from 'expo-keep-awake';
import type { Room as RoomType } from 'livekit-client';
import { AppColors, BorderRadius, FontSizes, Spacing } from '@/constants/theme';
import { getToken, VOICE_NAME } from '@/services/livekitService';
import {
  configureAudioForVoiceCall,
  stopAudioSession,
  setAudioOutput,
} from '@/utils/audioSession';
import {
  startVoiceCallService,
  stopVoiceCallService,
  checkAndPromptBatteryOptimization,
  requestNotificationPermission,
} from '@/utils/androidVoiceService';
import Constants from 'expo-constants';

const APP_VERSION = Constants.expoConfig?.version ?? '?.?.?';
@@ -43,6 +56,7 @@ export default function DebugScreen() {
  const [logs, setLogs] = useState<LogEntry[]>([]);
  const [callState, setCallState] = useState<CallState>('idle');
  const [callDuration, setCallDuration] = useState(0);
  const [isSpeakerOn, setIsSpeakerOn] = useState(true); // Default to speaker
  const flatListRef = useRef<FlatList>(null);
  const roomRef = useRef<RoomType | null>(null);
  const callStartTimeRef = useRef<number | null>(null);
@@ -113,6 +127,20 @@ export default function DebugScreen() {
    return () => subscription.remove();
  }, [log]);

  // Toggle speaker
  const toggleSpeaker = useCallback(async () => {
    const newState = !isSpeakerOn;
    log(`=== TOGGLING SPEAKER: ${isSpeakerOn ? 'ON' : 'OFF'} → ${newState ? 'ON' : 'OFF'} ===`, 'info');

    try {
      await setAudioOutput(newState);
      setIsSpeakerOn(newState);
      log(`Speaker toggled to ${newState ? 'ON (loud speaker)' : 'OFF (earpiece)'}`, 'success');
    } catch (err: any) {
      log(`Speaker toggle error: ${err?.message || err}`, 'error');
    }
  }, [isSpeakerOn, log]);

  // Start call
  const startCall = useCallback(async () => {
    if (callState !== 'idle') return;
@@ -120,10 +148,28 @@ export default function DebugScreen() {
    clearLogs();
    setCallState('connecting');
    setCallDuration(0);
    setIsSpeakerOn(true); // Reset speaker state
    callStartTimeRef.current = null;

    try {
      log('=== STARTING VOICE CALL ===', 'info');
      log(`Platform: ${Platform.OS} ${Platform.Version}`, 'info');

      // Android: Request notification permission and check battery optimization
      if (Platform.OS === 'android') {
        log('Android: Requesting notification permission...', 'info');
        const notifPermission = await requestNotificationPermission();
        log(`Notification permission: ${notifPermission ? 'granted' : 'denied'}`, notifPermission ? 'success' : 'info');

        log('Android: Checking battery optimization...', 'info');
        const canProceed = await checkAndPromptBatteryOptimization();
        if (!canProceed) {
          log('User went to battery settings - call postponed', 'info');
          setCallState('idle');
          return;
        }
        log('Battery optimization check passed', 'success');
      }

      // Keep screen awake
      await activateKeepAwakeAsync('voiceCall').catch(() => {});
@@ -131,7 +177,7 @@ export default function DebugScreen() {

      // Step 1: Register WebRTC globals
      log('Step 1: Importing @livekit/react-native...', 'info');
      const { registerGlobals, AudioSession } = await import('@livekit/react-native');
      const { registerGlobals } = await import('@livekit/react-native');

      if (typeof global.RTCPeerConnection === 'undefined') {
        log('Registering WebRTC globals...', 'info');
@@ -146,11 +192,14 @@ export default function DebugScreen() {
      const { Room, RoomEvent, ConnectionState, Track } = await import('livekit-client');
      log('livekit-client imported', 'success');

      // Step 3: Start iOS AudioSession
      if (Platform.OS === 'ios') {
        log('Step 3: Starting iOS AudioSession...', 'info');
        await AudioSession.startAudioSession();
        log('iOS AudioSession started', 'success');
      // Step 3: Configure AudioSession (iOS + Android)
      log(`Step 3: Configuring AudioSession for ${Platform.OS}...`, 'info');
      try {
        await configureAudioForVoiceCall();
        log(`AudioSession configured for ${Platform.OS}`, 'success');
      } catch (audioErr: any) {
        log(`AudioSession config error: ${audioErr?.message || audioErr}`, 'error');
        // Continue anyway - might still work
      }

      // Step 4: Get token from server
@@ -300,6 +349,19 @@ export default function DebugScreen() {
      });

      log(`Local participant: ${newRoom.localParticipant.identity}`, 'info');

      // Android: Start foreground service to keep call alive in background
      if (Platform.OS === 'android') {
        log('Android: Starting foreground service...', 'info');
        try {
          await startVoiceCallService();
          log('Foreground service started - call will continue in background', 'success');
        } catch (fgErr: any) {
          log(`Foreground service error: ${fgErr?.message || fgErr}`, 'error');
          // Continue anyway - call will still work, just may be killed in background
        }
      }

      log('=== CALL ACTIVE ===', 'success');

    } catch (err: any) {
@@ -325,11 +387,24 @@ export default function DebugScreen() {
      log('Disconnected from room', 'success');
    }

    if (Platform.OS === 'ios') {
      log('Stopping iOS AudioSession...', 'info');
      const { AudioSession } = await import('@livekit/react-native');
      await AudioSession.stopAudioSession();
      log('iOS AudioSession stopped', 'success');
    // Android: Stop foreground service
    if (Platform.OS === 'android') {
      log('Android: Stopping foreground service...', 'info');
      try {
        await stopVoiceCallService();
        log('Foreground service stopped', 'success');
      } catch (fgErr: any) {
        log(`Foreground service stop error: ${fgErr?.message || fgErr}`, 'error');
      }
    }

    // Stop AudioSession (iOS + Android)
    log(`Stopping AudioSession on ${Platform.OS}...`, 'info');
    try {
      await stopAudioSession();
      log('AudioSession stopped', 'success');
    } catch (audioErr: any) {
      log(`AudioSession stop error: ${audioErr?.message || audioErr}`, 'error');
    }

    deactivateKeepAwake('voiceCall');
@@ -388,7 +463,7 @@ export default function DebugScreen() {
        <Text style={styles.logCount}>{logs.length} logs</Text>
      </View>

      {/* Control Buttons */}
      {/* Control Buttons - Row 1: Call controls */}
      <View style={styles.controls}>
        {callState === 'idle' ? (
          <TouchableOpacity style={styles.startButton} onPress={startCall}>
@@ -406,6 +481,23 @@ export default function DebugScreen() {
          </TouchableOpacity>
        )}

        {/* Speaker Toggle Button */}
        <TouchableOpacity
          style={[styles.speakerButton, isSpeakerOn ? styles.speakerOn : styles.speakerOff]}
          onPress={toggleSpeaker}
          disabled={callState === 'idle'}
        >
          <Ionicons
            name={isSpeakerOn ? 'volume-high' : 'ear'}
            size={20}
            color="#fff"
          />
          <Text style={styles.smallButtonText}>{isSpeakerOn ? 'Speaker' : 'Ear'}</Text>
        </TouchableOpacity>
      </View>

      {/* Control Buttons - Row 2: Log controls */}
      <View style={styles.controlsRow2}>
        <TouchableOpacity style={styles.copyButton} onPress={copyLogs}>
          <Ionicons name="copy" size={20} color="#fff" />
          <Text style={styles.smallButtonText}>Copy</Text>
@@ -420,6 +512,10 @@ export default function DebugScreen() {
          <Ionicons name="trash" size={20} color="#fff" />
          <Text style={styles.smallButtonText}>Clear</Text>
        </TouchableOpacity>

        <View style={styles.platformBadge}>
          <Text style={styles.platformText}>{Platform.OS} {Platform.Version}</Text>
        </View>
      </View>

      {/* Logs */}
@@ -510,6 +606,13 @@ const styles = StyleSheet.create({
  controls: {
    flexDirection: 'row',
    padding: Spacing.md,
    paddingBottom: Spacing.sm,
    gap: 10,
  },
  controlsRow2: {
    flexDirection: 'row',
    paddingHorizontal: Spacing.md,
    paddingBottom: Spacing.md,
    gap: 10,
    borderBottomWidth: 1,
    borderBottomColor: '#333',
@@ -563,6 +666,29 @@ const styles = StyleSheet.create({
    paddingHorizontal: 12,
    borderRadius: 10,
  },
  speakerButton: {
    alignItems: 'center',
    justifyContent: 'center',
    paddingVertical: 10,
    paddingHorizontal: 16,
    borderRadius: 10,
  },
  speakerOn: {
    backgroundColor: '#f59e0b', // Orange when speaker is ON
  },
  speakerOff: {
    backgroundColor: '#4b5563', // Gray when earpiece
  },
  platformBadge: {
    flex: 1,
    alignItems: 'flex-end',
    justifyContent: 'center',
  },
  platformText: {
    color: '#888',
    fontSize: 11,
    fontWeight: '500',
  },
  smallButtonText: {
    color: '#fff',
    fontSize: 10,
@@ -10,6 +10,8 @@ import { AppColors, FontSizes, Spacing } from '@/constants/theme';
const DASHBOARD_URL = 'https://react.eluxnetworks.net/dashboard';
// URLs that indicate session expired (login page)
const LOGIN_URL_PATTERNS = ['/login', '/auth', '/signin'];
// Text patterns that indicate session expired (shown in page content)
const SESSION_EXPIRED_PATTERNS = ['session expired', 'session has expired', 'token expired', 'please log in'];

export default function HomeScreen() {
  const { user } = useAuth();
@@ -134,12 +136,13 @@ export default function HomeScreen() {
    }
  }, [isRefreshingToken]);

  // JavaScript to inject auth token into localStorage
  // JavaScript to inject auth token into localStorage and monitor for session expiry
  // Web app expects auth2 as JSON: {username, token, user_id}
  const injectedJavaScript = authToken
    ? `
      (function() {
        try {
          // Inject auth data
          var authData = {
            username: '${userName || ''}',
            token: '${authToken}',
@@ -147,6 +150,33 @@ export default function HomeScreen() {
          };
          localStorage.setItem('auth2', JSON.stringify(authData));
          console.log('Auth injected:', authData.username);

          // Monitor page content for session expired messages
          var sessionExpiredPatterns = ${JSON.stringify(SESSION_EXPIRED_PATTERNS)};

          function checkForSessionExpired() {
            var bodyText = (document.body?.innerText || '').toLowerCase();
            for (var i = 0; i < sessionExpiredPatterns.length; i++) {
              if (bodyText.includes(sessionExpiredPatterns[i])) {
                console.log('Session expired detected in page content');
                window.ReactNativeWebView?.postMessage(JSON.stringify({ type: 'SESSION_EXPIRED' }));
                return true;
              }
            }
            return false;
          }

          // Check after page loads and periodically
          setTimeout(checkForSessionExpired, 1000);
          setTimeout(checkForSessionExpired, 3000);

          // Also observe DOM changes for dynamic content
          var observer = new MutationObserver(function() {
            checkForSessionExpired();
          });
          if (document.body) {
            observer.observe(document.body, { childList: true, subtree: true });
          }
        } catch(e) {
          console.error('Failed to inject token:', e);
        }
@@ -199,6 +229,19 @@ export default function HomeScreen() {
    setIsLoading(false);
  };

  // Handle messages from WebView (session expired detection)
  const handleWebViewMessage = useCallback((event: { nativeEvent: { data: string } }) => {
    try {
      const message = JSON.parse(event.nativeEvent.data);
      if (message.type === 'SESSION_EXPIRED') {
        console.log('WebView reported session expired, refreshing token...');
        handleTokenRefresh();
      }
    } catch {
      // Ignore non-JSON messages
    }
  }, [handleTokenRefresh]);

  // Wait for token to load
  if (!isTokenLoaded) {
    return (
@@ -285,6 +328,7 @@ export default function HomeScreen() {
        onHttpError={handleError}
        onNavigationStateChange={handleNavigationStateChange}
        onShouldStartLoadWithRequest={handleShouldStartLoadWithRequest}
        onMessage={handleWebViewMessage}
        javaScriptEnabled={true}
        domStorageEnabled={true}
        startInLoadingState={true}
@@ -9,30 +9,16 @@
 * Features:
 * - Phone call-like UI with Julia avatar
 * - Call duration timer
 * - Mute/unmute
 * - Debug logs panel (collapsible)
 * - Mute/unmute and speaker toggle
 * - Proper cleanup on unmount
 */

import React, { useEffect, useRef } from 'react';
import {
  View,
  Text,
  StyleSheet,
  TouchableOpacity,
  Platform,
  Animated,
  Easing,
  Dimensions,
  ScrollView,
  Alert,
} from 'react-native';
import * as Clipboard from 'expo-clipboard';
import { View, Text, StyleSheet, TouchableOpacity, Animated, Easing, Dimensions } from 'react-native';
import { Ionicons } from '@expo/vector-icons';
import { SafeAreaView } from 'react-native-safe-area-context';
import { useRouter } from 'expo-router';
import { AppColors, BorderRadius, FontSizes, Spacing } from '@/constants/theme';
import { VOICE_NAME } from '@/services/livekitService';
import { useVoiceTranscript } from '@/contexts/VoiceTranscriptContext';
import { useLiveKitRoom, ConnectionState } from '@/hooks/useLiveKitRoom';

@@ -42,26 +28,18 @@ export default function VoiceCallScreen() {
  const router = useRouter();
  const { clearTranscript, addTranscriptEntry } = useVoiceTranscript();

  // Debug logs panel state
  const [showLogs, setShowLogs] = React.useState(false);
  const [logsMinimized, setLogsMinimized] = React.useState(false);
  const logsScrollRef = useRef<ScrollView>(null);

  // LiveKit hook - ALL logic is here
  const {
    state,
    error,
    roomName,
    callDuration,
    isMuted,
    isAgentSpeaking,
    canPlayAudio,
    logs,
    participantCount,
    connect,
    disconnect,
    toggleMute,
    clearLogs,
  } = useLiveKitRoom({
    userId: `user-${Date.now()}`,
    onTranscript: (role, text) => {
@@ -159,13 +137,6 @@ export default function VoiceCallScreen() {
    router.back();
  };

  // Copy logs to clipboard
  const copyLogs = async () => {
    const logsText = logs.map(l => `[${l.timestamp}] ${l.message}`).join('\n');
    await Clipboard.setStringAsync(logsText);
    Alert.alert('Copied!', `${logs.length} log entries copied to clipboard`);
  };

  // Format duration as MM:SS
  const formatDuration = (seconds: number): string => {
    const mins = Math.floor(seconds / 60);
@@ -224,27 +195,13 @@ export default function VoiceCallScreen() {
      {/* Background gradient effect */}
      <View style={styles.backgroundGradient} />

      {/* Top bar */}
      {/* Top bar - minimal */}
      <View style={styles.topBar}>
        <TouchableOpacity style={styles.backButton} onPress={handleEndCall}>
          <Ionicons name="chevron-down" size={28} color={AppColors.white} />
        </TouchableOpacity>
        <View style={styles.topBarCenter}>
          <Text style={styles.encryptedText}>LiveKit + Deepgram</Text>
          {roomName && (
            <Text style={styles.roomNameText}>{roomName}</Text>
          )}
        </View>
        <TouchableOpacity
          style={styles.logsButton}
          onPress={() => setShowLogs(!showLogs)}
        >
          <Ionicons
            name={showLogs ? 'code-slash' : 'code'}
            size={22}
            color={showLogs ? AppColors.success : AppColors.white}
          />
        </TouchableOpacity>
        <View style={styles.topBarCenter} />
        <View style={styles.backButton} />
      </View>

      {/* Main content */}
@@ -269,7 +226,6 @@ export default function VoiceCallScreen() {

        {/* Name and status */}
        <Text style={styles.name}>Julia AI</Text>
        <Text style={styles.voiceName}>{VOICE_NAME} voice</Text>

        {isActive ? (
          <View style={styles.statusContainer}>
@@ -297,65 +253,8 @@ export default function VoiceCallScreen() {
        )}
      </View>

      {/* Debug logs panel */}
      {showLogs && (
        <View style={[styles.logsPanel, logsMinimized && styles.logsPanelMinimized]}>
          <View style={styles.logsPanelHeader}>
            <TouchableOpacity
              style={styles.minimizeButton}
              onPress={() => setLogsMinimized(!logsMinimized)}
            >
              <Ionicons
                name={logsMinimized ? 'chevron-up' : 'chevron-down'}
                size={20}
                color={AppColors.white}
              />
            </TouchableOpacity>
            <Text style={styles.logsPanelTitle}>
              Logs ({logs.length}) • State: {state}
            </Text>
            <View style={styles.logsPanelButtons}>
              <TouchableOpacity style={styles.copyButton} onPress={copyLogs}>
                <Ionicons name="copy-outline" size={16} color={AppColors.white} />
              </TouchableOpacity>
              <TouchableOpacity style={styles.clearButton} onPress={clearLogs}>
                <Ionicons name="trash-outline" size={16} color={AppColors.white} />
              </TouchableOpacity>
              <TouchableOpacity
                style={styles.closeLogsButton}
                onPress={() => setShowLogs(false)}
              >
                <Ionicons name="close" size={18} color={AppColors.white} />
              </TouchableOpacity>
            </View>
          </View>
          {!logsMinimized && (
            <ScrollView
              ref={logsScrollRef}
              style={styles.logsScrollView}
              onContentSizeChange={() => logsScrollRef.current?.scrollToEnd()}
            >
              {logs.map((log, index) => (
                <Text
                  key={index}
                  style={[
                    styles.logEntry,
                    log.level === 'error' && styles.logEntryError,
                    log.level === 'warn' && styles.logEntryWarn,
                  ]}
                >
                  [{log.timestamp}] {log.message}
                </Text>
              ))}
              {logs.length === 0 && (
                <Text style={styles.logEntryEmpty}>Waiting for events...</Text>
              )}
            </ScrollView>
          )}
        </View>
      )}

      {/* Bottom controls */}
      {/* Bottom controls - centered layout with 2 buttons */}
      <View style={styles.controls}>
        {/* Mute button */}
        <TouchableOpacity
@@ -375,12 +274,6 @@ export default function VoiceCallScreen() {
        <TouchableOpacity style={styles.endCallButton} onPress={handleEndCall}>
          <Ionicons name="call" size={32} color={AppColors.white} />
        </TouchableOpacity>

        {/* Speaker button (placeholder for future) */}
        <TouchableOpacity style={styles.controlButton} disabled>
          <Ionicons name="volume-high" size={28} color={AppColors.white} />
          <Text style={styles.controlLabel}>Speaker</Text>
        </TouchableOpacity>
      </View>
    </SafeAreaView>
  );
@@ -419,21 +312,6 @@ const styles = StyleSheet.create({
    flex: 1,
    alignItems: 'center',
  },
  encryptedText: {
    fontSize: FontSizes.xs,
    color: 'rgba(255,255,255,0.5)',
  },
  roomNameText: {
    fontSize: 10,
    color: 'rgba(255,255,255,0.3)',
    marginTop: 2,
  },
  logsButton: {
    width: 44,
    height: 44,
    justifyContent: 'center',
    alignItems: 'center',
  },
  content: {
    flex: 1,
    alignItems: 'center',
@@ -480,11 +358,6 @@ const styles = StyleSheet.create({
    color: AppColors.white,
    marginBottom: Spacing.xs,
  },
  voiceName: {
    fontSize: FontSizes.sm,
    color: 'rgba(255,255,255,0.6)',
    marginBottom: Spacing.md,
  },
  statusContainer: {
    flexDirection: 'row',
    alignItems: 'center',
@@ -525,10 +398,11 @@ const styles = StyleSheet.create({
  },
  controls: {
    flexDirection: 'row',
    justifyContent: 'space-evenly',
    justifyContent: 'center',
    alignItems: 'center',
    paddingVertical: Spacing.xl,
    paddingHorizontal: Spacing.lg,
    gap: 48, // Space between Mute and End Call buttons
  },
  controlButton: {
    alignItems: 'center',
@@ -561,80 +435,4 @@ const styles = StyleSheet.create({
    shadowRadius: 8,
    elevation: 8,
  },
  // Logs panel styles
  logsPanel: {
    position: 'absolute',
    top: 80,
    left: Spacing.md,
    right: Spacing.md,
    bottom: 180,
    backgroundColor: 'rgba(0,0,0,0.9)',
    borderRadius: BorderRadius.lg,
    padding: Spacing.sm,
    zIndex: 100,
  },
  logsPanelMinimized: {
    bottom: 'auto' as any,
    height: 44,
  },
  logsPanelHeader: {
    flexDirection: 'row',
    justifyContent: 'space-between',
    alignItems: 'center',
    marginBottom: Spacing.sm,
    paddingBottom: Spacing.sm,
    borderBottomWidth: 1,
    borderBottomColor: 'rgba(255,255,255,0.2)',
  },
  minimizeButton: {
    padding: 4,
    marginRight: Spacing.sm,
  },
  logsPanelTitle: {
    flex: 1,
    fontSize: FontSizes.sm,
    fontWeight: '600',
    color: AppColors.white,
  },
  logsPanelButtons: {
    flexDirection: 'row',
    alignItems: 'center',
    gap: 8,
  },
  copyButton: {
    padding: 6,
    backgroundColor: 'rgba(255,255,255,0.15)',
    borderRadius: BorderRadius.sm,
  },
  clearButton: {
    padding: 6,
    backgroundColor: 'rgba(255,255,255,0.15)',
    borderRadius: BorderRadius.sm,
  },
  closeLogsButton: {
    padding: 6,
  },
  logsScrollView: {
    flex: 1,
  },
  logEntry: {
    fontSize: 11,
    fontFamily: Platform.OS === 'ios' ? 'Menlo' : 'monospace',
    color: '#4ade80',
    lineHeight: 16,
    marginBottom: 2,
  },
  logEntryError: {
    color: '#f87171',
  },
  logEntryWarn: {
    color: '#fbbf24',
  },
  logEntryEmpty: {
    fontSize: FontSizes.xs,
    color: 'rgba(255,255,255,0.5)',
    fontStyle: 'italic',
    textAlign: 'center',
    marginTop: Spacing.lg,
  },
});
eas.json (8 changed lines)
@@ -16,12 +16,18 @@
    }
  },
  "preview": {
    "distribution": "internal"
    "distribution": "internal",
    "android": {
      "buildType": "apk"
    }
  },
  "production": {
    "autoIncrement": true,
    "ios": {
      "credentialsSource": "remote"
    },
    "android": {
      "buildType": "apk"
    }
  }
},
@@ -26,6 +26,7 @@ import {
  stopAudioSession,
  reconfigureAudioForPlayback,
} from '@/utils/audioSession';
import { callManager } from '@/services/callManager';

// Connection states
export type ConnectionState =
@@ -103,6 +104,7 @@ export function useLiveKitRoom(options: UseLiveKitRoomOptions): UseLiveKitRoomRe
  const connectionIdRef = useRef(0);
  const isUnmountingRef = useRef(false);
  const appStateRef = useRef<AppStateStatus>(AppState.currentState);
  const callIdRef = useRef<string | null>(null);

  // ===================
  // LOGGING FUNCTIONS
@@ -158,10 +160,27 @@ export function useLiveKitRoom(options: UseLiveKitRoomOptions): UseLiveKitRoomRe
    // Prevent multiple concurrent connection attempts
    const currentConnectionId = ++connectionIdRef.current;

    // Generate unique call ID for this session
    const callId = `call-${Date.now()}-${Math.random().toString(36).substr(2, 9)}`;
    callIdRef.current = callId;

    logInfo('========== STARTING VOICE CALL ==========');
    logInfo(`User ID: ${userId}`);
    logInfo(`Platform: ${Platform.OS}`);
    logInfo(`Connection ID: ${currentConnectionId}`);
    logInfo(`Call ID: ${callId}`);

    // Register with CallManager - this will disconnect any existing call
    logInfo('Registering call with CallManager...');
    await callManager.registerCall(callId, async () => {
      logInfo('CallManager requested disconnect (another call starting)');
      if (roomRef.current) {
        await roomRef.current.disconnect();
        roomRef.current = null;
      }
      await stopAudioSession();
    });
    logSuccess('Call registered with CallManager');

    // Check if already connected
    if (roomRef.current) {
@@ -505,6 +524,13 @@ export function useLiveKitRoom(options: UseLiveKitRoomOptions): UseLiveKitRoomRe
    logInfo('========== DISCONNECTING ==========');
    setState('disconnecting');

    // Unregister from CallManager
    if (callIdRef.current) {
      logInfo(`Unregistering call: ${callIdRef.current}`);
      callManager.unregisterCall(callIdRef.current);
      callIdRef.current = null;
    }

    try {
      if (roomRef.current) {
        logInfo('Disconnecting from room...');
@@ -603,6 +629,12 @@ export function useLiveKitRoom(options: UseLiveKitRoomOptions): UseLiveKitRoomRe

    // Cleanup
    const cleanup = async () => {
      // Unregister from CallManager
      if (callIdRef.current) {
        callManager.unregisterCall(callIdRef.current);
        callIdRef.current = null;
      }

      if (roomRef.current) {
        try {
          await roomRef.current.disconnect();
@@ -1,7 +1,7 @@
"""
WellNuo Voice Agent - Julia AI
LiveKit Agents Cloud deployment
Uses WellNuo voice_ask API for LLM responses, Deepgram for STT/TTS
Uses WellNuo ask_wellnuo_ai API for LLM responses, Deepgram for STT/TTS
"""

import logging
@@ -138,7 +138,7 @@ def normalize_question(user_message: str) -> str:


class WellNuoLLM(llm.LLM):
    """Custom LLM that uses WellNuo voice_ask API."""
    """Custom LLM that uses WellNuo ask_wellnuo_ai API."""

    def __init__(self):
        super().__init__()
@@ -178,7 +178,7 @@
        raise Exception("Failed to authenticate with WellNuo API")

    async def get_response(self, user_message: str) -> str:
        """Call WellNuo voice_ask API and return response."""
        """Call WellNuo ask_wellnuo_ai API and return response."""
        if not user_message:
            return "I'm here to help. What would you like to know?"

@@ -191,8 +191,10 @@
        token = await self._ensure_token()

        async with aiohttp.ClientSession() as session:
            # Using ask_wellnuo_ai - latency ~1-2s after warmup
            # Provides more comprehensive responses than voice_ask
            data = {
                "function": "voice_ask",
                "function": "ask_wellnuo_ai",
                "clientId": "MA_001",
                "user_name": WELLNUO_USER,
                "token": token,
@@ -295,7 +297,7 @@ async def entrypoint(ctx: JobContext):
    await ctx.connect()

    logger.info(f"Starting Julia AI session in room {ctx.room.name}")
    logger.info(f"Using WellNuo voice_ask API with deployment_id: {DEPLOYMENT_ID}")
    logger.info(f"Using WellNuo ask_wellnuo_ai API with deployment_id: {DEPLOYMENT_ID}")

    session = AgentSession(
        # Deepgram Nova-2 for accurate speech-to-text
maestro/voice-call-test.yaml (new file, 59 lines)
@@ -0,0 +1,59 @@
appId: com.wellnuo.BluetoothScanner
---
# WellNuoLite Voice Call Test
# Tests the voice call functionality with self-hosted LiveKit

- launchApp:
    clearState: false

# Wait for app to load - Dashboard screen should appear
- extendedWaitUntil:
    visible: "Dashboard"
    timeout: 15000

# Wait extra time for loading modal to disappear
- extendedWaitUntil:
    notVisible: "Please wait"
    timeout: 20000

# Take screenshot of Dashboard
- takeScreenshot: 01-dashboard-loaded

# Tap on Voice Debug tab (3rd tab in bottom navigation)
- tapOn:
    point: "75%,97%"

# Wait for Voice Debug screen to load
- extendedWaitUntil:
    visible: "Voice Debug"
    timeout: 10000

# Take screenshot of Voice Debug screen
- takeScreenshot: 02-voice-debug-screen

# Tap Start Voice Call button (green button at top ~15% from top)
- tapOn:
    point: "50%,15%"

# Wait for voice call screen to appear
- extendedWaitUntil:
    visible: "Julia AI"
    timeout: 15000

# Take screenshot of call screen
- takeScreenshot: 03-voice-call-started

# Wait a bit for connection attempt
- swipe:
    direction: DOWN
    duration: 500

# Take screenshot of current state
- takeScreenshot: 04-voice-call-state

# End call - tap the red end call button at bottom
- tapOn:
    point: "50%,90%"

# Take final screenshot
- takeScreenshot: 05-call-ended
package-lock.json (generated, 10 changed lines)
@@ -13,6 +13,7 @@
        "@expo/vector-icons": "^15.0.3",
        "@livekit/react-native": "^2.9.6",
        "@livekit/react-native-expo-plugin": "^1.0.1",
        "@notifee/react-native": "^9.1.8",
        "@react-navigation/bottom-tabs": "^7.4.0",
        "@react-navigation/elements": "^2.6.3",
        "@react-navigation/native": "^7.1.8",
@@ -3383,6 +3384,15 @@
        "node": ">=12.4.0"
      }
    },
    "node_modules/@notifee/react-native": {
      "version": "9.1.8",
      "resolved": "https://registry.npmjs.org/@notifee/react-native/-/react-native-9.1.8.tgz",
      "integrity": "sha512-Az/dueoPerJsbbjRxu8a558wKY+gONUrfoy3Hs++5OqbeMsR0dYe6P+4oN6twrLFyzAhEA1tEoZRvQTFDRmvQg==",
      "license": "Apache-2.0",
      "peerDependencies": {
        "react-native": "*"
      }
    },
    "node_modules/@radix-ui/primitive": {
      "version": "1.1.3",
      "resolved": "https://registry.npmjs.org/@radix-ui/primitive/-/primitive-1.1.3.tgz",
@@ -16,6 +16,7 @@
    "@expo/vector-icons": "^15.0.3",
    "@livekit/react-native": "^2.9.6",
    "@livekit/react-native-expo-plugin": "^1.0.1",
    "@notifee/react-native": "^9.1.8",
    "@react-navigation/bottom-tabs": "^7.4.0",
    "@react-navigation/elements": "^2.6.3",
    "@react-navigation/native": "^7.1.8",
@@ -100,13 +100,20 @@ class ApiService {
      const userName = await SecureStore.getItemAsync('userName');
      const password = await SecureStore.getItemAsync('userPassword');

      console.log('[API] refreshToken - userName:', userName ? 'exists' : 'missing');
      console.log('[API] refreshToken - password:', password ? 'exists' : 'missing');

      if (!userName || !password) {
        console.log('[API] refreshToken - NO_CREDENTIALS');
        return { ok: false, error: { message: 'No stored credentials', code: 'NO_CREDENTIALS' } };
      }

      console.log('Refreshing token for user:', userName);
      return await this.login(userName, password);
      console.log('[API] Refreshing token for user:', userName);
      const result = await this.login(userName, password);
      console.log('[API] refreshToken result:', result.ok ? 'SUCCESS' : result.error?.message);
      return result;
    } catch (error) {
      console.error('[API] refreshToken error:', error);
      return {
        ok: false,
        error: { message: 'Failed to refresh token', code: 'REFRESH_ERROR' }
services/callManager.ts (new file, 111 lines)
@@ -0,0 +1,111 @@
/**
 * CallManager - Singleton to manage active voice calls
 *
 * Ensures only ONE voice call can be active at a time per device.
 * If a new call is started while another is active, the old one is disconnected first.
 *
 * This addresses the LiveKit concurrent agent jobs limit (5 per project).
 */

type DisconnectCallback = () => Promise<void>;

class CallManager {
  private static instance: CallManager;
  private activeCallId: string | null = null;
  private disconnectCallback: DisconnectCallback | null = null;

  private constructor() {
    // Singleton
  }

  static getInstance(): CallManager {
    if (!CallManager.instance) {
      CallManager.instance = new CallManager();
    }
    return CallManager.instance;
  }

  /**
   * Register a new call. If there's an existing call, disconnect it first.
   * @param callId Unique ID for this call
   * @param onDisconnect Callback to disconnect this call
   * @returns true if this call can proceed
   */
  async registerCall(
    callId: string,
    onDisconnect: DisconnectCallback
  ): Promise<boolean> {
    console.log(`[CallManager] Registering call: ${callId}`);

    // If there's an active call, disconnect it first
    if (this.activeCallId && this.activeCallId !== callId) {
      console.log(
        `[CallManager] Active call exists (${this.activeCallId}), disconnecting...`
      );

      if (this.disconnectCallback) {
        try {
          await this.disconnectCallback();
          console.log(`[CallManager] Previous call disconnected`);
        } catch (err) {
          console.error(`[CallManager] Error disconnecting previous call:`, err);
        }
      }
    }

    // Register the new call
    this.activeCallId = callId;
    this.disconnectCallback = onDisconnect;
    console.log(`[CallManager] Call ${callId} is now active`);

    return true;
  }

  /**
   * Unregister a call when it ends
   * @param callId The call ID to unregister
   */
  unregisterCall(callId: string): void {
    if (this.activeCallId === callId) {
      console.log(`[CallManager] Unregistering call: ${callId}`);
      this.activeCallId = null;
      this.disconnectCallback = null;
    } else {
      console.log(
        `[CallManager] Call ${callId} is not active, ignoring unregister`
      );
    }
  }

  /**
   * Check if there's an active call
   */
  hasActiveCall(): boolean {
    return this.activeCallId !== null;
  }

  /**
   * Get the current active call ID
   */
  getActiveCallId(): string | null {
    return this.activeCallId;
  }

  /**
   * Force disconnect the active call (if any)
   */
  async forceDisconnect(): Promise<void> {
    if (this.activeCallId && this.disconnectCallback) {
      console.log(`[CallManager] Force disconnecting call: ${this.activeCallId}`);
      try {
        await this.disconnectCallback();
      } catch (err) {
        console.error(`[CallManager] Error force disconnecting:`, err);
      }
      this.activeCallId = null;
      this.disconnectCallback = null;
    }
  }
}

export const callManager = CallManager.getInstance();
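
For orientation, a minimal usage sketch of this singleton; the call-ID format mirrors the one generated in `useLiveKitRoom` above, and the `room` parameter is a hypothetical stand-in for the connected LiveKit room:

```typescript
import { callManager } from '@/services/callManager';

// Hypothetical wrapper: register before connecting, unregister when done.
async function guardedCall(room: { disconnect: () => Promise<void> }) {
  const callId = `call-${Date.now()}`;
  await callManager.registerCall(callId, async () => {
    // Runs if another call starts while this one is still active.
    await room.disconnect();
  });
  // ... connect and run the call ...
  return () => callManager.unregisterCall(callId);
}
```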
specs/FEATURE-002-livekit-voice-call.md (new file, 336 lines)
@@ -0,0 +1,336 @@
# FEATURE-002: LiveKit Voice Call with Julia AI

## Summary

A full-featured voice call with Julia AI over LiveKit Cloud. The user taps the "Start Voice Call" button, a phone-style call screen opens, and they can talk to Julia AI by voice.

## Status: 🔴 Not Started (full rework required)

## Priority: Critical

## Problem Statement

The current implementation has the following problems:
1. **STT (speech-to-text) is unreliable**: the microphone is sometimes detected, sometimes not
2. **TTS works**: Julia's voice is audible
3. **The code is complex and tangled**: lots of legacy code, polyfills, and hacks
4. **No clear architecture**: everything lives in one file, voice-call.tsx

## Root Cause Analysis

### Why the microphone is unreliable:
1. **iOS AudioSession**: misconfiguration or a race condition during setup
2. **registerGlobals()**: the WebRTC polyfills may not finish initializing in time
3. **Permissions**: microphone access may be denied, or the mic may be held by another process
4. **Event handling**: LiveKit events can get lost

### What works:
- LiveKit Cloud connection ✅
- Token generation ✅
- TTS (Deepgram Asteria) ✅
- Backend agent (Julia AI) ✅

---
## Architecture

### System Overview

```
┌──────────────────────────────────────────────────────────────────┐
│                       WellNuo Lite App (iOS)                      │
├──────────────────────────────────────────────────────────────────┤
│                                                                   │
│  ┌──────────────┐    ┌──────────────────┐   ┌──────────────────┐ │
│  │  Voice Tab   │───▶│ VoiceCallScreen  │──▶│  LiveKit Room    │ │
│  │   (entry)    │    │  (fullscreen)    │   │  (WebRTC)        │ │
│  └──────────────┘    └──────────────────┘   └──────────────────┘ │
│                              │                       │            │
│                              ▼                       ▼            │
│                      ┌──────────────┐       ┌──────────────┐     │
│                      │useLiveKitRoom│       │ AudioSession │     │
│                      │   (hook)     │       │ (iOS native) │     │
│                      └──────────────┘       └──────────────┘     │
│                                                                   │
└──────────────────────────────────────────────────────────────────┘
                                 │
                                 │ WebSocket + WebRTC
                                 ▼
┌──────────────────────────────────────────────────────────────────┐
│                          LiveKit Cloud                            │
├──────────────────────────────────────────────────────────────────┤
│  Room: wellnuo-{userId}-{timestamp}                               │
│  Participants: user + julia-agent                                 │
│  Audio Tracks: bidirectional                                      │
└──────────────────────────────────────────────────────────────────┘
                                 │
                                 │ Agent dispatch
                                 ▼
┌──────────────────────────────────────────────────────────────────┐
│                      Julia AI Agent (Python)                      │
├──────────────────────────────────────────────────────────────────┤
│  STT: Deepgram Nova-2                                             │
│  LLM: WellNuo voice_ask API                                       │
│  TTS: Deepgram Aura Asteria                                       │
│  Framework: LiveKit Agents SDK 1.3.11                             │
└──────────────────────────────────────────────────────────────────┘
```

### Data Flow

```
User speaks → iOS Mic → WebRTC → LiveKit Cloud → Agent → Deepgram STT
                                                              │
                                                              ▼
                                                     WellNuo API (LLM)
                                                              │
                                                              ▼
Agent receives text ← LiveKit Cloud ← WebRTC ← Deepgram TTS (audio)
        │
        ▼
iOS Speaker → User hears Julia
```

---

## Technical Requirements

### Dependencies (package.json)

```json
{
  "@livekit/react-native": "^2.x",
  "livekit-client": "^2.x",
  "expo-keep-awake": "^14.x"
}
```

### iOS Permissions (app.json)

```json
{
  "ios": {
    "infoPlist": {
      "NSMicrophoneUsageDescription": "WellNuo needs microphone access for voice calls with Julia AI",
      "UIBackgroundModes": ["audio", "voip"]
    }
  }
}
```

### Token Server (already exists)

- **URL**: `https://wellnuo.smartlaunchhub.com/julia/token`
- **Method**: POST
- **Body**: `{ "userId": "string" }`
- **Response**: `{ "success": true, "data": { "token", "roomName", "wsUrl" } }`
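
A minimal sketch of calling this endpoint, assuming only the request/response shape listed above; the name `fetchLiveKitToken` and the error handling are illustrative, not the shipped `livekitService` code:

```typescript
interface TokenResponse {
  success: boolean;
  data: { token: string; roomName: string; wsUrl: string };
}

async function fetchLiveKitToken(userId: string): Promise<TokenResponse['data']> {
  const res = await fetch('https://wellnuo.smartlaunchhub.com/julia/token', {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({ userId }),
  });
  const json: TokenResponse = await res.json();
  if (!json.success) {
    throw new Error('Token server returned success: false');
  }
  return json.data; // { token, roomName, wsUrl }
}
```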
---

## Implementation Steps

### Phase 1: Cleanup (DELETE old code)

- [ ] 1.1. Delete `app/voice-call.tsx` (current broken implementation)
- [ ] 1.2. Keep `app/(tabs)/voice.tsx` (entry point) but simplify
- [ ] 1.3. Keep `services/livekitService.ts` (token fetching)
- [ ] 1.4. Keep `contexts/VoiceTranscriptContext.tsx` (transcript storage)
- [ ] 1.5. Delete `components/VoiceIndicator.tsx` (unused)
- [ ] 1.6. Delete `polyfills/livekit-globals.ts` (not needed with proper setup)

### Phase 2: New Architecture

- [ ] 2.1. Create `hooks/useLiveKitRoom.ts` — encapsulate all LiveKit logic
- [ ] 2.2. Create `app/voice-call.tsx` — simple UI component using the hook
- [ ] 2.3. Create `utils/audioSession.ts` — iOS AudioSession helper

### Phase 3: useLiveKitRoom Hook

**File**: `hooks/useLiveKitRoom.ts`

```typescript
interface UseLiveKitRoomOptions {
  userId: string;
  onTranscript?: (role: 'user' | 'assistant', text: string) => void;
}

interface UseLiveKitRoomReturn {
  // Connection state
  state: 'idle' | 'connecting' | 'connected' | 'reconnecting' | 'disconnected' | 'error';
  error: string | null;

  // Call info
  roomName: string | null;
  callDuration: number; // seconds

  // Audio state
  isMuted: boolean;
  isSpeaking: boolean; // agent is speaking

  // Actions
  connect: () => Promise<void>;
  disconnect: () => Promise<void>;
  toggleMute: () => void;
}
```
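
To illustrate the intended division of labor, a sketch of a screen consuming this hook; the component is hypothetical and omits styling:

```typescript
import React, { useEffect } from 'react';
import { Text, TouchableOpacity, View } from 'react-native';
import { useLiveKitRoom } from '@/hooks/useLiveKitRoom';

function VoiceCallSketch() {
  const { state, error, callDuration, isMuted, connect, disconnect, toggleMute } =
    useLiveKitRoom({
      userId: 'demo-user',
      onTranscript: (role, text) => console.log(`[${role}] ${text}`),
    });

  // Connect on mount, disconnect on unmount.
  useEffect(() => {
    connect();
    return () => {
      disconnect();
    };
  }, []);

  return (
    <View>
      <Text>{error ?? `${state} · ${callDuration}s`}</Text>
      <TouchableOpacity onPress={toggleMute}>
        <Text>{isMuted ? 'Unmute' : 'Mute'}</Text>
      </TouchableOpacity>
    </View>
  );
}
```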
**Implementation requirements**:
1. MUST call `registerGlobals()` BEFORE importing `livekit-client`
2. MUST configure iOS AudioSession BEFORE connecting to room
3. MUST handle all RoomEvents properly
4. MUST cleanup on unmount (disconnect, stop audio session)
5. MUST handle background/foreground transitions

### Phase 4: iOS AudioSession Configuration

**Critical for microphone to work!**

```typescript
// utils/audioSession.ts
import { AudioSession } from '@livekit/react-native';
import { Platform } from 'react-native';

export async function configureAudioForVoiceCall(): Promise<void> {
  if (Platform.OS !== 'ios') return;

  // Step 1: Set Apple audio configuration
  await AudioSession.setAppleAudioConfiguration({
    audioCategory: 'playAndRecord',
    audioCategoryOptions: [
      'allowBluetooth',
      'allowBluetoothA2DP',
      'defaultToSpeaker',
      'mixWithOthers',
    ],
    audioMode: 'voiceChat',
  });

  // Step 2: Configure output
  await AudioSession.configureAudio({
    ios: {
      defaultOutput: 'speaker',
    },
  });

  // Step 3: Start session
  await AudioSession.startAudioSession();
}

export async function stopAudioSession(): Promise<void> {
  if (Platform.OS !== 'ios') return;
  await AudioSession.stopAudioSession();
}
```

### Phase 5: Voice Call Screen UI

**File**: `app/voice-call.tsx`

Simple, clean UI:
- Avatar with Julia "J" letter
- Call duration timer
- Status text (Connecting... / Connected / Julia is speaking...)
- Mute button
- End call button
- Debug logs toggle (for development)

**NO complex logic in this file** — all LiveKit logic in the hook!

### Phase 6: Testing Checklist

- [ ] 6.1. Fresh app launch → Start call → Can hear Julia greeting
- [ ] 6.2. Speak → Julia responds → Conversation works
- [ ] 6.3. Mute → Unmute → Still works
- [ ] 6.4. End call → Clean disconnect
- [ ] 6.5. App to background → Audio continues
- [ ] 6.6. App to foreground → Still connected
- [ ] 6.7. Multiple calls in a row → No memory leaks
- [ ] 6.8. No microphone permission → Shows error

---

## Files to Create/Modify

| File | Action | Description |
|------|--------|-------------|
| `hooks/useLiveKitRoom.ts` | CREATE | Main LiveKit hook with all logic |
| `utils/audioSession.ts` | CREATE | iOS AudioSession helpers |
| `app/voice-call.tsx` | REPLACE | Simple UI using the hook |
| `app/(tabs)/voice.tsx` | SIMPLIFY | Just entry point, remove debug UI |
| `services/livekitService.ts` | KEEP | Token fetching (already works) |
| `contexts/VoiceTranscriptContext.tsx` | KEEP | Transcript storage |
| `components/VoiceIndicator.tsx` | DELETE | Not needed |
| `polyfills/livekit-globals.ts` | DELETE | Not needed |

---

## Key Principles

### 1. Separation of Concerns
- **Hook** handles ALL LiveKit/WebRTC logic
- **Screen** only renders UI based on hook state
- **Utils** for platform-specific code (AudioSession)

### 2. Proper Initialization Order
```
1. registerGlobals() — WebRTC polyfills
2. configureAudioForVoiceCall() — iOS audio
3. getToken() — fetch from server
4. room.connect() — connect to LiveKit
5. room.localParticipant.setMicrophoneEnabled(true) — enable mic
```
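
The same sequence sketched as code, under assumptions this spec already makes: `configureAudioForVoiceCall` is the Phase 4 helper, and `getToken(userId)` is assumed to resolve to the `{ token, wsUrl }` pair from the token server. Dynamic imports keep `registerGlobals()` ahead of any `livekit-client` load:

```typescript
async function connectInOrder(userId: string) {
  // 1. WebRTC polyfills - must run before livekit-client is imported.
  const { registerGlobals } = await import('@livekit/react-native');
  registerGlobals();

  // 2. iOS audio session before connecting.
  await configureAudioForVoiceCall();

  // 3. Token from the server.
  const { token, wsUrl } = await getToken(userId);

  // 4. Connect to LiveKit.
  const { Room } = await import('livekit-client');
  const room = new Room();
  await room.connect(wsUrl, token);

  // 5. Enable the microphone last.
  await room.localParticipant.setMicrophoneEnabled(true);
  return room;
}
```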
### 3. Proper Cleanup Order
```
1. room.disconnect() — leave room
2. stopAudioSession() — release iOS audio
3. Clear all refs and state
```
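
And the mirror-image teardown, sketched under the same assumptions:

```typescript
import type { Room } from 'livekit-client';

async function cleanupInOrder(room: Room | null) {
  // 1. Leave the room first so no tracks are still publishing.
  if (room) {
    await room.disconnect();
  }
  // 2. Release the iOS audio session.
  await stopAudioSession();
  // 3. Callers then clear their refs and reset state.
}
```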
### 4. Error Handling
- Every async operation wrapped in try/catch
- User-friendly error messages
- Automatic retry for network issues
- Graceful degradation
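
As one way to realize the retry point, a generic helper of the kind this implies (the helper itself is not part of the spec):

```typescript
async function withRetry<T>(fn: () => Promise<T>, attempts = 3, baseDelayMs = 1000): Promise<T> {
  let lastError: unknown;
  for (let attempt = 1; attempt <= attempts; attempt++) {
    try {
      return await fn();
    } catch (err) {
      lastError = err;
      if (attempt < attempts) {
        // Linear backoff before the next attempt.
        await new Promise(resolve => setTimeout(resolve, baseDelayMs * attempt));
      }
    }
  }
  throw lastError;
}
```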
---
|
||||
|
||||
## Success Criteria
|
||||
|
||||
1. ✅ User can start voice call and hear Julia greeting
|
||||
2. ✅ User can speak and Julia understands (STT works reliably)
|
||||
3. ✅ Julia responds with voice (TTS works)
|
||||
4. ✅ Conversation can continue back and forth
|
||||
5. ✅ Mute/unmute works
|
||||
6. ✅ End call cleanly disconnects
|
||||
7. ✅ No console errors or warnings
|
||||
8. ✅ Works on iOS device (not just simulator)
|
||||
|
||||
---
|
||||
|
||||
## Related Links
|
||||
|
||||
- [LiveKit React Native SDK](https://docs.livekit.io/client-sdk-js/react-native/)
|
||||
- [LiveKit Agents Python](https://docs.livekit.io/agents/)
|
||||
- [Deepgram STT/TTS](https://deepgram.com/)
|
||||
- [iOS AVAudioSession](https://developer.apple.com/documentation/avfaudio/avaudiosession)
|
||||
|
||||
---
|
||||
|
||||
## Notes
|
||||
|
||||
### Why previous approach failed:
|
||||
|
||||
1. **Too much code in one file** — voice-call.tsx had 900+ lines with all logic mixed
|
||||
2. **Polyfills applied wrong** — Event class polyfill was inside the component
|
||||
3. **AudioSession configured too late** — sometimes after connect() already started
|
||||
4. **No proper error boundaries** — errors silently failed
|
||||
5. **Race conditions** — multiple async operations without proper sequencing
|
||||
|
||||
### What's different this time:
|
||||
|
||||
1. **Hook-based architecture** — single source of truth for state
|
||||
2. **Proper initialization sequence** — documented and enforced
|
||||
3. **Clean separation** — UI knows nothing about WebRTC
|
||||
4. **Comprehensive logging** — every step logged for debugging
|
||||
5. **Test-driven** — write tests before implementation
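
For example, a first test could pin down the platform guard in `utils/audioSession.ts` (a minimal Jest sketch; the file path and mocking approach are assumptions):

```
// __tests__/audioSession.test.ts (hypothetical)
import { Platform } from 'react-native';
import { configureAudioForVoiceCall } from '@/utils/audioSession';

describe('configureAudioForVoiceCall', () => {
  it('resolves as a no-op on unsupported platforms', async () => {
    // Platform.OS is a getter in React Native, so spy on the accessor.
    jest.spyOn(Platform, 'OS', 'get').mockReturnValue('web');
    await expect(configureAudioForVoiceCall()).resolves.toBeUndefined();
  });
});
```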

268
utils/androidVoiceService.ts
Normal file

@ -0,0 +1,268 @@
/**
 * Android Voice Call Service
 *
 * Handles:
 * 1. Foreground Service notification - keeps call alive in background
 * 2. Battery Optimization check - warns user if optimization is enabled
 *
 * Only runs on Android - iOS handles background audio differently.
 */

import { Platform, Alert, Linking, NativeModules } from 'react-native';

// Notifee for foreground service
let notifee: any = null;

/**
 * Lazy load notifee to avoid issues on iOS
 */
async function getNotifee() {
  if (Platform.OS !== 'android') return null;

  if (!notifee) {
    try {
      notifee = (await import('@notifee/react-native')).default;
    } catch (e) {
      console.error('[AndroidVoiceService] Failed to load notifee:', e);
      return null;
    }
  }
  return notifee;
}

// Channel ID for voice call notifications
const CHANNEL_ID = 'voice-call-channel';
const NOTIFICATION_ID = 'voice-call-active';

/**
 * Create notification channel (required for Android 8+)
 */
async function createNotificationChannel(): Promise<void> {
  const notifeeModule = await getNotifee();
  if (!notifeeModule) return;

  try {
    await notifeeModule.createChannel({
      id: CHANNEL_ID,
      name: 'Voice Calls',
      description: 'Notifications for active voice calls with Julia AI',
      importance: 4, // HIGH - shows notification but no sound
      vibration: false,
      sound: undefined,
    });
    console.log('[AndroidVoiceService] Notification channel created');
  } catch (e) {
    console.error('[AndroidVoiceService] Failed to create channel:', e);
  }
}

/**
 * Start foreground service with notification
 * Call this when the voice call starts
 */
export async function startVoiceCallService(): Promise<void> {
  if (Platform.OS !== 'android') {
    console.log('[AndroidVoiceService] Skipping - not Android');
    return;
  }

  console.log('[AndroidVoiceService] Starting foreground service...');

  const notifeeModule = await getNotifee();
  if (!notifeeModule) {
    console.log('[AndroidVoiceService] Notifee not available');
    return;
  }

  try {
    // Create channel first
    await createNotificationChannel();

    // Display foreground service notification
    await notifeeModule.displayNotification({
      id: NOTIFICATION_ID,
      title: 'Julia AI - Call Active',
      body: 'Voice call in progress. Tap to return to the app.',
      android: {
        channelId: CHANNEL_ID,
        asForegroundService: true,
        ongoing: true, // Can't be swiped away
        autoCancel: false,
        smallIcon: 'ic_notification', // Uses default if not found
        color: '#22c55e', // Green color
        pressAction: {
          id: 'default',
          launchActivity: 'default',
        },
        // Important for keeping audio alive
        importance: 4, // HIGH
        category: 2, // CATEGORY_CALL
      },
    });

    console.log('[AndroidVoiceService] Foreground service started');
  } catch (e) {
    console.error('[AndroidVoiceService] Failed to start foreground service:', e);
  }
}

/**
 * Stop foreground service
 * Call this when the voice call ends
 */
export async function stopVoiceCallService(): Promise<void> {
  if (Platform.OS !== 'android') return;

  console.log('[AndroidVoiceService] Stopping foreground service...');

  const notifeeModule = await getNotifee();
  if (!notifeeModule) return;

  try {
    await notifeeModule.stopForegroundService();
    await notifeeModule.cancelNotification(NOTIFICATION_ID);
    console.log('[AndroidVoiceService] Foreground service stopped');
  } catch (e) {
    console.error('[AndroidVoiceService] Failed to stop foreground service:', e);
  }
}

/**
 * Check if battery optimization is disabled for our app
 * Returns true if optimization is DISABLED (good for us)
 * Returns false if optimization is ENABLED (bad - the system may kill our app)
 */
export async function isBatteryOptimizationDisabled(): Promise<boolean> {
  if (Platform.OS !== 'android') {
    return true; // iOS doesn't need this
  }

  try {
    const notifeeModule = await getNotifee();
    if (!notifeeModule) return true; // Assume OK if we can't check

    // Notifee provides a way to check power manager settings
    const powerManagerInfo = await notifeeModule.getPowerManagerInfo();

    // If the device has power manager restrictions
    if (powerManagerInfo.activity) {
      return false; // Battery optimization is likely enabled
    }

    return true;
  } catch (e) {
    console.log('[AndroidVoiceService] Could not check battery optimization:', e);
    return true; // Assume OK on error
  }
}

/**
 * Open battery optimization settings for our app
 */
export async function openBatteryOptimizationSettings(): Promise<void> {
  if (Platform.OS !== 'android') return;

  try {
    const notifeeModule = await getNotifee();
    if (notifeeModule) {
      // Try to open power manager settings via notifee
      await notifeeModule.openPowerManagerSettings();
      return;
    }
  } catch (e) {
    console.log('[AndroidVoiceService] Notifee openPowerManagerSettings failed:', e);
  }

  // Fallback: try to open battery optimization settings directly
  try {
    // Try generic battery settings
    await Linking.openSettings();
  } catch (e) {
    console.error('[AndroidVoiceService] Failed to open settings:', e);
  }
}

/**
 * Show alert about battery optimization
 * Call this before starting a voice call on Android
 */
export function showBatteryOptimizationAlert(): void {
  if (Platform.OS !== 'android') return;

  Alert.alert(
    'Optimize for Voice Calls',
    'To ensure voice calls continue working when the app is in the background, please disable battery optimization for WellNuo.\n\nThis prevents Android from stopping the call when you switch apps or lock your screen.',
    [
      {
        text: 'Later',
        style: 'cancel',
      },
      {
        text: 'Open Settings',
        onPress: () => openBatteryOptimizationSettings(),
      },
    ],
    { cancelable: true }
  );
}

/**
 * Check battery optimization and show an alert if needed
 * Returns true if we should proceed with the call
 * Returns false if the user chose to go to settings (call should be postponed)
 */
export async function checkAndPromptBatteryOptimization(): Promise<boolean> {
  if (Platform.OS !== 'android') {
    return true; // iOS - proceed
  }

  const isDisabled = await isBatteryOptimizationDisabled();

  if (isDisabled) {
    console.log('[AndroidVoiceService] Battery optimization already disabled - good!');
    return true;
  }

  // Show alert and wait for user response
  return new Promise((resolve) => {
    Alert.alert(
      'Optimize for Voice Calls',
      'For reliable voice calls in the background, we recommend disabling battery optimization for WellNuo.\n\nWould you like to adjust this setting now?',
      [
        {
          text: 'Skip for Now',
          style: 'cancel',
          onPress: () => resolve(true), // Proceed anyway
        },
        {
          text: 'Open Settings',
          onPress: async () => {
            await openBatteryOptimizationSettings();
            resolve(false); // Don't start the call - user went to settings
          },
        },
      ],
      { cancelable: false }
    );
  });
}

/**
 * Request notification permission (required for Android 13+)
 */
export async function requestNotificationPermission(): Promise<boolean> {
  if (Platform.OS !== 'android') return true;

  const notifeeModule = await getNotifee();
  if (!notifeeModule) return false;

  try {
    const settings = await notifeeModule.requestPermission();
    const granted = settings.authorizationStatus >= 1; // AUTHORIZED or PROVISIONAL
    console.log('[AndroidVoiceService] Notification permission:', granted ? 'granted' : 'denied');
    return granted;
  } catch (e) {
    console.error('[AndroidVoiceService] Failed to request notification permission:', e);
    return false;
  }
}
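
// Hypothetical call-lifecycle wiring (sketch; where these run is assumed, e.g. the LiveKit hook):
//
//   // before starting a call
//   await requestNotificationPermission();                     // Android 13+ notifications
//   const proceed = await checkAndPromptBatteryOptimization(); // may send the user to settings
//   if (!proceed) return;
//   await startVoiceCallService();                             // foreground service keeps audio alive
//
//   // after the call ends
//   await stopVoiceCallService();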
@ -1,8 +1,9 @@
/**
 * iOS AudioSession Configuration Helpers
 * Audio Session Configuration Helpers (iOS + Android)
 *
 * CRITICAL: This must be configured BEFORE connecting to LiveKit room!
 * Without proper AudioSession setup, microphone won't work on iOS.
 * On Android, this controls speaker/earpiece routing.
 */

import { Platform } from 'react-native';
@ -16,8 +17,6 @@ let audioSessionModule: any = null;
 * This is needed because @livekit/react-native must be imported after registerGlobals()
 */
async function getAudioSession(): Promise<any | null> {
  if (Platform.OS !== 'ios') return null;

  if (!audioSessionModule) {
    const livekit = await import('@livekit/react-native');
    audioSessionModule = livekit.AudioSession;
@ -27,22 +26,21 @@ async function getAudioSession(): Promise<any | null> {
}

/**
 * Configure iOS AudioSession for bidirectional voice call
 * Configure AudioSession for bidirectional voice call (iOS + Android)
 *
 * MUST be called BEFORE connecting to LiveKit room!
 *
 * Configuration:
 * iOS Configuration:
 * - Category: playAndRecord (both speaker and mic)
 * - Mode: voiceChat (optimized for voice calls)
 * - Options: Bluetooth, speaker, mix with others
 *
 * Android Configuration:
 * - audioTypeOptions: communication (for voice calls)
 * - forceHandleAudioRouting: true (to control speaker/earpiece)
 */
export async function configureAudioForVoiceCall(): Promise<void> {
  if (Platform.OS !== 'ios') {
    console.log('[AudioSession] Skipping on non-iOS platform');
    return;
  }

  console.log('[AudioSession] Configuring for voice call...');
  console.log(`[AudioSession] Configuring for voice call on ${Platform.OS}...`);

  try {
    const AudioSession = await getAudioSession();
@ -51,20 +49,22 @@ export async function configureAudioForVoiceCall(): Promise<void> {
      return;
    }

    // Step 1: Set Apple-specific audio configuration
    if (Platform.OS === 'ios') {
      // iOS-specific configuration
      console.log('[AudioSession] Step 1: Setting Apple audio config...');
      await AudioSession.setAppleAudioConfiguration({
        audioCategory: 'playAndRecord',
        // Note: removed 'allowBluetoothA2DP' - it's incompatible with playAndRecord
        // on some iOS versions and causes "status -50" error.
        // 'allowBluetooth' (HFP profile) is sufficient for voice calls.
        audioCategoryOptions: [
          'allowBluetooth',
          'allowBluetoothA2DP',
          'defaultToSpeaker',
          'mixWithOthers',
        ],
        audioMode: 'voiceChat',
      });

      // Step 2: Configure default output to speaker
      console.log('[AudioSession] Step 2: Setting default output...');
      await AudioSession.configureAudio({
        ios: {
@ -72,9 +72,35 @@ export async function configureAudioForVoiceCall(): Promise<void> {
        },
      });

      // Step 3: Start the audio session
      console.log('[AudioSession] Step 3: Starting audio session...');
      await AudioSession.startAudioSession();
    } else if (Platform.OS === 'android') {
      // Android-specific configuration
      // IMPORTANT: Using 'music' stream type to force output to speaker
      // 'voiceCall' stream type defaults to earpiece on many Android devices
      console.log('[AudioSession] Configuring Android audio for SPEAKER...');
      await AudioSession.configureAudio({
        android: {
          // Use MEDIA mode to ensure speaker output
          audioTypeOptions: {
            manageAudioFocus: true,
            audioMode: 'normal',
            audioFocusMode: 'gain',
            // Use 'music' stream - goes to speaker by default
            audioStreamType: 'music',
            audioAttributesUsageType: 'media',
            audioAttributesContentType: 'music',
          },
          // Force speaker as output
          preferredOutputList: ['speaker'],
          // Allow us to control audio routing
          forceHandleAudioRouting: true,
        },
      });

      console.log('[AudioSession] Starting Android audio session...');
      await AudioSession.startAudioSession();
    }

    console.log('[AudioSession] Configuration complete!');
  } catch (error) {
@ -84,16 +110,16 @@ export async function configureAudioForVoiceCall(): Promise<void> {
}

/**
 * Stop iOS AudioSession
 * Stop AudioSession (iOS + Android)
 *
 * Should be called when disconnecting from voice call
 */
export async function stopAudioSession(): Promise<void> {
  if (Platform.OS !== 'ios') {
  if (Platform.OS !== 'ios' && Platform.OS !== 'android') {
    return;
  }

  console.log('[AudioSession] Stopping audio session...');
  console.log(`[AudioSession] Stopping audio session on ${Platform.OS}...`);

  try {
    const AudioSession = await getAudioSession();
@ -110,16 +136,16 @@ export async function stopAudioSession(): Promise<void> {
}

/**
 * Reconfigure audio session after remote track arrives
 * Reconfigure audio session after remote track arrives (iOS + Android)
 *
 * Sometimes iOS needs a kick to properly route audio after remote participant joins
 * Sometimes the OS needs a kick to properly route audio after remote participant joins
 */
export async function reconfigureAudioForPlayback(): Promise<void> {
  if (Platform.OS !== 'ios') {
  if (Platform.OS !== 'ios' && Platform.OS !== 'android') {
    return;
  }

  console.log('[AudioSession] Reconfiguring for playback...');
  console.log(`[AudioSession] Reconfiguring for playback on ${Platform.OS}...`);

  try {
    const AudioSession = await getAudioSession();
@ -127,17 +153,38 @@ export async function reconfigureAudioForPlayback(): Promise<void> {
      return;
    }

    if (Platform.OS === 'ios') {
      // Just reconfigure the same settings - this "refreshes" the audio routing
      await AudioSession.setAppleAudioConfiguration({
        audioCategory: 'playAndRecord',
        // Note: removed 'allowBluetoothA2DP' - it's incompatible with playAndRecord
        // on some iOS versions and causes "status -50" error.
        // 'allowBluetooth' (HFP profile) is sufficient for voice calls.
        audioCategoryOptions: [
          'allowBluetooth',
          'allowBluetoothA2DP',
          'defaultToSpeaker',
          'mixWithOthers',
        ],
        audioMode: 'voiceChat',
      });
    } else if (Platform.OS === 'android') {
      // Reconfigure Android audio to ensure speaker output
      // Using 'music' stream type to force speaker
      await AudioSession.configureAudio({
        android: {
          audioTypeOptions: {
            manageAudioFocus: true,
            audioMode: 'normal',
            audioFocusMode: 'gain',
            audioStreamType: 'music',
            audioAttributesUsageType: 'media',
            audioAttributesContentType: 'music',
          },
          preferredOutputList: ['speaker'],
          forceHandleAudioRouting: true,
        },
      });
    }

    console.log('[AudioSession] Reconfigured successfully');
  } catch (error) {
@ -145,3 +192,63 @@ export async function reconfigureAudioForPlayback(): Promise<void> {
    // Don't throw - this is a best-effort operation
  }
}

/**
 * Switch audio output between speaker and earpiece (iOS + Android)
 *
 * @param useSpeaker - true for speaker, false for earpiece
 */
export async function setAudioOutput(useSpeaker: boolean): Promise<void> {
  console.log(`[AudioSession] Setting audio output to ${useSpeaker ? 'SPEAKER' : 'EARPIECE'} on ${Platform.OS}...`);

  try {
    const AudioSession = await getAudioSession();
    if (!AudioSession) {
      console.error('[AudioSession] Failed to get AudioSession module');
      return;
    }

    if (Platform.OS === 'ios') {
      // iOS: Configure audio output
      await AudioSession.configureAudio({
        ios: {
          defaultOutput: useSpeaker ? 'speaker' : 'earpiece',
        },
      });

      // Also update the full configuration to ensure it takes effect
      // Note: removed 'allowBluetoothA2DP' - causes "status -50" error
      await AudioSession.setAppleAudioConfiguration({
        audioCategory: 'playAndRecord',
        audioCategoryOptions: useSpeaker
          ? ['allowBluetooth', 'defaultToSpeaker', 'mixWithOthers']
          : ['allowBluetooth', 'mixWithOthers'],
        audioMode: 'voiceChat',
      });
    } else if (Platform.OS === 'android') {
      // Android: Switch stream type to control speaker/earpiece
      // - 'music' stream goes to speaker by default
      // - 'voiceCall' stream goes to earpiece by default
      await AudioSession.configureAudio({
        android: {
          audioTypeOptions: {
            manageAudioFocus: true,
            audioMode: useSpeaker ? 'normal' : 'inCommunication',
            audioFocusMode: 'gain',
            // Key difference: music→speaker, voiceCall→earpiece
            audioStreamType: useSpeaker ? 'music' : 'voiceCall',
            audioAttributesUsageType: useSpeaker ? 'media' : 'voiceCommunication',
            audioAttributesContentType: useSpeaker ? 'music' : 'speech',
          },
          // Also set preferred output list
          preferredOutputList: useSpeaker ? ['speaker'] : ['earpiece'],
          forceHandleAudioRouting: true,
        },
      });
    }

    console.log(`[AudioSession] Audio output set to ${useSpeaker ? 'SPEAKER' : 'EARPIECE'}`);
  } catch (error) {
    console.error('[AudioSession] setAudioOutput error:', error);
  }
}
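
// Hypothetical speaker-toggle usage from the call screen (state names assumed):
//
//   const next = !speakerOn;
//   setSpeakerOn(next);
//   await setAudioOutput(next); // 'music'/speaker vs 'voiceCall'/earpiece routing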