fix: voice session improvements - FAB stop, echo prevention, chat TTS
- FAB button now correctly stops session during speaking/processing states
- Echo prevention: STT stopped during TTS playback, results ignored during speaking
- Chat TTS only speaks when voice session is active (no auto-speak for text chat)
- Session stop now aborts in-flight API requests and prevents race conditions
- STT restarts after TTS with 800ms delay for audio focus release
- Pending interrupt transcript processed after TTS completion
- ChatContext added for message persistence across tab navigation
- VoiceFAB redesigned with state-based animations
- console.error replaced with console.warn across voice pipeline
- no-speech STT errors silenced (normal silence behavior)
Parent: f2803ca5db
Commit: 05f872d067
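Review aid: before the diff itself, here is a minimal sketch of the session flow these changes implement. It is an illustration only — `startSTT`/`stopSTT` are stand-ins for the real hooks below, and the 800ms figure comes from the commit message:

```typescript
// Sketch of the voice-session state machine after this commit (not app code).
type VoiceStatus = 'idle' | 'listening' | 'processing' | 'speaking';

declare function startSTT(): void; // stand-in for safeStartSTT()
declare function stopSTT(): void;  // stand-in for stopListening()

function onStatusChange(prev: VoiceStatus, next: VoiceStatus): void {
  // Echo prevention: the mic must be off while Julia is thinking or talking,
  // otherwise STT transcribes her own TTS output.
  if (next === 'processing' || next === 'speaking') {
    stopSTT();
  }
  // Restart after TTS: the delay lets the TTS engine release audio focus
  // (especially on Android, where STT and TTS share it).
  if (prev === 'speaking' && next === 'listening') {
    setTimeout(() => startSTT(), 800);
  }
}
```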
@@ -1,6 +1,6 @@
 import { Tabs } from 'expo-router';
 import React, { useCallback, useEffect, useRef } from 'react';
-import { Platform, View, AppState, AppStateStatus } from 'react-native';
+import { Platform, View, AppState, AppStateStatus, TouchableOpacity, StyleSheet } from 'react-native';
 import { Feather } from '@expo/vector-icons';
 import { useSafeAreaInsets } from 'react-native-safe-area-context';

@@ -40,14 +40,13 @@ export default function TabLayout() {
   const pendingInterruptTranscriptRef = useRef<string | null>(null);

   // Callback for voice detection - interrupt TTS when user speaks
+  // NOTE: On Android, STT doesn't run during TTS (shared audio focus),
+  // so interruption on Android happens via FAB press instead.
+  // On iOS, STT can run alongside TTS, so voice detection works.
   const handleVoiceDetected = useCallback(() => {
-    // Interrupt TTS when user starts speaking during 'speaking' state
-    if (status === 'speaking' || isSpeaking) {
-      console.log('[TabLayout] Voice detected during TTS playback - INTERRUPTING Julia');
-      const wasInterrupted = interruptIfSpeaking();
-      if (wasInterrupted) {
-        console.log('[TabLayout] TTS interrupted successfully, now listening to user');
-      }
+    if (Platform.OS === 'ios' && (status === 'speaking' || isSpeaking)) {
+      console.log('[TabLayout] Voice detected during TTS (iOS) - INTERRUPTING Julia');
+      interruptIfSpeaking();
     }
   }, [status, isSpeaking, interruptIfSpeaking]);

@@ -63,21 +62,24 @@ export default function TabLayout() {
   // Callback for STT results
   const handleSpeechResult = useCallback((transcript: string, isFinal: boolean) => {
-    if (isFinal) {
-      // Check if we're still in speaking mode (user interrupted Julia)
-      if (isSpeaking || status === 'speaking') {
-        // Store the transcript to send after TTS fully stops
-        console.log('[TabLayout] Got final result while TTS playing - storing for after interruption:', transcript);
+    // Ignore any STT results during TTS playback or processing (echo prevention)
+    if (status === 'speaking' || status === 'processing') {
+      if (isFinal) {
+        // User interrupted Julia with speech — store to send after TTS stops
+        console.log('[TabLayout] Got final result during TTS/processing - storing for after interruption:', transcript);
         pendingInterruptTranscriptRef.current = transcript;
-      } else {
-        // Normal case: not speaking, send immediately
-        setTranscript(transcript);
-        sendTranscript(transcript);
       }
+      // Ignore partial transcripts during TTS (they're likely echo)
+      return;
+    }
+
+    if (isFinal) {
+      setTranscript(transcript);
+      sendTranscript(transcript);
     } else {
       setPartialTranscript(transcript);
     }
-  }, [setTranscript, setPartialTranscript, sendTranscript, isSpeaking, status]);
+  }, [setTranscript, setPartialTranscript, sendTranscript, status]);

   // Speech recognition with voice detection callback
   const {
@@ -85,7 +87,7 @@ export default function TabLayout() {
     stopListening,
     isListening: sttIsListening,
   } = useSpeechRecognition({
-    lang: 'ru-RU',
+    lang: 'en-US',
     continuous: true,
     interimResults: true,
     onVoiceDetected: handleVoiceDetected,
@@ -93,6 +95,26 @@ export default function TabLayout() {
     onEnd: handleSTTEnd,
   });

+  // Ref to prevent concurrent startListening calls
+  const sttStartingRef = useRef(false);
+
+  // Safe wrapper to start STT with debounce protection
+  const safeStartSTT = useCallback(() => {
+    if (sttIsListening || sttStartingRef.current) {
+      return; // Already listening or starting
+    }
+    // Don't start STT during TTS on Android - they share audio focus
+    if (Platform.OS === 'android' && (status === 'speaking' || isSpeaking)) {
+      console.log('[TabLayout] Skipping STT start - TTS is playing (Android audio focus)');
+      return;
+    }
+    sttStartingRef.current = true;
+    console.log('[TabLayout] Starting STT...');
+    startListening().finally(() => {
+      sttStartingRef.current = false;
+    });
+  }, [sttIsListening, status, isSpeaking, startListening]);
+
   // Update session active ref when isListening changes
   useEffect(() => {
     sessionActiveRef.current = isListening;
@@ -104,42 +126,32 @@ export default function TabLayout() {
   // Start/stop STT when voice session starts/stops
   useEffect(() => {
     if (isListening) {
-      console.log('[TabLayout] Starting STT for voice session');
-      startListening();
+      console.log('[TabLayout] Voice session started - starting STT');
+      safeStartSTT();
     } else {
-      console.log('[TabLayout] Stopping STT - session ended');
+      console.log('[TabLayout] Voice session ended - stopping STT');
       stopListening();
     }
-  }, [isListening, startListening, stopListening]);
-
-  // Restart STT if it ended while session is still active
-  // This ensures continuous listening even during/after TTS playback
-  useEffect(() => {
-    if (shouldRestartSTTRef.current && sessionActiveRef.current && !sttIsListening) {
-      console.log('[TabLayout] Restarting STT - session still active');
-      shouldRestartSTTRef.current = false;
-      // Small delay to ensure clean restart
-      const timer = setTimeout(() => {
-        if (sessionActiveRef.current) {
-          startListening();
-        }
-      }, 100);
-      return () => clearTimeout(timer);
-    }
-  }, [sttIsListening, startListening]);
+  }, [isListening]); // eslint-disable-line react-hooks/exhaustive-deps

   // Track previous status to detect transition from speaking to listening
   const prevStatusRef = useRef<typeof status>('idle');

-  // Auto-restart STT when TTS finishes (status changes from 'speaking' to 'listening')
-  // Also process any pending transcript from user interruption
+  // Stop STT when entering processing or speaking state (prevent echo)
+  // Restart STT when TTS finishes (speaking → listening)
   useEffect(() => {
     const prevStatus = prevStatusRef.current;
     prevStatusRef.current = status;

-    // When transitioning from speaking to listening, handle pending interrupt transcript
+    // Stop STT when processing starts or TTS starts (prevent Julia hearing herself)
+    if ((status === 'processing' || status === 'speaking') && sttIsListening) {
+      console.log('[TabLayout] Stopping STT during', status, '(echo prevention)');
+      stopListening();
+    }
+
+    // When TTS finishes (speaking → listening), restart STT
     if (prevStatus === 'speaking' && status === 'listening' && sessionActiveRef.current) {
-      console.log('[TabLayout] TTS finished/interrupted - checking for pending transcript');
+      console.log('[TabLayout] TTS finished - restarting STT');

       // Process pending transcript from interruption if any
       const pendingTranscript = pendingInterruptTranscriptRef.current;
@@ -150,76 +162,74 @@ export default function TabLayout() {
         sendTranscript(pendingTranscript);
       }

-      // Small delay to ensure TTS cleanup is complete, then restart STT
+      // Delay to let TTS fully release audio focus, then restart STT
       const timer = setTimeout(() => {
-        if (sessionActiveRef.current && !sttIsListening) {
-          startListening();
+        if (sessionActiveRef.current) {
+          safeStartSTT();
         }
-      }, 200);
+      }, 800); // 800ms to ensure TTS audio fully fades
       return () => clearTimeout(timer);
     }
-  }, [status, sttIsListening, startListening, setTranscript, sendTranscript]);
-
-  // ============================================================================
-  // TAB NAVIGATION PERSISTENCE
-  // Ensure voice session continues when user switches between tabs.
-  // The session state is in VoiceContext (root level), but STT may stop due to:
-  // 1. Native audio session changes
-  // 2. Tab unmount/remount (though tabs layout doesn't unmount)
-  // 3. AppState changes (background/foreground)
-  // ============================================================================
+    // When processing finishes and goes to speaking, STT is already stopped (above)
+    // When speaking finishes and goes to listening, STT restarts (above)
+  }, [status]); // eslint-disable-line react-hooks/exhaustive-deps

-  // Monitor and recover STT state during tab navigation
-  // If session is active but STT stopped unexpectedly, restart it
-  // IMPORTANT: STT should run DURING TTS playback to detect user interruption!
+  // When STT ends unexpectedly during active session, restart it (but not during TTS)
   useEffect(() => {
-    // Check every 500ms if STT needs to be restarted
-    const intervalId = setInterval(() => {
-      // Only act if session should be active (isListening from VoiceContext)
-      // but STT is not actually listening
-      // Note: We DO want STT running during 'speaking' to detect interruption!
-      // Only skip during 'processing' (API call in progress)
-      if (
-        sessionActiveRef.current &&
-        !sttIsListening &&
-        status !== 'processing'
-      ) {
-        console.log('[TabLayout] STT watchdog: restarting STT (session active but STT stopped, status:', status, ')');
-        startListening();
-      }
-    }, 500);
-
-    return () => clearInterval(intervalId);
-  }, [sttIsListening, status, startListening]);
+    if (
+      shouldRestartSTTRef.current &&
+      sessionActiveRef.current &&
+      !sttIsListening &&
+      status !== 'processing' &&
+      status !== 'speaking'
+    ) {
+      shouldRestartSTTRef.current = false;
+      console.log('[TabLayout] STT ended unexpectedly - restarting');
+      const timer = setTimeout(() => {
+        if (sessionActiveRef.current) {
+          safeStartSTT();
+        }
+      }, 300);
+      return () => clearTimeout(timer);
+    }
+  }, [sttIsListening]); // eslint-disable-line react-hooks/exhaustive-deps

   // Handle app state changes (background/foreground)
   // When app comes back to foreground, restart STT if session was active
   useEffect(() => {
     const handleAppStateChange = (nextAppState: AppStateStatus) => {
       if (nextAppState === 'active' && sessionActiveRef.current) {
         // App came to foreground, give it a moment then check STT
-        // STT should run even during 'speaking' to detect user interruption
         setTimeout(() => {
-          if (sessionActiveRef.current && !sttIsListening && status !== 'processing') {
+          if (sessionActiveRef.current && !sttIsListening && status !== 'processing' && status !== 'speaking') {
             console.log('[TabLayout] App foregrounded - restarting STT');
-            startListening();
+            safeStartSTT();
           }
-        }, 300);
+        }, 500);
       }
     };

     const subscription = AppState.addEventListener('change', handleAppStateChange);
     return () => subscription.remove();
-  }, [sttIsListening, status, startListening]);
+  }, [sttIsListening, status, safeStartSTT]);

   // Handle voice FAB press - toggle listening mode
+  // Must check ALL active states (listening, processing, speaking), not just isListening
   const handleVoiceFABPress = useCallback(() => {
-    if (isListening) {
+    const isSessionActive = isListening || status === 'speaking' || status === 'processing';
+    console.log('[TabLayout] FAB pressed, isSessionActive:', isSessionActive, 'status:', status, 'isListening:', isListening);
+
+    if (isSessionActive) {
+      // Force-stop everything: STT, TTS, and session state
+      console.log('[TabLayout] Force-stopping everything');
+      stopListening();
       stopSession();
+      sessionActiveRef.current = false;
+      shouldRestartSTTRef.current = false;
+      pendingInterruptTranscriptRef.current = null;
     } else {
       startSession();
     }
-  }, [isListening, startSession, stopSession]);
+  }, [isListening, status, startSession, stopSession, stopListening]);

   // Calculate tab bar height based on safe area
   // On iOS with home indicator, insets.bottom is ~34px
@@ -278,6 +288,18 @@ export default function TabLayout() {
           ),
         }}
       />
+      {/* Voice FAB - center tab button */}
+      <Tabs.Screen
+        name="explore"
+        options={{
+          title: '',
+          tabBarButton: () => (
+            <View style={tabFABStyles.fabWrapper}>
+              <VoiceFAB onPress={handleVoiceFABPress} isListening={isListening || status === 'speaking' || status === 'processing'} />
+            </View>
+          ),
+        }}
+      />
       <Tabs.Screen
         name="profile"
         options={{
@@ -287,13 +309,6 @@ export default function TabLayout() {
           ),
         }}
       />
-      {/* Hide explore tab */}
-      <Tabs.Screen
-        name="explore"
-        options={{
-          href: null,
-        }}
-      />
       {/* Audio Debug - hidden */}
       <Tabs.Screen
         name="audio-debug"
@@ -309,9 +324,15 @@ export default function TabLayout() {
         }}
       />
     </Tabs>
-
-      {/* Voice FAB - toggle listening mode */}
-      <VoiceFAB onPress={handleVoiceFABPress} isListening={isListening} />
     </View>
   );
 }
+
+const tabFABStyles = StyleSheet.create({
+  fabWrapper: {
+    flex: 1,
+    alignItems: 'center',
+    justifyContent: 'center',
+    top: -20,
+  },
+});
@@ -40,7 +40,7 @@ export default function BeneficiaryDashboardScreen() {
       setUserId(uid);
       console.log('Loaded credentials for WebView:', { hasToken: !!token, user, uid });
     } catch (err) {
-      console.error('Failed to load credentials:', err);
+      console.warn('Failed to load credentials:', err);
     } finally {
       setIsTokenLoaded(true);
     }
@@ -80,7 +80,7 @@ export default function BeneficiaryDashboardScreen() {
         console.log('MobileAppLogin timeout - function not found');
       }, 5000);
     } catch(e) {
-      console.error('Failed to call MobileAppLogin:', e);
+      console.warn('Failed to call MobileAppLogin:', e);
     }
   })();
   true;
@@ -25,6 +25,8 @@ import { useRouter, useFocusEffect } from 'expo-router';
 import { api } from '@/services/api';
 import { useBeneficiary } from '@/contexts/BeneficiaryContext';
 import { useVoiceTranscript } from '@/contexts/VoiceTranscriptContext';
+import { useVoice } from '@/contexts/VoiceContext';
+import { useChat } from '@/contexts/ChatContext';
 import { useTextToSpeech } from '@/hooks/useTextToSpeech';
 import { AppColors, BorderRadius, FontSizes, Spacing } from '@/constants/theme';
 import type { Message, Beneficiary } from '@/types';
@@ -113,6 +115,9 @@ export default function ChatScreen() {
   const { currentBeneficiary, setCurrentBeneficiary } = useBeneficiary();
   const { transcript, hasNewTranscript, markTranscriptAsShown, getTranscriptAsMessages } = useVoiceTranscript();

+  // Voice context for real-time transcript display and checking if voice session is active
+  const { partialTranscript: voicePartial, isListening: voiceIsListening, status: voiceStatus, isActive: voiceIsActive } = useVoice();
+
   // TTS for reading Julia's responses aloud
   const { speak, stop: stopTTS, isSpeaking } = useTextToSpeech({
     language: 'ru-RU',
@@ -131,8 +136,8 @@ export default function ChatScreen() {
   const [customDeploymentId, setCustomDeploymentId] = useState<string | null>(null);
   const [deploymentName, setDeploymentName] = useState<string | null>(null);

-  // Chat state - initialized after deployment ID is loaded
-  const [messages, setMessages] = useState<Message[]>([createInitialMessage(null)]);
+  // Chat state - stored in context to persist across tab navigation
+  const { messages, setMessages } = useChat();
   const [sortNewestFirst, setSortNewestFirst] = useState(false);

   const [input, setInput] = useState('');
@@ -145,6 +150,23 @@ export default function ChatScreen() {
     inputRef.current = input;
   }, [input]);

+  // Show partial voice transcript in input field in real-time
+  useEffect(() => {
+    if (voiceIsListening && voicePartial) {
+      setInput(voicePartial);
+    }
+  }, [voicePartial, voiceIsListening]);
+
+  // Clear input when voice switches to processing (transcript was sent)
+  const prevVoiceStatusRef = useRef(voiceStatus);
+  useEffect(() => {
+    const prev = prevVoiceStatusRef.current;
+    prevVoiceStatusRef.current = voiceStatus;
+    if (prev === 'listening' && voiceStatus === 'processing') {
+      setInput('');
+    }
+  }, [voiceStatus]);
+
   // Beneficiary picker
   const [showBeneficiaryPicker, setShowBeneficiaryPicker] = useState(false);
   const [beneficiaries, setBeneficiaries] = useState<Beneficiary[]>([]);
@@ -235,7 +257,7 @@ export default function ChatScreen() {
       }
       return [];
     } catch (error) {
-      console.error('Failed to load beneficiaries:', error);
+      console.warn('Failed to load beneficiaries:', error);
       return [];
     } finally {
       setLoadingBeneficiaries(false);
@@ -392,8 +414,11 @@ export default function ChatScreen() {
         };
         setMessages(prev => [...prev, assistantMessage]);

-        // Speak the response using TTS
-        speak(responseText);
+        // Only speak the response if voice session is active (FAB pressed)
+        // Don't auto-speak for text-only chat messages
+        if (voiceIsActive) {
+          speak(responseText);
+        }
       } else {
         // Token might be expired, clear and retry once
         if (data.status === '401 Unauthorized') {
@@ -413,7 +438,7 @@ export default function ChatScreen() {
     } finally {
       setIsSending(false);
     }
-  }, [isSending, getWellNuoToken, customDeploymentId, currentBeneficiary, beneficiaries, speak]);
+  }, [isSending, getWellNuoToken, customDeploymentId, currentBeneficiary, beneficiaries, speak, voiceIsActive]);

   // Render message bubble
   const renderMessage = ({ item }: { item: Message }) => {
@@ -610,9 +635,9 @@ export default function ChatScreen() {
         {/* Input */}
         <View style={styles.inputContainer}>
           <TextInput
-            style={styles.input}
-            placeholder="Type a message..."
-            placeholderTextColor={AppColors.textMuted}
+            style={[styles.input, voiceIsListening && styles.inputListening]}
+            placeholder={voiceIsListening ? "Listening..." : "Type a message..."}
+            placeholderTextColor={voiceIsListening ? AppColors.error : AppColors.textMuted}
             value={input}
             onChangeText={setInput}
             multiline
@@ -774,6 +799,10 @@ const styles = StyleSheet.create({
     maxHeight: 100,
     marginRight: Spacing.sm,
   },
+  inputListening: {
+    borderWidth: 1.5,
+    borderColor: AppColors.error,
+  },
   sendButton: {
     width: 44,
     height: 44,
@@ -51,7 +51,7 @@ export default function HomeScreen() {
       });
     }
   } catch (err) {
-    console.error('Failed to load credentials:', err);
+    console.warn('Failed to load credentials:', err);
   } finally {
     setIsTokenLoaded(true);
   }
@@ -126,11 +126,11 @@ export default function HomeScreen() {
       webViewRef.current?.injectJavaScript(injectScript);
     }
   } else {
-    console.error('Token refresh failed');
+    console.warn('Token refresh failed');
     setError('Session expired. Please restart the app.');
   }
 } catch (err) {
-  console.error('Error refreshing token:', err);
+  console.warn('Error refreshing token:', err);
 } finally {
   setIsRefreshingToken(false);
 }
@@ -178,7 +178,7 @@ export default function HomeScreen() {
       observer.observe(document.body, { childList: true, subtree: true });
     }
   } catch(e) {
-    console.error('Failed to inject token:', e);
+    console.warn('Failed to inject token:', e);
   }
 })();
 true;
@@ -13,6 +13,7 @@ import { BeneficiaryProvider } from '@/contexts/BeneficiaryContext';
 import { VoiceTranscriptProvider } from '@/contexts/VoiceTranscriptContext';
 import { VoiceCallProvider } from '@/contexts/VoiceCallContext';
 import { VoiceProvider } from '@/contexts/VoiceContext';
+import { ChatProvider } from '@/contexts/ChatContext';
 import { LoadingSpinner } from '@/components/ui/LoadingSpinner';
 import { FloatingCallBubble } from '@/components/FloatingCallBubble';
@@ -69,7 +70,9 @@ export default function RootLayout() {
       <VoiceTranscriptProvider>
         <VoiceCallProvider>
           <VoiceProvider>
-            <RootLayoutNav />
+            <ChatProvider>
+              <RootLayoutNav />
+            </ChatProvider>
           </VoiceProvider>
         </VoiceCallProvider>
       </VoiceTranscriptProvider>
@@ -1,9 +1,12 @@
 /**
  * Voice Floating Action Button Component
  *
- * A floating action button for toggling voice listening mode.
- * Tap to start/stop listening.
- * Hidden when a call is already active.
+ * Positioned at the center of the tab bar.
+ * Shows different animations for each voice state:
+ * - idle: white mic icon, green background
+ * - listening: red background, expanding pulse rings
+ * - processing: blue background, spinning indicator
+ * - speaking: green background, wave-like pulse
  */

 import React, { useRef, useEffect } from 'react';
@@ -12,12 +15,13 @@ import {
   TouchableOpacity,
   Animated,
   ViewStyle,
+  ActivityIndicator,
 } from 'react-native';
 import { Ionicons } from '@expo/vector-icons';
 import { useSafeAreaInsets } from 'react-native-safe-area-context';
 import * as Haptics from 'expo-haptics';
 import { AppColors, BorderRadius } from '@/constants/theme';
 import { useVoiceCall } from '@/contexts/VoiceCallContext';
+import { useVoice } from '@/contexts/VoiceContext';

 interface VoiceFABProps {
   onPress: () => void;
@@ -26,184 +30,272 @@ interface VoiceFABProps {
   isListening?: boolean;
 }

-const FAB_SIZE = 56;
+const FAB_SIZE = 60;

 export function VoiceFAB({ onPress, style, disabled = false, isListening = false }: VoiceFABProps) {
   const { isCallActive } = useVoiceCall();
   const insets = useSafeAreaInsets();
+  const { status: voiceStatus } = useVoice();

   // Animation values
   const scale = useRef(new Animated.Value(1)).current;
   const opacity = useRef(new Animated.Value(1)).current;
-  const pulseScale = useRef(new Animated.Value(1)).current;
-  const pulseOpacity = useRef(new Animated.Value(0)).current;
+
+  // Pulse ring 1 (main expanding ring)
+  const pulse1Scale = useRef(new Animated.Value(1)).current;
+  const pulse1Opacity = useRef(new Animated.Value(0)).current;
+
+  // Pulse ring 2 (second ring, offset timing)
+  const pulse2Scale = useRef(new Animated.Value(1)).current;
+  const pulse2Opacity = useRef(new Animated.Value(0)).current;
+
+  // Speaking glow animation
+  const glowScale = useRef(new Animated.Value(1)).current;
+
+  // Processing rotation
+  const rotation = useRef(new Animated.Value(0)).current;
+
+  // Store animation refs for cleanup
+  const animationRef = useRef<Animated.CompositeAnimation | null>(null);
+
+  // Determine effective state
+  const effectiveStatus = isListening
+    ? (voiceStatus === 'processing' ? 'processing' : voiceStatus === 'speaking' ? 'speaking' : 'listening')
+    : 'idle';

   // Hide FAB when call is active
   useEffect(() => {
     if (isCallActive) {
       Animated.parallel([
-        Animated.timing(scale, {
-          toValue: 0,
-          duration: 200,
-          useNativeDriver: true,
-        }),
-        Animated.timing(opacity, {
-          toValue: 0,
-          duration: 200,
-          useNativeDriver: true,
-        }),
+        Animated.timing(scale, { toValue: 0, duration: 200, useNativeDriver: true }),
+        Animated.timing(opacity, { toValue: 0, duration: 200, useNativeDriver: true }),
       ]).start();
     } else {
       Animated.parallel([
-        Animated.spring(scale, {
-          toValue: 1,
-          friction: 5,
-          tension: 40,
-          useNativeDriver: true,
-        }),
-        Animated.timing(opacity, {
-          toValue: 1,
-          duration: 200,
-          useNativeDriver: true,
-        }),
+        Animated.spring(scale, { toValue: 1, friction: 5, tension: 40, useNativeDriver: true }),
+        Animated.timing(opacity, { toValue: 1, duration: 200, useNativeDriver: true }),
       ]).start();
     }
   }, [isCallActive, scale, opacity]);

-  // Pulse animation when listening
+  // Animations based on voice status
   useEffect(() => {
-    if (isListening && !isCallActive) {
-      // Start pulsing animation
-      const pulseAnimation = Animated.loop(
-        Animated.sequence([
-          Animated.parallel([
-            Animated.timing(pulseScale, {
-              toValue: 1.8,
-              duration: 1000,
-              useNativeDriver: true,
-            }),
-            Animated.timing(pulseOpacity, {
-              toValue: 0,
-              duration: 1000,
-              useNativeDriver: true,
-            }),
-          ]),
-          Animated.parallel([
-            Animated.timing(pulseScale, {
-              toValue: 1,
-              duration: 0,
-              useNativeDriver: true,
-            }),
-            Animated.timing(pulseOpacity, {
-              toValue: 0.6,
-              duration: 0,
-              useNativeDriver: true,
-            }),
-          ]),
-        ])
-      );
-      pulseAnimation.start();
-
-      return () => {
-        pulseAnimation.stop();
-        pulseScale.setValue(1);
-        pulseOpacity.setValue(0);
-      };
-    } else {
-      pulseScale.setValue(1);
-      pulseOpacity.setValue(0);
+    // Stop previous animation
+    if (animationRef.current) {
+      animationRef.current.stop();
+      animationRef.current = null;
     }
-  }, [isListening, isCallActive, pulseScale, pulseOpacity]);
+
+    // Reset all animation values
+    pulse1Scale.setValue(1);
+    pulse1Opacity.setValue(0);
+    pulse2Scale.setValue(1);
+    pulse2Opacity.setValue(0);
+    glowScale.setValue(1);
+    rotation.setValue(0);
+
+    if (effectiveStatus === 'listening') {
+      // Double pulse ring animation - more active/dynamic
+      const pulseAnim = Animated.loop(
+        Animated.stagger(500, [
+          Animated.parallel([
+            Animated.timing(pulse1Scale, { toValue: 2.0, duration: 1200, useNativeDriver: true }),
+            Animated.timing(pulse1Opacity, { toValue: 0, duration: 1200, useNativeDriver: true }),
+          ]),
+          Animated.parallel([
+            Animated.timing(pulse1Scale, { toValue: 1, duration: 0, useNativeDriver: true }),
+            Animated.timing(pulse1Opacity, { toValue: 0.5, duration: 0, useNativeDriver: true }),
+          ]),
+        ])
+      );
+      const pulse2Anim = Animated.loop(
+        Animated.sequence([
+          Animated.delay(400),
+          Animated.parallel([
+            Animated.timing(pulse2Scale, { toValue: 1.8, duration: 1200, useNativeDriver: true }),
+            Animated.timing(pulse2Opacity, { toValue: 0, duration: 1200, useNativeDriver: true }),
+          ]),
+          Animated.parallel([
+            Animated.timing(pulse2Scale, { toValue: 1, duration: 0, useNativeDriver: true }),
+            Animated.timing(pulse2Opacity, { toValue: 0.4, duration: 0, useNativeDriver: true }),
+          ]),
+        ])
+      );
+
+      const combined = Animated.parallel([pulseAnim, pulse2Anim]);
+      animationRef.current = combined;
+      combined.start();
+
+    } else if (effectiveStatus === 'speaking') {
+      // Gentle breathing glow when speaking
+      const glowAnim = Animated.loop(
+        Animated.sequence([
+          Animated.timing(glowScale, { toValue: 1.15, duration: 600, useNativeDriver: true }),
+          Animated.timing(glowScale, { toValue: 1.0, duration: 600, useNativeDriver: true }),
+        ])
+      );
+
+      // Soft outer glow
+      const softPulse = Animated.loop(
+        Animated.sequence([
+          Animated.parallel([
+            Animated.timing(pulse1Scale, { toValue: 1.4, duration: 800, useNativeDriver: true }),
+            Animated.timing(pulse1Opacity, { toValue: 0.3, duration: 400, useNativeDriver: true }),
+          ]),
+          Animated.parallel([
+            Animated.timing(pulse1Scale, { toValue: 1.0, duration: 800, useNativeDriver: true }),
+            Animated.timing(pulse1Opacity, { toValue: 0, duration: 400, useNativeDriver: true }),
+          ]),
+        ])
+      );
+
+      const combined = Animated.parallel([glowAnim, softPulse]);
+      animationRef.current = combined;
+      combined.start();
+
+    } else if (effectiveStatus === 'processing') {
+      // Spinning rotation for processing
+      const spinAnim = Animated.loop(
+        Animated.timing(rotation, { toValue: 1, duration: 1500, useNativeDriver: true })
+      );
+      animationRef.current = spinAnim;
+      spinAnim.start();
+    }
+
+    return () => {
+      if (animationRef.current) {
+        animationRef.current.stop();
+        animationRef.current = null;
+      }
+    };
+  }, [effectiveStatus]); // eslint-disable-line react-hooks/exhaustive-deps

   // Press animation with haptic feedback
   const handlePressIn = () => {
     Haptics.impactAsync(Haptics.ImpactFeedbackStyle.Medium);
-    Animated.spring(scale, {
-      toValue: 0.9,
-      friction: 5,
-      useNativeDriver: true,
-    }).start();
+    Animated.spring(scale, { toValue: 0.85, friction: 5, useNativeDriver: true }).start();
   };

   const handlePressOut = () => {
-    Animated.spring(scale, {
-      toValue: 1,
-      friction: 5,
-      useNativeDriver: true,
-    }).start();
+    Animated.spring(scale, { toValue: 1, friction: 5, useNativeDriver: true }).start();
   };

   // Don't render if call is active
   if (isCallActive) {
     return null;
   }

+  // Determine colors and icon based on state
+  let fabBgColor = AppColors.success; // idle: green
+  let iconName: 'mic-outline' | 'mic' | 'volume-high' = 'mic-outline';
+  let pulseColor = AppColors.error;
+
+  if (effectiveStatus === 'listening') {
+    fabBgColor = '#FF3B30'; // red
+    iconName = 'mic';
+    pulseColor = '#FF3B30';
+  } else if (effectiveStatus === 'processing') {
+    fabBgColor = AppColors.primary; // blue
+    iconName = 'mic';
+    pulseColor = AppColors.primary;
+  } else if (effectiveStatus === 'speaking') {
+    fabBgColor = '#34C759'; // green
+    iconName = 'volume-high';
+    pulseColor = '#34C759';
+  }
+
+  const spin = rotation.interpolate({
+    inputRange: [0, 1],
+    outputRange: ['0deg', '360deg'],
+  });
+
   return (
     <Animated.View
       style={[
         styles.container,
         {
           bottom: insets.bottom + 80, // Above tab bar
           transform: [{ scale }],
           opacity,
         },
         style,
       ]}
     >
-      {/* Pulse ring when listening */}
-      {isListening && (
+      {/* Pulse ring 1 */}
+      {(effectiveStatus === 'listening' || effectiveStatus === 'speaking') && (
         <Animated.View
           style={[
             styles.pulseRing,
             {
-              transform: [{ scale: pulseScale }],
-              opacity: pulseOpacity,
+              backgroundColor: pulseColor,
+              transform: [{ scale: pulse1Scale }],
+              opacity: pulse1Opacity,
             },
           ]}
         />
       )}
-      <TouchableOpacity
-        style={[
-          styles.fab,
-          isListening && styles.fabListening,
-          disabled && styles.fabDisabled,
-        ]}
-        onPress={onPress}
-        onPressIn={handlePressIn}
-        onPressOut={handlePressOut}
-        disabled={disabled}
-        activeOpacity={0.9}
-      >
-        <Ionicons
-          name={isListening ? 'mic' : 'mic-outline'}
-          size={28}
-          color={disabled ? AppColors.textMuted : AppColors.white}
-        />
-      </TouchableOpacity>
+
+      {/* Pulse ring 2 (listening only) */}
+      {effectiveStatus === 'listening' && (
+        <Animated.View
+          style={[
+            styles.pulseRing,
+            {
+              backgroundColor: pulseColor,
+              transform: [{ scale: pulse2Scale }],
+              opacity: pulse2Opacity,
+            },
+          ]}
+        />
+      )}
+
+      <Animated.View
+        style={[
+          { transform: [{ scale: effectiveStatus === 'speaking' ? glowScale : 1 }] },
+        ]}
+      >
+        <TouchableOpacity
+          style={[
+            styles.fab,
+            { backgroundColor: disabled ? AppColors.surface : fabBgColor },
+            disabled && styles.fabDisabled,
+          ]}
+          onPress={onPress}
+          onPressIn={handlePressIn}
+          onPressOut={handlePressOut}
+          disabled={disabled}
+          activeOpacity={0.9}
+        >
+          {effectiveStatus === 'processing' ? (
+            <Animated.View style={{ transform: [{ rotate: spin }] }}>
+              <ActivityIndicator size="small" color={AppColors.white} />
+            </Animated.View>
+          ) : (
+            <Ionicons
+              name={iconName}
+              size={28}
+              color={disabled ? AppColors.textMuted : AppColors.white}
+            />
+          )}
+        </TouchableOpacity>
+      </Animated.View>
     </Animated.View>
   );
 }

 const styles = StyleSheet.create({
   container: {
     position: 'absolute',
     left: 0,
     right: 0,
     alignItems: 'center',
     zIndex: 100,
     justifyContent: 'center',
   },
   pulseRing: {
     position: 'absolute',
     width: FAB_SIZE,
     height: FAB_SIZE,
-    borderRadius: BorderRadius.full,
-    backgroundColor: AppColors.error,
+    borderRadius: FAB_SIZE / 2,
   },
   fab: {
     width: FAB_SIZE,
     height: FAB_SIZE,
-    borderRadius: BorderRadius.full,
-    backgroundColor: AppColors.success,
+    borderRadius: FAB_SIZE / 2,
     justifyContent: 'center',
     alignItems: 'center',
     shadowColor: '#000',
@@ -212,11 +304,7 @@ const styles = StyleSheet.create({
     shadowRadius: 8,
     elevation: 8,
   },
-  fabListening: {
-    backgroundColor: AppColors.error,
-  },
   fabDisabled: {
-    backgroundColor: AppColors.surface,
     shadowOpacity: 0.1,
   },
 });
contexts/ChatContext.tsx (new file, 51 lines)
@@ -0,0 +1,51 @@
+/**
+ * Chat Context - Persists chat messages across tab navigation
+ *
+ * Without this context, messages are lost when switching tabs
+ * because ChatScreen component unmounts and remounts.
+ */
+
+import React, { createContext, useContext, useState, useCallback, ReactNode } from 'react';
+import type { Message } from '@/types';
+
+interface ChatContextValue {
+  messages: Message[];
+  setMessages: React.Dispatch<React.SetStateAction<Message[]>>;
+  addMessage: (message: Message) => void;
+  clearMessages: (initialMessage: Message) => void;
+}
+
+const ChatContext = createContext<ChatContextValue | undefined>(undefined);
+
+export function ChatProvider({ children }: { children: ReactNode }) {
+  const [messages, setMessages] = useState<Message[]>([
+    {
+      id: '1',
+      role: 'assistant',
+      content: "Hello! I'm Julia, your AI wellness companion.\n\nType a message below to chat with me.",
+      timestamp: new Date(),
+    },
+  ]);
+
+  const addMessage = useCallback((message: Message) => {
+    setMessages(prev => [...prev, message]);
+  }, []);
+
+  const clearMessages = useCallback((initialMessage: Message) => {
+    setMessages([initialMessage]);
+  }, []);
+
+  return (
+    <ChatContext.Provider value={{ messages, setMessages, addMessage, clearMessages }}>
+      {children}
+    </ChatContext.Provider>
+  );
+}
+
+export function useChat() {
+  const context = useContext(ChatContext);
+  if (!context) {
+    throw new Error('useChat must be used within ChatProvider');
+  }
+  return context;
+}
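For reviewers, consuming the new context looks roughly like the sketch below. `SomeScreen` is hypothetical; `useChat` and the `Message` shape are the real exports from this file:

```typescript
import React from 'react';
import { Text } from 'react-native';
import { useChat } from '@/contexts/ChatContext';
import type { Message } from '@/types';

// Hypothetical consumer: messages survive this component unmounting,
// because the state lives in ChatProvider at the root layout.
function SomeScreen() {
  const { messages, addMessage } = useChat();

  const onSend = (text: string) => {
    const msg: Message = {
      id: String(Date.now()),
      role: 'user',
      content: text,
      timestamp: new Date(),
    };
    addMessage(msg);
  };

  return <Text>{messages.length} messages</Text>;
}
```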
@@ -153,6 +153,12 @@ export function VoiceProvider({ children }: { children: ReactNode }) {
   // API token cache
   const apiTokenRef = useRef<string | null>(null);

+  // Abort controller for cancelling in-flight API requests
+  const abortControllerRef = useRef<AbortController | null>(null);
+
+  // Flag to prevent speak() after session stopped
+  const sessionStoppedRef = useRef(false);
+
   // Deployment ID from settings
   const deploymentIdRef = useRef<string | null>(null);
@@ -207,6 +213,12 @@ export function VoiceProvider({ children }: { children: ReactNode }) {
       return null;
     }

+    // Don't send if session was stopped
+    if (sessionStoppedRef.current) {
+      console.log('[VoiceContext] Session stopped, skipping API call');
+      return null;
+    }
+
     console.log('[VoiceContext] Sending transcript to API:', trimmedText);
     setStatus('processing');
     setError(null);
@@ -214,10 +226,23 @@ export function VoiceProvider({ children }: { children: ReactNode }) {
     // Add user message to transcript for chat display
     addTranscriptEntry('user', trimmedText);

+    // Create abort controller for this request
+    if (abortControllerRef.current) {
+      abortControllerRef.current.abort();
+    }
+    const abortController = new AbortController();
+    abortControllerRef.current = abortController;
+
     try {
       // Get API token
       const token = await getWellNuoToken();

+      // Check if aborted
+      if (abortController.signal.aborted || sessionStoppedRef.current) {
+        console.log('[VoiceContext] Request aborted before API call');
+        return null;
+      }
+
       // Normalize question
       const normalizedQuestion = normalizeQuestion(trimmedText);
@@ -244,10 +269,17 @@ export function VoiceProvider({ children }: { children: ReactNode }) {
         method: 'POST',
         headers: { 'Content-Type': 'application/x-www-form-urlencoded' },
         body: new URLSearchParams(requestParams).toString(),
+        signal: abortController.signal,
       });

       const data = await response.json();

+      // Check if session was stopped while waiting for response
+      if (sessionStoppedRef.current) {
+        console.log('[VoiceContext] Session stopped during API call, discarding response');
+        return null;
+      }
+
       if (data.ok && data.response?.body) {
         const responseText = data.response.body;
         console.log('[VoiceContext] API response:', responseText.slice(0, 100) + '...');
@@ -256,7 +288,7 @@ export function VoiceProvider({ children }: { children: ReactNode }) {
         // Add Julia's response to transcript for chat display
         addTranscriptEntry('assistant', responseText);

-        // Speak the response
+        // Speak the response (will be skipped if session stopped)
         await speak(responseText);

         return responseText;
@@ -269,8 +301,13 @@ export function VoiceProvider({ children }: { children: ReactNode }) {
         throw new Error(data.message || 'Could not get response');
       }
     } catch (err) {
+      // Ignore abort errors
+      if (err instanceof Error && err.name === 'AbortError') {
+        console.log('[VoiceContext] API request aborted');
+        return null;
+      }
       const errorMsg = err instanceof Error ? err.message : 'Unknown error';
-      console.error('[VoiceContext] API error:', errorMsg);
+      console.warn('[VoiceContext] API error:', errorMsg);
       setError(errorMsg);
       setStatus('idle');
       return null;
@@ -300,6 +337,12 @@ export function VoiceProvider({ children }: { children: ReactNode }) {
   const speak = useCallback(async (text: string): Promise<void> => {
     if (!text.trim()) return;

+    // Don't speak if session was stopped
+    if (sessionStoppedRef.current) {
+      console.log('[VoiceContext] Session stopped, skipping TTS');
+      return;
+    }
+
     console.log('[VoiceContext] Speaking:', text.slice(0, 50) + '...');
     setStatus('speaking');
     setIsSpeaking(true);
@@ -315,20 +358,27 @@ export function VoiceProvider({ children }: { children: ReactNode }) {
       onDone: () => {
         console.log('[VoiceContext] TTS completed');
         setIsSpeaking(false);
-        // Return to listening state after speaking (if session is active)
-        setStatus('listening');
+        // Return to listening state after speaking (if session wasn't stopped)
+        if (!sessionStoppedRef.current) {
+          setStatus('listening');
+        }
         resolve();
       },
       onError: (error) => {
-        console.error('[VoiceContext] TTS error:', error);
+        console.warn('[VoiceContext] TTS error:', error);
         setIsSpeaking(false);
-        setStatus('listening');
+        if (!sessionStoppedRef.current) {
+          setStatus('listening');
+        }
         resolve();
       },
       onStopped: () => {
         console.log('[VoiceContext] TTS stopped (interrupted)');
         setIsSpeaking(false);
-        setStatus('listening');
+        // Don't set status to listening if session was stopped by user
+        if (!sessionStoppedRef.current) {
+          setStatus('listening');
+        }
         resolve();
       },
     });
@@ -348,6 +398,7 @@ export function VoiceProvider({ children }: { children: ReactNode }) {
    */
   const startSession = useCallback(() => {
     console.log('[VoiceContext] Starting voice session');
+    sessionStoppedRef.current = false;
     setStatus('listening');
     setIsListening(true);
     setError(null);
@@ -360,7 +411,16 @@ export function VoiceProvider({ children }: { children: ReactNode }) {
    */
   const stopSession = useCallback(() => {
     console.log('[VoiceContext] Stopping voice session');
+    // Mark session as stopped FIRST to prevent any pending callbacks
+    sessionStoppedRef.current = true;
+    // Abort any in-flight API requests
+    if (abortControllerRef.current) {
+      abortControllerRef.current.abort();
+      abortControllerRef.current = null;
+    }
+    // Stop TTS
     Speech.stop();
+    // Reset all state
     setStatus('idle');
     setIsListening(false);
     setIsSpeaking(false);
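The stop path above combines two guards: a flag for callbacks that cannot be cancelled, and an AbortController for fetches that can. Distilled into a standalone sketch (generic names, not the app's API):

```typescript
// Pattern: a "stopped" flag for work that can't be aborted,
// plus an AbortController for fetches that can.
class Session {
  private stopped = false;
  private controller: AbortController | null = null;

  async ask(url: string): Promise<string | null> {
    this.controller?.abort();               // cancel any previous request
    this.controller = new AbortController();
    try {
      const res = await fetch(url, { signal: this.controller.signal });
      if (this.stopped) return null;        // stopped while awaiting — discard
      return await res.text();
    } catch (err) {
      if (err instanceof Error && err.name === 'AbortError') return null;
      throw err;
    }
  }

  stop() {
    this.stopped = true;                    // flag first: blocks late callbacks
    this.controller?.abort();               // then kill in-flight work
  }
}
```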
@@ -120,7 +120,7 @@ export function useSpeechRecognition(
       setIsAvailable(true);
       console.log('[SpeechRecognition] Available, permission status:', status.status);
     } catch (err) {
-      console.error('[SpeechRecognition] Not available:', err);
+      console.warn('[SpeechRecognition] Not available:', err);
       setIsAvailable(false);
     }
   };
@@ -177,15 +177,20 @@ export function useSpeechRecognition(

   // Event: Error occurred
   useSpeechRecognitionEvent('error', (event: any) => {
-    const errorMessage = event.message || event.error || 'Speech recognition error';
-    console.error('[SpeechRecognition] Error:', errorMessage);
+    const errorCode = event.error || '';
+    const errorMessage = event.message || errorCode || 'Speech recognition error';

-    // Don't set error for "no-speech" - this is normal when user doesn't say anything
-    if (event.error !== 'no-speech') {
-      setError(errorMessage);
-      onError?.(errorMessage);
+    // "no-speech" is normal when user is silent — ignore completely
+    if (errorCode === 'no-speech') {
+      console.log('[SpeechRecognition] No speech detected (silence) - ignoring');
+      setIsListening(false);
+      isStartingRef.current = false;
+      return;
     }
+
+    console.warn('[SpeechRecognition] Error:', errorMessage);
+    setError(errorMessage);
+    onError?.(errorMessage);
     setIsListening(false);
     isStartingRef.current = false;
   });
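One behavioral note on the hunk above: treating `no-speech` as silence means the recognizer simply ends without surfacing an error, and the layout's unexpected-end restart effect (the `shouldRestartSTTRef` check earlier in this diff) brings listening back. The `'no-speech'` string is the error code the library reports here, matching the Web Speech API's `SpeechRecognitionErrorEvent` convention; whether other platforms report additional silence-like codes is not covered by this change.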
@@ -207,7 +212,7 @@ export function useSpeechRecognition(
     if (!isAvailable) {
       const msg = 'Speech recognition is not available on this device';
-      console.error('[SpeechRecognition]', msg);
+      console.warn('[SpeechRecognition]', msg);
       setError(msg);
       onError?.(msg);
       return false;
@@ -224,7 +229,7 @@ export function useSpeechRecognition(
     if (!permissionResult.granted) {
       const msg = 'Microphone permission denied';
-      console.error('[SpeechRecognition]', msg);
+      console.warn('[SpeechRecognition]', msg);
       setError(msg);
       onError?.(msg);
       isStartingRef.current = false;
@@ -249,7 +254,7 @@ export function useSpeechRecognition(
       return true;
     } catch (err) {
       const msg = err instanceof Error ? err.message : 'Failed to start speech recognition';
-      console.error('[SpeechRecognition] Start error:', msg);
+      console.warn('[SpeechRecognition] Start error:', msg);
       setError(msg);
       onError?.(msg);
       isStartingRef.current = false;
@@ -185,7 +185,7 @@ export function useTextToSpeech(
     },
     onError: (err) => {
       const errorMsg = typeof err === 'string' ? err : 'Speech synthesis error';
-      console.error('[TTS] Error:', errorMsg);
+      console.warn('[TTS] Error:', errorMsg);
       if (isMountedRef.current) {
         setIsSpeaking(false);
         setCurrentText(null);
@@ -227,7 +227,7 @@ export function useTextToSpeech(
       console.log('[TTS] Available voices:', voices.length);
       return voices;
     } catch (err) {
-      console.error('[TTS] Could not get voices:', err);
+      console.warn('[TTS] Could not get voices:', err);
       return [];
     }
   }, []);
package-lock.json (generated, 34 lines)
@@ -12,6 +12,7 @@
         "@expo/vector-icons": "^15.0.3",
         "@jamsch/expo-speech-recognition": "^0.2.15",
         "@notifee/react-native": "^9.1.8",
+        "@react-native-async-storage/async-storage": "2.2.0",
         "@react-navigation/bottom-tabs": "^7.4.0",
         "@react-navigation/elements": "^2.6.3",
         "@react-navigation/native": "^7.1.8",
@@ -3568,6 +3569,18 @@
         }
       }
     },
+    "node_modules/@react-native-async-storage/async-storage": {
+      "version": "2.2.0",
+      "resolved": "https://registry.npmjs.org/@react-native-async-storage/async-storage/-/async-storage-2.2.0.tgz",
+      "integrity": "sha512-gvRvjR5JAaUZF8tv2Kcq/Gbt3JHwbKFYfmb445rhOj6NUMx3qPLixmDx5pZAyb9at1bYvJ4/eTUipU5aki45xw==",
+      "license": "MIT",
+      "dependencies": {
+        "merge-options": "^3.0.4"
+      },
+      "peerDependencies": {
+        "react-native": "^0.0.0-0 || >=0.65 <1.0"
+      }
+    },
     "node_modules/@react-native/assets-registry": {
       "version": "0.81.5",
       "resolved": "https://registry.npmjs.org/@react-native/assets-registry/-/assets-registry-0.81.5.tgz",
@@ -8754,6 +8767,15 @@
         "url": "https://github.com/sponsors/ljharb"
       }
     },
+    "node_modules/is-plain-obj": {
+      "version": "2.1.0",
+      "resolved": "https://registry.npmjs.org/is-plain-obj/-/is-plain-obj-2.1.0.tgz",
+      "integrity": "sha512-YWnfyRwxL/+SsrWYfOpUtz5b3YD+nyfkHvjbcanzk8zgyO4ASD67uVMRt8k5bM4lLMDnXfriRhOpemw+NfT1eA==",
+      "license": "MIT",
+      "engines": {
+        "node": ">=8"
+      }
+    },
     "node_modules/is-regex": {
       "version": "1.2.1",
       "resolved": "https://registry.npmjs.org/is-regex/-/is-regex-1.2.1.tgz",
@@ -9785,6 +9807,18 @@
       "integrity": "sha512-zYiwtZUcYyXKo/np96AGZAckk+FWWsUdJ3cHGGmld7+AhvcWmQyGCYUh1hc4Q/pkOhb65dQR/pqCyK0cOaHz4Q==",
       "license": "MIT"
     },
+    "node_modules/merge-options": {
+      "version": "3.0.4",
+      "resolved": "https://registry.npmjs.org/merge-options/-/merge-options-3.0.4.tgz",
+      "integrity": "sha512-2Sug1+knBjkaMsMgf1ctR1Ujx+Ayku4EdJN4Z+C2+JzoeF7A3OZ9KM2GY0CpQS51NR61LTurMJrRKPhSs3ZRTQ==",
+      "license": "MIT",
+      "dependencies": {
+        "is-plain-obj": "^2.1.0"
+      },
+      "engines": {
+        "node": ">=10"
+      }
+    },
     "node_modules/merge-stream": {
       "version": "2.0.0",
       "resolved": "https://registry.npmjs.org/merge-stream/-/merge-stream-2.0.0.tgz",
@@ -15,6 +15,7 @@
     "@expo/vector-icons": "^15.0.3",
     "@jamsch/expo-speech-recognition": "^0.2.15",
     "@notifee/react-native": "^9.1.8",
+    "@react-native-async-storage/async-storage": "2.2.0",
     "@react-navigation/bottom-tabs": "^7.4.0",
     "@react-navigation/elements": "^2.6.3",
    "@react-navigation/native": "^7.1.8",
@@ -113,7 +113,7 @@ class ApiService {
       console.log('[API] refreshToken result:', result.ok ? 'SUCCESS' : result.error?.message);
       return result;
     } catch (error) {
-      console.error('[API] refreshToken error:', error);
+      console.warn('[API] refreshToken error:', error);
       return {
         ok: false,
         error: { message: 'Failed to refresh token', code: 'REFRESH_ERROR' }
@@ -46,7 +46,7 @@ class CallManager {
         await this.disconnectCallback();
         console.log(`[CallManager] Previous call disconnected`);
       } catch (err) {
-        console.error(`[CallManager] Error disconnecting previous call:`, err);
+        console.warn(`[CallManager] Error disconnecting previous call:`, err);
       }
     }
   }
@@ -98,7 +98,7 @@ class CallManager {
       try {
         await this.disconnectCallback();
       } catch (err) {
-        console.error(`[CallManager] Error force disconnecting:`, err);
+        console.warn(`[CallManager] Error force disconnecting:`, err);
       }
       this.activeCallId = null;
       this.disconnectCallback = null;
@@ -228,7 +228,7 @@ export async function createCall(options: {

     if (!response.ok) {
       const errorData = await response.json().catch(() => ({}));
-      console.error('[Ultravox] API error:', response.status, errorData);
+      console.warn('[Ultravox] API error:', response.status, errorData);
       return {
         success: false,
         error: errorData.message || `API error: ${response.status}`,
@@ -239,7 +239,7 @@ export async function createCall(options: {
     console.log('[Ultravox] Call created:', data.callId);
     return { success: true, data };
   } catch (error) {
-    console.error('[Ultravox] Create call error:', error);
+    console.warn('[Ultravox] Create call error:', error);
     return {
       success: false,
       error: error instanceof Error ? error.message : 'Failed to create call',
@@ -265,7 +265,7 @@ export async function getCall(callId: string): Promise<CreateCallResponse | null> {

     return await response.json();
   } catch (error) {
-    console.error('[Ultravox] Get call error:', error);
+    console.warn('[Ultravox] Get call error:', error);
     return null;
   }
 }
@@ -284,7 +284,7 @@ export async function endCall(callId: string): Promise<boolean> {

     return response.ok;
   } catch (error) {
-    console.error('[Ultravox] End call error:', error);
+    console.warn('[Ultravox] End call error:', error);
     return false;
   }
 }
@@ -23,7 +23,7 @@ async function getNotifee() {
     try {
       notifee = (await import('@notifee/react-native')).default;
     } catch (e) {
-      console.error('[AndroidVoiceService] Failed to load notifee:', e);
+      console.warn('[AndroidVoiceService] Failed to load notifee:', e);
       return null;
     }
   }
@@ -52,7 +52,7 @@ async function createNotificationChannel(): Promise<void> {
     });
     console.log('[AndroidVoiceService] Notification channel created');
   } catch (e) {
-    console.error('[AndroidVoiceService] Failed to create channel:', e);
+    console.warn('[AndroidVoiceService] Failed to create channel:', e);
   }
 }
@@ -102,7 +102,7 @@ export async function startVoiceCallService(): Promise<void> {

     console.log('[AndroidVoiceService] Foreground service started');
   } catch (e) {
-    console.error('[AndroidVoiceService] Failed to start foreground service:', e);
+    console.warn('[AndroidVoiceService] Failed to start foreground service:', e);
   }
 }
@@ -123,7 +123,7 @@ export async function stopVoiceCallService(): Promise<void> {
     await notifeeModule.cancelNotification(NOTIFICATION_ID);
     console.log('[AndroidVoiceService] Foreground service stopped');
   } catch (e) {
-    console.error('[AndroidVoiceService] Failed to stop foreground service:', e);
+    console.warn('[AndroidVoiceService] Failed to stop foreground service:', e);
   }
 }
@@ -178,7 +178,7 @@ export async function openBatteryOptimizationSettings(): Promise<void> {
     // Try generic battery settings
     await Linking.openSettings();
   } catch (e) {
-    console.error('[AndroidVoiceService] Failed to open settings:', e);
+    console.warn('[AndroidVoiceService] Failed to open settings:', e);
   }
 }
@@ -262,7 +262,7 @@ export async function requestNotificationPermission(): Promise<boolean> {
     console.log('[AndroidVoiceService] Notification permission:', granted ? 'granted' : 'denied');
     return granted;
   } catch (e) {
-    console.error('[AndroidVoiceService] Failed to request notification permission:', e);
+    console.warn('[AndroidVoiceService] Failed to request notification permission:', e);
     return false;
   }
 }