Compare commits
No commits in common. "d6353c85336878f1257b79d2777fd2f70fd0293f" and "f2803ca5db759664ebcb825bd686be7b4600aae9" have entirely different histories.
d6353c8533...f2803ca5db
@@ -1 +0,0 @@
Limit reached · resets 1pm (America/Los_Angeles) · turn on /extra-usage
@@ -1,6 +1,6 @@
import { Tabs } from 'expo-router';
import React, { useCallback, useEffect, useRef } from 'react';
import { Platform, View, AppState, AppStateStatus, TouchableOpacity, StyleSheet } from 'react-native';
import { Platform, View, AppState, AppStateStatus } from 'react-native';
import { Feather } from '@expo/vector-icons';
import { useSafeAreaInsets } from 'react-native-safe-area-context';

@@ -29,7 +29,6 @@ export default function TabLayout() {
interruptIfSpeaking,
setTranscript,
setPartialTranscript,
partialTranscript, // for iOS auto-stop timer
sendTranscript,
} = useVoice();

@@ -41,13 +40,14 @@
const pendingInterruptTranscriptRef = useRef<string | null>(null);

// Callback for voice detection - interrupt TTS when user speaks
// NOTE: On Android, STT doesn't run during TTS (shared audio focus),
// so interruption on Android happens via FAB press instead.
// On iOS, STT can run alongside TTS, so voice detection works.
const handleVoiceDetected = useCallback(() => {
if (Platform.OS === 'ios' && (status === 'speaking' || isSpeaking)) {
console.log('[TabLayout] Voice detected during TTS (iOS) - INTERRUPTING Julia');
interruptIfSpeaking();
// Interrupt TTS when user starts speaking during 'speaking' state
if (status === 'speaking' || isSpeaking) {
console.log('[TabLayout] Voice detected during TTS playback - INTERRUPTING Julia');
const wasInterrupted = interruptIfSpeaking();
if (wasInterrupted) {
console.log('[TabLayout] TTS interrupted successfully, now listening to user');
}
}
}, [status, isSpeaking, interruptIfSpeaking]);
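// --- Illustrative aside (not part of the diff): the barge-in check above in
// isolation. A minimal sketch, assuming a hypothetical `tts` facade with
// isSpeaking()/stop(); the app's real interruptIfSpeaking() lives in VoiceContext.
import { Platform } from 'react-native';

function makeBargeInHandler(tts: { isSpeaking: () => boolean; stop: () => void }) {
  return () => {
    // Android never reaches here while TTS plays (STT and TTS share audio
    // focus there), so the platform guard effectively limits barge-in to iOS.
    if (Platform.OS === 'ios' && tts.isSpeaking()) {
      tts.stop();
    }
  };
}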

@@ -63,24 +63,21 @@

// Callback for STT results
const handleSpeechResult = useCallback((transcript: string, isFinal: boolean) => {
// Ignore any STT results during TTS playback or processing (echo prevention)
if (status === 'speaking' || status === 'processing') {
if (isFinal) {
// User interrupted Julia with speech — store to send after TTS stops
console.log('[TabLayout] Got final result during TTS/processing - storing for after interruption:', transcript);
// Check if we're still in speaking mode (user interrupted Julia)
if (isSpeaking || status === 'speaking') {
// Store the transcript to send after TTS fully stops
console.log('[TabLayout] Got final result while TTS playing - storing for after interruption:', transcript);
pendingInterruptTranscriptRef.current = transcript;
}
// Ignore partial transcripts during TTS (they're likely echo)
return;
}

if (isFinal) {
} else {
// Normal case: not speaking, send immediately
setTranscript(transcript);
sendTranscript(transcript);
}
} else {
setPartialTranscript(transcript);
}
}, [setTranscript, setPartialTranscript, sendTranscript, status]);
}, [setTranscript, setPartialTranscript, sendTranscript, isSpeaking, status]);

// Speech recognition with voice detection callback
const {
@@ -88,7 +85,7 @@
stopListening,
isListening: sttIsListening,
} = useSpeechRecognition({
lang: 'en-US',
lang: 'ru-RU',
continuous: true,
interimResults: true,
onVoiceDetected: handleVoiceDetected,
@@ -96,65 +93,6 @@
onEnd: handleSTTEnd,
});

// Ref to prevent concurrent startListening calls
const sttStartingRef = useRef(false);
// Ref to track last partial transcript for iOS auto-stop
const lastPartialTextRef = useRef('');
const silenceTimerRef = useRef<NodeJS.Timeout | null>(null);

// iOS AUTO-STOP: Stop STT after 2 seconds of silence (no new partial transcripts)
// This triggers onEnd → iOS fix sends lastPartial as final
useEffect(() => {
// Clear existing timer
if (silenceTimerRef.current) {
clearTimeout(silenceTimerRef.current);
silenceTimerRef.current = null;
}

// Only track silence when STT is listening (not during processing/speaking)
if (sttIsListening && status !== 'processing' && status !== 'speaking') {
// Get current partial from VoiceContext (set by handleSpeechResult)
const currentPartial = partialTranscript;

// If partial changed, update ref and set new 2s timer
if (currentPartial !== lastPartialTextRef.current) {
lastPartialTextRef.current = currentPartial;

// Start 2-second silence timer
silenceTimerRef.current = setTimeout(() => {
if (sttIsListening && sessionActiveRef.current) {
console.log('[TabLayout] 🍎 iOS AUTO-STOP: 2s silence - stopping STT to trigger onEnd → iOS fix');
stopListening();
}
}, 2000);
}
}

return () => {
if (silenceTimerRef.current) {
clearTimeout(silenceTimerRef.current);
silenceTimerRef.current = null;
}
};
}, [sttIsListening, status, partialTranscript, stopListening]);
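// --- Illustrative aside (not part of the diff): the same 2s-silence auto-stop
// factored as a reusable hook. A sketch under assumed names (useSilenceAutoStop,
// onSilence); not the codebase's API.
import { useEffect, useRef } from 'react';

function useSilenceAutoStop(
  partial: string,          // latest partial transcript
  active: boolean,          // only arm while actually listening
  onSilence: () => void,    // e.g. stopListening
  ms = 2000,
) {
  const last = useRef(partial);
  useEffect(() => {
    if (!active || partial === last.current) return;
    last.current = partial;                 // new speech arrived
    const t = setTimeout(onSilence, ms);    // re-armed on every change
    return () => clearTimeout(t);           // cleared by the next partial
  }, [partial, active, onSilence, ms]);
}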

// Safe wrapper to start STT with debounce protection
const safeStartSTT = useCallback(() => {
if (sttIsListening || sttStartingRef.current) {
return; // Already listening or starting
}
// Don't start STT during TTS on Android - they share audio focus
if (Platform.OS === 'android' && (status === 'speaking' || isSpeaking)) {
console.log('[TabLayout] Skipping STT start - TTS is playing (Android audio focus)');
return;
}
sttStartingRef.current = true;
console.log('[TabLayout] Starting STT...');
startListening().finally(() => {
sttStartingRef.current = false;
});
}, [sttIsListening, status, isSpeaking, startListening]);

// Update session active ref when isListening changes
useEffect(() => {
sessionActiveRef.current = isListening;
@@ -166,32 +104,42 @@
// Start/stop STT when voice session starts/stops
useEffect(() => {
if (isListening) {
console.log('[TabLayout] Voice session started - starting STT');
safeStartSTT();
console.log('[TabLayout] Starting STT for voice session');
startListening();
} else {
console.log('[TabLayout] Voice session ended - stopping STT');
console.log('[TabLayout] Stopping STT - session ended');
stopListening();
}
}, [isListening]); // eslint-disable-line react-hooks/exhaustive-deps
}, [isListening, startListening, stopListening]);

// Restart STT if it ended while session is still active
// This ensures continuous listening even during/after TTS playback
useEffect(() => {
if (shouldRestartSTTRef.current && sessionActiveRef.current && !sttIsListening) {
console.log('[TabLayout] Restarting STT - session still active');
shouldRestartSTTRef.current = false;
// Small delay to ensure clean restart
const timer = setTimeout(() => {
if (sessionActiveRef.current) {
startListening();
}
}, 100);
return () => clearTimeout(timer);
}
}, [sttIsListening, startListening]);

// Track previous status to detect transition from speaking to listening
const prevStatusRef = useRef<typeof status>('idle');

// Stop STT when entering processing or speaking state (prevent echo)
// Restart STT when TTS finishes (speaking → listening)
// Auto-restart STT when TTS finishes (status changes from 'speaking' to 'listening')
// Also process any pending transcript from user interruption
useEffect(() => {
const prevStatus = prevStatusRef.current;
prevStatusRef.current = status;

// Stop STT when processing starts or TTS starts (prevent Julia hearing herself)
if ((status === 'processing' || status === 'speaking') && sttIsListening) {
console.log('[TabLayout] Stopping STT during', status, '(echo prevention)');
stopListening();
}

// When TTS finishes (speaking → listening), restart STT
// When transitioning from speaking to listening, handle pending interrupt transcript
if (prevStatus === 'speaking' && status === 'listening' && sessionActiveRef.current) {
console.log('[TabLayout] TTS finished - restarting STT');
console.log('[TabLayout] TTS finished/interrupted - checking for pending transcript');

// Process pending transcript from interruption if any
const pendingTranscript = pendingInterruptTranscriptRef.current;
@@ -202,74 +150,76 @@
sendTranscript(pendingTranscript);
}

// Delay to let TTS fully release audio focus, then restart STT
// Small delay to ensure TTS cleanup is complete, then restart STT
const timer = setTimeout(() => {
if (sessionActiveRef.current) {
safeStartSTT();
if (sessionActiveRef.current && !sttIsListening) {
startListening();
}
}, 800); // 800ms to ensure TTS audio fully fades
}, 200);
return () => clearTimeout(timer);
}
}, [status, sttIsListening, startListening, setTranscript, sendTranscript]);

// When processing finishes and goes to speaking, STT is already stopped (above)
// When speaking finishes and goes to listening, STT restarts (above)
}, [status]); // eslint-disable-line react-hooks/exhaustive-deps
// ============================================================================
// TAB NAVIGATION PERSISTENCE
// Ensure voice session continues when user switches between tabs.
// The session state is in VoiceContext (root level), but STT may stop due to:
// 1. Native audio session changes
// 2. Tab unmount/remount (though tabs layout doesn't unmount)
// 3. AppState changes (background/foreground)
// ============================================================================

// When STT ends unexpectedly during active session, restart it (but not during TTS)
// Monitor and recover STT state during tab navigation
// If session is active but STT stopped unexpectedly, restart it
// IMPORTANT: STT should run DURING TTS playback to detect user interruption!
useEffect(() => {
// Check every 500ms if STT needs to be restarted
const intervalId = setInterval(() => {
// Only act if session should be active (isListening from VoiceContext)
// but STT is not actually listening
// Note: We DO want STT running during 'speaking' to detect interruption!
// Only skip during 'processing' (API call in progress)
if (
shouldRestartSTTRef.current &&
sessionActiveRef.current &&
!sttIsListening &&
status !== 'processing' &&
status !== 'speaking'
status !== 'processing'
) {
shouldRestartSTTRef.current = false;
console.log('[TabLayout] STT ended unexpectedly - restarting');
const timer = setTimeout(() => {
if (sessionActiveRef.current) {
safeStartSTT();
console.log('[TabLayout] STT watchdog: restarting STT (session active but STT stopped, status:', status, ')');
startListening();
}
}, 300);
return () => clearTimeout(timer);
}
}, [sttIsListening]); // eslint-disable-line react-hooks/exhaustive-deps
}, 500);

return () => clearInterval(intervalId);
}, [sttIsListening, status, startListening]);
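// --- Illustrative aside (not part of the diff): the watchdog above reduced to
// its core. A hedged sketch; shouldRun/isRunning/start are placeholders for
// sessionActiveRef, sttIsListening, and startListening.
function startWatchdog(
  shouldRun: () => boolean,
  isRunning: () => boolean,
  start: () => void,
  periodMs = 500,
): () => void {
  const id = setInterval(() => {
    if (shouldRun() && !isRunning()) start(); // recover a dropped resource
  }, periodMs);
  return () => clearInterval(id); // dispose in the useEffect cleanup
}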

// Handle app state changes (background/foreground)
// When app comes back to foreground, restart STT if session was active
useEffect(() => {
const handleAppStateChange = (nextAppState: AppStateStatus) => {
if (nextAppState === 'active' && sessionActiveRef.current) {
// App came to foreground, give it a moment then check STT
// STT should run even during 'speaking' to detect user interruption
setTimeout(() => {
if (sessionActiveRef.current && !sttIsListening && status !== 'processing' && status !== 'speaking') {
if (sessionActiveRef.current && !sttIsListening && status !== 'processing') {
console.log('[TabLayout] App foregrounded - restarting STT');
safeStartSTT();
startListening();
}
}, 500);
}, 300);
}
};

const subscription = AppState.addEventListener('change', handleAppStateChange);
return () => subscription.remove();
}, [sttIsListening, status, safeStartSTT]);
}, [sttIsListening, status, startListening]);

// Handle voice FAB press - toggle listening mode
// Must check ALL active states (listening, processing, speaking), not just isListening
const handleVoiceFABPress = useCallback(() => {
const isSessionActive = isListening || status === 'speaking' || status === 'processing';
console.log('[TabLayout] FAB pressed, isSessionActive:', isSessionActive, 'status:', status, 'isListening:', isListening);

if (isSessionActive) {
// Force-stop everything: STT, TTS, and session state
console.log('[TabLayout] Force-stopping everything');
stopListening();
if (isListening) {
stopSession();
sessionActiveRef.current = false;
shouldRestartSTTRef.current = false;
pendingInterruptTranscriptRef.current = null;
} else {
startSession();
}
}, [isListening, status, startSession, stopSession, stopListening]);
}, [isListening, startSession, stopSession]);

// Calculate tab bar height based on safe area
// On iOS with home indicator, insets.bottom is ~34px
@@ -328,28 +278,6 @@
),
}}
/>
{/* Voice FAB - center tab button */}
<Tabs.Screen
name="explore"
options={{
title: '',
tabBarButton: () => (
<View style={tabFABStyles.fabWrapper}>
<VoiceFAB onPress={handleVoiceFABPress} isListening={isListening || status === 'speaking' || status === 'processing'} />
</View>
),
}}
/>
{/* Voice Debug - visible tab */}
<Tabs.Screen
name="voice-debug"
options={{
title: 'Debug',
tabBarIcon: ({ color, size }) => (
<Feather name="activity" size={22} color={color} />
),
}}
/>
<Tabs.Screen
name="profile"
options={{
@@ -359,6 +287,13 @@ export default function TabLayout() {
),
}}
/>
{/* Hide explore tab */}
<Tabs.Screen
name="explore"
options={{
href: null,
}}
/>
{/* Audio Debug - hidden */}
<Tabs.Screen
name="audio-debug"
@@ -374,15 +309,9 @@ export default function TabLayout() {
}}
/>
</Tabs>

{/* Voice FAB - toggle listening mode */}
<VoiceFAB onPress={handleVoiceFABPress} isListening={isListening} />
</View>
);
}

const tabFABStyles = StyleSheet.create({
fabWrapper: {
flex: 1,
alignItems: 'center',
justifyContent: 'center',
top: -20,
},
});

@@ -40,7 +40,7 @@ export default function BeneficiaryDashboardScreen() {
setUserId(uid);
console.log('Loaded credentials for WebView:', { hasToken: !!token, user, uid });
} catch (err) {
console.warn('Failed to load credentials:', err);
console.error('Failed to load credentials:', err);
} finally {
setIsTokenLoaded(true);
}
@@ -80,7 +80,7 @@ export default function BeneficiaryDashboardScreen() {
console.log('MobileAppLogin timeout - function not found');
}, 5000);
} catch(e) {
console.warn('Failed to call MobileAppLogin:', e);
console.error('Failed to call MobileAppLogin:', e);
}
})();
true;

@@ -25,8 +25,6 @@ import { useRouter, useFocusEffect } from 'expo-router';
import { api } from '@/services/api';
import { useBeneficiary } from '@/contexts/BeneficiaryContext';
import { useVoiceTranscript } from '@/contexts/VoiceTranscriptContext';
import { useVoice } from '@/contexts/VoiceContext';
import { useChat } from '@/contexts/ChatContext';
import { useTextToSpeech } from '@/hooks/useTextToSpeech';
import { AppColors, BorderRadius, FontSizes, Spacing } from '@/constants/theme';
import type { Message, Beneficiary } from '@/types';
@@ -115,9 +113,6 @@ export default function ChatScreen() {
const { currentBeneficiary, setCurrentBeneficiary } = useBeneficiary();
const { transcript, hasNewTranscript, markTranscriptAsShown, getTranscriptAsMessages } = useVoiceTranscript();

// Voice context for real-time transcript display and checking if voice session is active
const { partialTranscript: voicePartial, isListening: voiceIsListening, status: voiceStatus, isActive: voiceIsActive } = useVoice();

// TTS for reading Julia's responses aloud
const { speak, stop: stopTTS, isSpeaking } = useTextToSpeech({
language: 'ru-RU',
@@ -136,8 +131,8 @@ export default function ChatScreen() {
const [customDeploymentId, setCustomDeploymentId] = useState<string | null>(null);
const [deploymentName, setDeploymentName] = useState<string | null>(null);

// Chat state - stored in context to persist across tab navigation
const { messages, setMessages } = useChat();
// Chat state - initialized after deployment ID is loaded
const [messages, setMessages] = useState<Message[]>([createInitialMessage(null)]);
const [sortNewestFirst, setSortNewestFirst] = useState(false);

const [input, setInput] = useState('');
@@ -150,23 +145,6 @@ export default function ChatScreen() {
inputRef.current = input;
}, [input]);

// Show partial voice transcript in input field in real-time
useEffect(() => {
if (voiceIsListening && voicePartial) {
setInput(voicePartial);
}
}, [voicePartial, voiceIsListening]);

// Clear input when voice switches to processing (transcript was sent)
const prevVoiceStatusRef = useRef(voiceStatus);
useEffect(() => {
const prev = prevVoiceStatusRef.current;
prevVoiceStatusRef.current = voiceStatus;
if (prev === 'listening' && voiceStatus === 'processing') {
setInput('');
}
}, [voiceStatus]);
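// --- Illustrative aside (not part of the diff): the prev-ref comparison above
// is the standard "previous value" pattern; a generic hedged form (usePrevious
// is an assumed name, not in this codebase):
import { useEffect, useRef } from 'react';

function usePrevious<T>(value: T): T | undefined {
  const ref = useRef<T | undefined>(undefined);
  useEffect(() => {
    ref.current = value; // written after render, so reads during render see the prior value
  }, [value]);
  return ref.current;
}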

// Beneficiary picker
const [showBeneficiaryPicker, setShowBeneficiaryPicker] = useState(false);
const [beneficiaries, setBeneficiaries] = useState<Beneficiary[]>([]);
@@ -257,7 +235,7 @@ export default function ChatScreen() {
}
return [];
} catch (error) {
console.warn('Failed to load beneficiaries:', error);
console.error('Failed to load beneficiaries:', error);
return [];
} finally {
setLoadingBeneficiaries(false);
@@ -414,11 +392,8 @@ export default function ChatScreen() {
};
setMessages(prev => [...prev, assistantMessage]);

// Only speak the response if voice session is active (FAB pressed)
// Don't auto-speak for text-only chat messages
if (voiceIsActive) {
// Speak the response using TTS
speak(responseText);
}
} else {
// Token might be expired, clear and retry once
if (data.status === '401 Unauthorized') {
@@ -438,7 +413,7 @@ export default function ChatScreen() {
} finally {
setIsSending(false);
}
}, [isSending, getWellNuoToken, customDeploymentId, currentBeneficiary, beneficiaries, speak, voiceIsActive]);
}, [isSending, getWellNuoToken, customDeploymentId, currentBeneficiary, beneficiaries, speak]);

// Render message bubble
const renderMessage = ({ item }: { item: Message }) => {
@@ -635,9 +610,9 @@ export default function ChatScreen() {
{/* Input */}
<View style={styles.inputContainer}>
<TextInput
style={[styles.input, voiceIsListening && styles.inputListening]}
placeholder={voiceIsListening ? "Listening..." : "Type a message..."}
placeholderTextColor={voiceIsListening ? AppColors.error : AppColors.textMuted}
style={styles.input}
placeholder="Type a message..."
placeholderTextColor={AppColors.textMuted}
value={input}
onChangeText={setInput}
multiline
@@ -799,10 +774,6 @@ const styles = StyleSheet.create({
maxHeight: 100,
marginRight: Spacing.sm,
},
inputListening: {
borderWidth: 1.5,
borderColor: AppColors.error,
},
sendButton: {
width: 44,
height: 44,

@@ -51,7 +51,7 @@ export default function HomeScreen() {
});
}
} catch (err) {
console.warn('Failed to load credentials:', err);
console.error('Failed to load credentials:', err);
} finally {
setIsTokenLoaded(true);
}
@@ -126,11 +126,11 @@ export default function HomeScreen() {
webViewRef.current?.injectJavaScript(injectScript);
}
} else {
console.warn('Token refresh failed');
console.error('Token refresh failed');
setError('Session expired. Please restart the app.');
}
} catch (err) {
console.warn('Error refreshing token:', err);
console.error('Error refreshing token:', err);
} finally {
setIsRefreshingToken(false);
}
@@ -178,7 +178,7 @@ export default function HomeScreen() {
observer.observe(document.body, { childList: true, subtree: true });
}
} catch(e) {
console.warn('Failed to inject token:', e);
console.error('Failed to inject token:', e);
}
})();
true;

@@ -15,7 +15,6 @@ import { router } from 'expo-router';
import { Ionicons } from '@expo/vector-icons';
import { SafeAreaView } from 'react-native-safe-area-context';
import { useAuth } from '@/contexts/AuthContext';
import { useVoice } from '@/contexts/VoiceContext';
import { api } from '@/services/api';
import { AppColors, BorderRadius, FontSizes, Spacing } from '@/constants/theme';

@@ -56,7 +55,6 @@ function MenuItem({

export default function ProfileScreen() {
const { user, logout } = useAuth();
const { updateVoiceApiType } = useVoice();
const [deploymentId, setDeploymentId] = useState<string>('');
const [deploymentName, setDeploymentName] = useState<string>('');
const [showDeploymentModal, setShowDeploymentModal] = useState(false);
@@ -64,11 +62,6 @@ export default function ProfileScreen() {
const [isValidating, setIsValidating] = useState(false);
const [validationError, setValidationError] = useState<string | null>(null);

// Voice API Type state
const [voiceApiType, setVoiceApiType] = useState<'voice_ask' | 'ask_wellnuo_ai'>('ask_wellnuo_ai');
const [showVoiceApiModal, setShowVoiceApiModal] = useState(false);
const [tempVoiceApiType, setTempVoiceApiType] = useState<'voice_ask' | 'ask_wellnuo_ai'>('ask_wellnuo_ai');

// Load saved deployment ID or auto-populate from first available
useEffect(() => {
const loadDeploymentId = async () => {
@@ -95,26 +88,12 @@ export default function ProfileScreen() {
loadDeploymentId();
}, []);

// Load saved Voice API type
useEffect(() => {
const loadVoiceApiType = async () => {
const saved = await api.getVoiceApiType();
setVoiceApiType(saved);
};
loadVoiceApiType();
}, []);

const openDeploymentModal = useCallback(() => {
setTempDeploymentId(deploymentId);
setValidationError(null);
setShowDeploymentModal(true);
}, [deploymentId]);

const openVoiceApiModal = useCallback(() => {
setTempVoiceApiType(voiceApiType);
setShowVoiceApiModal(true);
}, [voiceApiType]);

const saveDeploymentId = useCallback(async () => {
const trimmed = tempDeploymentId.trim();
setValidationError(null);
@@ -149,13 +128,6 @@ export default function ProfileScreen() {
}
}, [tempDeploymentId]);

const saveVoiceApiType = useCallback(async () => {
await api.setVoiceApiType(tempVoiceApiType);
setVoiceApiType(tempVoiceApiType);
updateVoiceApiType(tempVoiceApiType);
setShowVoiceApiModal(false);
}, [tempVoiceApiType, updateVoiceApiType]);

const openTerms = () => {
router.push('/terms');
};
@@ -213,15 +185,6 @@ export default function ProfileScreen() {
subtitle={deploymentId ? (deploymentName || `ID: ${deploymentId}`) : 'Auto'}
onPress={openDeploymentModal}
/>
<View style={styles.menuDivider} />
<MenuItem
icon="radio-outline"
iconColor="#9333EA"
iconBgColor="#F3E8FF"
title="Voice API"
subtitle={voiceApiType === 'voice_ask' ? 'voice_ask' : 'ask_wellnuo_ai (LLaMA)'}
onPress={openVoiceApiModal}
/>
</View>
</View>

@@ -308,65 +271,6 @@ export default function ProfileScreen() {
</View>
</KeyboardAvoidingView>
</Modal>

{/* Voice API Modal */}
<Modal
visible={showVoiceApiModal}
transparent
animationType="fade"
onRequestClose={() => setShowVoiceApiModal(false)}
>
<View style={styles.modalOverlay}>
<View style={styles.modalContent}>
<Text style={styles.modalTitle}>Voice API</Text>
<Text style={styles.modalDescription}>
Choose which API function to use for voice requests.
</Text>

{/* Radio buttons */}
<TouchableOpacity
style={styles.radioOption}
onPress={() => setTempVoiceApiType('ask_wellnuo_ai')}
>
<View style={styles.radioCircle}>
{tempVoiceApiType === 'ask_wellnuo_ai' && <View style={styles.radioCircleSelected} />}
</View>
<View style={styles.radioTextContainer}>
<Text style={styles.radioLabel}>ask_wellnuo_ai</Text>
<Text style={styles.radioDescription}>LLaMA with WellNuo data</Text>
</View>
</TouchableOpacity>

<TouchableOpacity
style={styles.radioOption}
onPress={() => setTempVoiceApiType('voice_ask')}
>
<View style={styles.radioCircle}>
{tempVoiceApiType === 'voice_ask' && <View style={styles.radioCircleSelected} />}
</View>
<View style={styles.radioTextContainer}>
<Text style={styles.radioLabel}>voice_ask</Text>
<Text style={styles.radioDescription}>Alternative voice API</Text>
</View>
</TouchableOpacity>

<View style={styles.modalButtons}>
<TouchableOpacity
style={styles.modalButtonCancel}
onPress={() => setShowVoiceApiModal(false)}
>
<Text style={styles.modalButtonCancelText}>Cancel</Text>
</TouchableOpacity>
<TouchableOpacity
style={styles.modalButtonSave}
onPress={saveVoiceApiType}
>
<Text style={styles.modalButtonSaveText}>Save</Text>
</TouchableOpacity>
</View>
</View>
</View>
</Modal>
</SafeAreaView>
);
}
@@ -568,40 +472,4 @@ const styles = StyleSheet.create({
disabledText: {
opacity: 0.5,
},
// Radio button styles
radioOption: {
flexDirection: 'row',
alignItems: 'center',
paddingVertical: Spacing.sm + 4,
marginBottom: Spacing.xs,
},
radioCircle: {
width: 24,
height: 24,
borderRadius: 12,
borderWidth: 2,
borderColor: AppColors.primary,
alignItems: 'center',
justifyContent: 'center',
marginRight: Spacing.md,
},
radioCircleSelected: {
width: 12,
height: 12,
borderRadius: 6,
backgroundColor: AppColors.primary,
},
radioTextContainer: {
flex: 1,
},
radioLabel: {
fontSize: FontSizes.base,
fontWeight: '500',
color: AppColors.textPrimary,
marginBottom: 2,
},
radioDescription: {
fontSize: FontSizes.xs,
color: AppColors.textSecondary,
},
});

@@ -1,504 +0,0 @@
/**
* Voice Debug Screen
*
* Real-time debugging interface for voice recognition pipeline.
* Shows all events, timers, API calls, and state changes.
*/

import React, { useState, useEffect, useRef, useCallback } from 'react';
import {
View,
Text,
ScrollView,
StyleSheet,
TouchableOpacity,
} from 'react-native';
import { useSafeAreaInsets } from 'react-native-safe-area-context';
import { Feather } from '@expo/vector-icons';

import { useVoice } from '@/contexts/VoiceContext';
import { useSpeechRecognition } from '@/hooks/useSpeechRecognition';
import { AppColors } from '@/constants/theme';
import { useColorScheme } from '@/hooks/use-color-scheme';

interface LogEntry {
id: string;
timestamp: number;
category: 'stt' | 'api' | 'tts' | 'timer' | 'system';
message: string;
level: 'info' | 'warning' | 'error' | 'success';
data?: any;
}

export default function VoiceDebugScreen() {
const colorScheme = useColorScheme();
const isDark = colorScheme === 'dark';
const insets = useSafeAreaInsets();

const {
isListening,
isSpeaking,
status,
startSession,
stopSession,
} = useVoice();

const {
isListening: sttIsListening,
partialTranscript,
recognizedText,
} = useSpeechRecognition({
lang: 'en-US',
continuous: true,
interimResults: true,
});

const [logs, setLogs] = useState<LogEntry[]>([]);
const [silenceTimer, setSilenceTimer] = useState(0);
const scrollViewRef = useRef<ScrollView>(null);
const logIdCounter = useRef(0);
const lastPartialRef = useRef('');

// Add log entry
const addLog = useCallback((
category: LogEntry['category'],
message: string,
level: LogEntry['level'] = 'info',
data?: any
) => {
const entry: LogEntry = {
id: `log-${logIdCounter.current++}`,
timestamp: Date.now(),
category,
message,
level,
data,
};

console.log(`[VoiceDebug:${category}]`, message, data || '');

setLogs(prev => {
const updated = [...prev, entry];
// Keep only last 100 logs
return updated.slice(-100);
});

setTimeout(() => {
scrollViewRef.current?.scrollToEnd({ animated: true });
}, 50);
}, []);
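// --- Illustrative aside (not part of the diff): the slice(-100) above caps the
// log buffer; the same idea as a standalone helper (appendCapped is an assumed
// name, not in this codebase):
function appendCapped<T>(buf: T[], item: T, cap = 100): T[] {
  const next = [...buf, item];
  return next.length > cap ? next.slice(-cap) : next; // drop oldest entries
}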

// Clear logs
const clearLogs = useCallback(() => {
setLogs([]);
logIdCounter.current = 0;
addLog('system', 'Logs cleared', 'info');
}, [addLog]);

// Monitor voice session state
useEffect(() => {
if (isListening) {
addLog('system', '🎤 Voice session STARTED', 'success');
} else {
addLog('system', '⏹️ Voice session STOPPED', 'info');
setSilenceTimer(0);
}
}, [isListening, addLog]);

// Monitor STT state
useEffect(() => {
if (sttIsListening) {
addLog('stt', '▶️ STT listening started', 'success');
} else if (isListening) {
addLog('stt', '⏸️ STT stopped (but session active)', 'warning');
}
}, [sttIsListening, isListening, addLog]);

// Monitor status changes
useEffect(() => {
if (status === 'processing') {
addLog('api', '⚙️ Processing transcript → sending to API', 'info');
} else if (status === 'speaking') {
addLog('tts', '🔊 TTS playing (Julia speaking)', 'info');
} else if (status === 'listening') {
addLog('system', '👂 Ready to listen', 'info');
}
}, [status, addLog]);

// Monitor partial transcripts
useEffect(() => {
if (partialTranscript && partialTranscript !== lastPartialRef.current) {
lastPartialRef.current = partialTranscript;
addLog('stt', `📝 Partial: "${partialTranscript.slice(0, 40)}${partialTranscript.length > 40 ? '...' : ''}"`, 'info');

// Reset silence timer
setSilenceTimer(0);
addLog('timer', '🔄 Silence timer RESET', 'warning');
}
}, [partialTranscript, addLog]);

// Monitor final transcripts
useEffect(() => {
if (recognizedText && recognizedText !== lastPartialRef.current) {
addLog('stt', `✅ FINAL: "${recognizedText.slice(0, 40)}${recognizedText.length > 40 ? '...' : ''}"`, 'success', {
length: recognizedText.length,
transcript: recognizedText
});
addLog('api', '📤 Sending to API...', 'info');
}
}, [recognizedText, addLog]);

// Silence timer (only when STT is listening and not processing/speaking)
useEffect(() => {
let interval: NodeJS.Timeout | null = null;

if (sttIsListening && status !== 'processing' && status !== 'speaking') {
interval = setInterval(() => {
setSilenceTimer(prev => {
const next = prev + 100;

// Log milestones
if (next === 1000) {
addLog('timer', '⏱️ Silence: 1.0s', 'info');
} else if (next === 1500) {
addLog('timer', '⏱️ Silence: 1.5s', 'warning');
} else if (next === 2000) {
addLog('timer', '🛑 Silence: 2.0s → AUTO-STOP triggered', 'error');
}

return next;
});
}, 100);
} else {
setSilenceTimer(0);
}

return () => {
if (interval) clearInterval(interval);
};
}, [sttIsListening, status, addLog]);

// Get status indicator
const getStatusDisplay = () => {
if (status === 'speaking' || isSpeaking) {
return { color: '#9333EA', icon: '🔊', text: 'Speaking' };
}
if (status === 'processing') {
return { color: '#F59E0B', icon: '⚙️', text: 'Processing' };
}
if (isListening && sttIsListening) {
return { color: '#10B981', icon: '🟢', text: 'Listening' };
}
if (isListening && !sttIsListening) {
return { color: '#F59E0B', icon: '🟡', text: 'Session Active (STT Off)' };
}
return { color: '#6B7280', icon: '⚪', text: 'Idle' };
};

const statusDisplay = getStatusDisplay();
const silenceProgress = Math.min(silenceTimer / 2000, 1);
const silenceSeconds = (silenceTimer / 1000).toFixed(1);

// Log level colors
const getLogColor = (level: LogEntry['level']) => {
switch (level) {
case 'error': return '#EF4444';
case 'warning': return '#F59E0B';
case 'success': return '#10B981';
default: return isDark ? '#D1D5DB' : '#374151';
}
};

// Category icons
const getCategoryIcon = (category: LogEntry['category']) => {
switch (category) {
case 'stt': return '🎤';
case 'api': return '📡';
case 'tts': return '🔊';
case 'timer': return '⏱️';
case 'system': return '⚙️';
default: return '•';
}
};

return (
<View style={[styles.container, { backgroundColor: isDark ? '#0A0A0A' : '#FFFFFF' }]}>
{/* Header */}
<View style={[styles.header, { paddingTop: insets.top + 16 }]}>
<Text style={[styles.headerTitle, { color: isDark ? '#FFFFFF' : '#000000' }]}>
Voice Debug
</Text>
<TouchableOpacity onPress={clearLogs} style={styles.clearButton}>
<Feather name="trash-2" size={20} color={isDark ? '#9CA3AF' : '#6B7280'} />
</TouchableOpacity>
</View>

{/* Status Card */}
<View style={[styles.statusCard, {
backgroundColor: isDark ? '#1F2937' : '#F3F4F6',
borderColor: statusDisplay.color,
}]}>
<View style={styles.statusRow}>
<Text style={styles.statusIcon}>{statusDisplay.icon}</Text>
<View style={styles.statusTextContainer}>
<Text style={[styles.statusLabel, { color: isDark ? '#9CA3AF' : '#6B7280' }]}>
Status
</Text>
<Text style={[styles.statusText, { color: statusDisplay.color }]}>
{statusDisplay.text}
</Text>
</View>
</View>

{/* Silence Timer */}
{sttIsListening && status !== 'processing' && status !== 'speaking' && (
<View style={styles.timerContainer}>
<Text style={[styles.timerLabel, { color: isDark ? '#9CA3AF' : '#6B7280' }]}>
Silence Timer (iOS auto-stop at 2.0s)
</Text>
<View style={styles.timerRow}>
<Text style={[styles.timerText, {
color: silenceTimer >= 2000 ? '#EF4444' : silenceTimer >= 1500 ? '#F59E0B' : isDark ? '#D1D5DB' : '#374151'
}]}>
{silenceSeconds}s / 2.0s
</Text>
</View>
<View style={[styles.progressBarContainer, { backgroundColor: isDark ? '#374151' : '#E5E7EB' }]}>
<View style={[styles.progressBarFill, {
width: `${silenceProgress * 100}%`,
backgroundColor: silenceTimer >= 2000 ? '#EF4444' : silenceTimer >= 1500 ? '#F59E0B' : '#10B981'
}]} />
</View>
</View>
)}

{/* Current Transcripts */}
{partialTranscript && (
<View style={styles.transcriptContainer}>
<Text style={[styles.transcriptLabel, { color: isDark ? '#9CA3AF' : '#6B7280' }]}>
Partial:
</Text>
<Text style={[styles.transcriptText, { color: isDark ? '#F59E0B' : '#D97706' }]}>
"{partialTranscript}"
</Text>
</View>
)}
{recognizedText && (
<View style={styles.transcriptContainer}>
<Text style={[styles.transcriptLabel, { color: isDark ? '#9CA3AF' : '#6B7280' }]}>
Final:
</Text>
<Text style={[styles.transcriptText, { color: isDark ? '#10B981' : '#059669' }]}>
"{recognizedText}"
</Text>
</View>
)}
</View>

{/* Logs */}
<View style={styles.logsContainer}>
<Text style={[styles.logsTitle, { color: isDark ? '#FFFFFF' : '#000000' }]}>
Event Log
</Text>
<ScrollView
ref={scrollViewRef}
style={[styles.logsScrollView, { backgroundColor: isDark ? '#111827' : '#F9FAFB' }]}
contentContainerStyle={styles.logsContent}
>
{logs.length === 0 ? (
<Text style={[styles.emptyText, { color: isDark ? '#6B7280' : '#9CA3AF' }]}>
No events yet. Press FAB to start.
</Text>
) : (
logs.map(log => {
const time = new Date(log.timestamp);
const timeStr = `${String(time.getHours()).padStart(2, '0')}:${String(time.getMinutes()).padStart(2, '0')}:${String(time.getSeconds()).padStart(2, '0')}.${String(time.getMilliseconds()).padStart(3, '0')}`;

return (
<View key={log.id} style={styles.logEntry}>
<Text style={[styles.logTimestamp, { color: isDark ? '#6B7280' : '#9CA3AF' }]}>
{timeStr}
</Text>
<Text style={styles.logIcon}>{getCategoryIcon(log.category)}</Text>
<Text style={[styles.logMessage, { color: getLogColor(log.level) }]}>
{log.message}
</Text>
</View>
);
})
)}
</ScrollView>
</View>

{/* FAB */}
<TouchableOpacity
style={[styles.fab, {
backgroundColor: isListening ? '#EF4444' : AppColors.primary,
bottom: insets.bottom + 80,
}]}
onPress={() => {
if (isListening) {
addLog('system', '🛑 User stopped session', 'warning');
stopSession();
} else {
clearLogs();
addLog('system', '▶️ User started session', 'success');
startSession();
}
}}
>
<Feather
name={isListening ? 'square' : 'mic'}
size={28}
color="#FFFFFF"
/>
</TouchableOpacity>
</View>
);
}

const styles = StyleSheet.create({
container: {
flex: 1,
},
header: {
flexDirection: 'row',
alignItems: 'center',
justifyContent: 'space-between',
paddingHorizontal: 20,
paddingBottom: 16,
},
headerTitle: {
fontSize: 28,
fontWeight: '700',
},
clearButton: {
padding: 8,
},
statusCard: {
marginHorizontal: 20,
marginBottom: 16,
padding: 16,
borderRadius: 12,
borderLeftWidth: 4,
},
statusRow: {
flexDirection: 'row',
alignItems: 'center',
},
statusIcon: {
fontSize: 32,
marginRight: 12,
},
statusTextContainer: {
flex: 1,
},
statusLabel: {
fontSize: 12,
fontWeight: '500',
marginBottom: 2,
},
statusText: {
fontSize: 18,
fontWeight: '700',
},
timerContainer: {
marginTop: 16,
paddingTop: 16,
borderTopWidth: 1,
borderTopColor: 'rgba(156, 163, 175, 0.2)',
},
timerLabel: {
fontSize: 12,
fontWeight: '500',
marginBottom: 8,
},
timerRow: {
marginBottom: 8,
},
timerText: {
fontSize: 24,
fontWeight: '700',
fontVariant: ['tabular-nums'],
},
progressBarContainer: {
height: 8,
borderRadius: 4,
overflow: 'hidden',
},
progressBarFill: {
height: '100%',
borderRadius: 4,
},
transcriptContainer: {
marginTop: 12,
paddingTop: 12,
borderTopWidth: 1,
borderTopColor: 'rgba(156, 163, 175, 0.2)',
},
transcriptLabel: {
fontSize: 12,
fontWeight: '500',
marginBottom: 4,
},
transcriptText: {
fontSize: 14,
fontStyle: 'italic',
},
logsContainer: {
flex: 1,
marginHorizontal: 20,
},
logsTitle: {
fontSize: 16,
fontWeight: '700',
marginBottom: 8,
},
logsScrollView: {
flex: 1,
borderRadius: 8,
},
logsContent: {
padding: 12,
},
emptyText: {
textAlign: 'center',
fontSize: 14,
fontStyle: 'italic',
paddingVertical: 20,
},
logEntry: {
flexDirection: 'row',
marginBottom: 8,
alignItems: 'flex-start',
},
logTimestamp: {
fontSize: 11,
fontVariant: ['tabular-nums'],
marginRight: 8,
width: 80,
},
logIcon: {
fontSize: 14,
marginRight: 6,
},
logMessage: {
fontSize: 13,
flex: 1,
lineHeight: 18,
},
fab: {
position: 'absolute',
right: 20,
width: 64,
height: 64,
borderRadius: 32,
alignItems: 'center',
justifyContent: 'center',
shadowColor: '#000',
shadowOffset: { width: 0, height: 4 },
shadowOpacity: 0.3,
shadowRadius: 8,
elevation: 8,
},
});
@@ -13,7 +13,6 @@ import { BeneficiaryProvider } from '@/contexts/BeneficiaryContext';
import { VoiceTranscriptProvider } from '@/contexts/VoiceTranscriptContext';
import { VoiceCallProvider } from '@/contexts/VoiceCallContext';
import { VoiceProvider } from '@/contexts/VoiceContext';
import { ChatProvider } from '@/contexts/ChatContext';
import { LoadingSpinner } from '@/components/ui/LoadingSpinner';
import { FloatingCallBubble } from '@/components/FloatingCallBubble';

@@ -70,9 +69,7 @@ export default function RootLayout() {
<VoiceTranscriptProvider>
<VoiceCallProvider>
<VoiceProvider>
<ChatProvider>
<RootLayoutNav />
</ChatProvider>
</VoiceProvider>
</VoiceCallProvider>
</VoiceTranscriptProvider>

@@ -1,12 +1,9 @@
/**
* Voice Floating Action Button Component
*
* Positioned at the center of the tab bar.
* Shows different animations for each voice state:
* - idle: white mic icon, green background
* - listening: red background, expanding pulse rings
* - processing: blue background, spinning indicator
* - speaking: green background, wave-like pulse
* A floating action button for toggling voice listening mode.
* Tap to start/stop listening.
* Hidden when a call is already active.
*/

import React, { useRef, useEffect } from 'react';
@@ -15,13 +12,12 @@ import {
TouchableOpacity,
Animated,
ViewStyle,
ActivityIndicator,
} from 'react-native';
import { Ionicons } from '@expo/vector-icons';
import { useSafeAreaInsets } from 'react-native-safe-area-context';
import * as Haptics from 'expo-haptics';
import { AppColors, BorderRadius } from '@/constants/theme';
import { useVoiceCall } from '@/contexts/VoiceCallContext';
import { useVoice } from '@/contexts/VoiceContext';

interface VoiceFABProps {
onPress: () => void;
@@ -30,232 +26,146 @@ interface VoiceFABProps {
isListening?: boolean;
}

const FAB_SIZE = 60;
const FAB_SIZE = 56;

export function VoiceFAB({ onPress, style, disabled = false, isListening = false }: VoiceFABProps) {
const { isCallActive } = useVoiceCall();
const { status: voiceStatus } = useVoice();
const insets = useSafeAreaInsets();

// Animation values
const scale = useRef(new Animated.Value(1)).current;
const opacity = useRef(new Animated.Value(1)).current;

// Pulse ring 1 (main expanding ring)
const pulse1Scale = useRef(new Animated.Value(1)).current;
const pulse1Opacity = useRef(new Animated.Value(0)).current;

// Pulse ring 2 (second ring, offset timing)
const pulse2Scale = useRef(new Animated.Value(1)).current;
const pulse2Opacity = useRef(new Animated.Value(0)).current;

// Speaking glow animation
const glowScale = useRef(new Animated.Value(1)).current;

// Processing rotation
const rotation = useRef(new Animated.Value(0)).current;

// Store animation refs for cleanup
const animationRef = useRef<Animated.CompositeAnimation | null>(null);

// Determine effective state
const effectiveStatus = isListening
? (voiceStatus === 'processing' ? 'processing' : voiceStatus === 'speaking' ? 'speaking' : 'listening')
: 'idle';
const pulseScale = useRef(new Animated.Value(1)).current;
const pulseOpacity = useRef(new Animated.Value(0)).current;

// Hide FAB when call is active
useEffect(() => {
if (isCallActive) {
Animated.parallel([
Animated.timing(scale, { toValue: 0, duration: 200, useNativeDriver: true }),
Animated.timing(opacity, { toValue: 0, duration: 200, useNativeDriver: true }),
Animated.timing(scale, {
toValue: 0,
duration: 200,
useNativeDriver: true,
}),
Animated.timing(opacity, {
toValue: 0,
duration: 200,
useNativeDriver: true,
}),
]).start();
} else {
Animated.parallel([
Animated.spring(scale, { toValue: 1, friction: 5, tension: 40, useNativeDriver: true }),
Animated.timing(opacity, { toValue: 1, duration: 200, useNativeDriver: true }),
Animated.spring(scale, {
toValue: 1,
friction: 5,
tension: 40,
useNativeDriver: true,
}),
Animated.timing(opacity, {
toValue: 1,
duration: 200,
useNativeDriver: true,
}),
]).start();
}
}, [isCallActive, scale, opacity]);

// Animations based on voice status
// Pulse animation when listening
useEffect(() => {
// Stop previous animation
if (animationRef.current) {
animationRef.current.stop();
animationRef.current = null;
}

// Reset all animation values
pulse1Scale.setValue(1);
pulse1Opacity.setValue(0);
pulse2Scale.setValue(1);
pulse2Opacity.setValue(0);
glowScale.setValue(1);
rotation.setValue(0);

if (effectiveStatus === 'listening') {
// Double pulse ring animation - more active/dynamic
const pulseAnim = Animated.loop(
Animated.stagger(500, [
Animated.parallel([
Animated.timing(pulse1Scale, { toValue: 2.0, duration: 1200, useNativeDriver: true }),
Animated.timing(pulse1Opacity, { toValue: 0, duration: 1200, useNativeDriver: true }),
]),
Animated.parallel([
Animated.timing(pulse1Scale, { toValue: 1, duration: 0, useNativeDriver: true }),
Animated.timing(pulse1Opacity, { toValue: 0.5, duration: 0, useNativeDriver: true }),
]),
])
);

const pulse2Anim = Animated.loop(
Animated.sequence([
Animated.delay(400),
Animated.parallel([
Animated.timing(pulse2Scale, { toValue: 1.8, duration: 1200, useNativeDriver: true }),
Animated.timing(pulse2Opacity, { toValue: 0, duration: 1200, useNativeDriver: true }),
]),
Animated.parallel([
Animated.timing(pulse2Scale, { toValue: 1, duration: 0, useNativeDriver: true }),
Animated.timing(pulse2Opacity, { toValue: 0.4, duration: 0, useNativeDriver: true }),
]),
])
);

const combined = Animated.parallel([pulseAnim, pulse2Anim]);
animationRef.current = combined;
combined.start();

} else if (effectiveStatus === 'speaking') {
// Gentle breathing glow when speaking
const glowAnim = Animated.loop(
Animated.sequence([
Animated.timing(glowScale, { toValue: 1.15, duration: 600, useNativeDriver: true }),
Animated.timing(glowScale, { toValue: 1.0, duration: 600, useNativeDriver: true }),
])
);

// Soft outer glow
const softPulse = Animated.loop(
if (isListening && !isCallActive) {
// Start pulsing animation
const pulseAnimation = Animated.loop(
Animated.sequence([
Animated.parallel([
Animated.timing(pulse1Scale, { toValue: 1.4, duration: 800, useNativeDriver: true }),
Animated.timing(pulse1Opacity, { toValue: 0.3, duration: 400, useNativeDriver: true }),
Animated.timing(pulseScale, {
toValue: 1.8,
duration: 1000,
useNativeDriver: true,
}),
Animated.timing(pulseOpacity, {
toValue: 0,
duration: 1000,
useNativeDriver: true,
}),
]),
Animated.parallel([
Animated.timing(pulse1Scale, { toValue: 1.0, duration: 800, useNativeDriver: true }),
Animated.timing(pulse1Opacity, { toValue: 0, duration: 400, useNativeDriver: true }),
Animated.timing(pulseScale, {
toValue: 1,
duration: 0,
useNativeDriver: true,
}),
Animated.timing(pulseOpacity, {
toValue: 0.6,
duration: 0,
useNativeDriver: true,
}),
]),
])
);

const combined = Animated.parallel([glowAnim, softPulse]);
animationRef.current = combined;
combined.start();

} else if (effectiveStatus === 'processing') {
// Spinning rotation for processing
const spinAnim = Animated.loop(
Animated.timing(rotation, { toValue: 1, duration: 1500, useNativeDriver: true })
);
animationRef.current = spinAnim;
spinAnim.start();
}
pulseAnimation.start();

return () => {
if (animationRef.current) {
animationRef.current.stop();
animationRef.current = null;
}
pulseAnimation.stop();
pulseScale.setValue(1);
pulseOpacity.setValue(0);
};
}, [effectiveStatus]); // eslint-disable-line react-hooks/exhaustive-deps
} else {
pulseScale.setValue(1);
pulseOpacity.setValue(0);
}
}, [isListening, isCallActive, pulseScale, pulseOpacity]);

// Press animation with haptic feedback
const handlePressIn = () => {
Haptics.impactAsync(Haptics.ImpactFeedbackStyle.Medium);
Animated.spring(scale, { toValue: 0.85, friction: 5, useNativeDriver: true }).start();
Animated.spring(scale, {
toValue: 0.9,
friction: 5,
useNativeDriver: true,
}).start();
};

const handlePressOut = () => {
Animated.spring(scale, { toValue: 1, friction: 5, useNativeDriver: true }).start();
Animated.spring(scale, {
toValue: 1,
friction: 5,
useNativeDriver: true,
}).start();
};

// Don't render if call is active
if (isCallActive) {
return null;
}

// Determine colors and icon based on state
let fabBgColor = AppColors.success; // idle: green
let iconName: 'mic-outline' | 'mic' | 'volume-high' = 'mic-outline';
let pulseColor = AppColors.error;

if (effectiveStatus === 'listening') {
fabBgColor = '#FF3B30'; // red
iconName = 'mic';
pulseColor = '#FF3B30';
} else if (effectiveStatus === 'processing') {
fabBgColor = AppColors.primary; // blue
iconName = 'mic';
pulseColor = AppColors.primary;
} else if (effectiveStatus === 'speaking') {
fabBgColor = '#34C759'; // green
iconName = 'volume-high';
pulseColor = '#34C759';
}

const spin = rotation.interpolate({
inputRange: [0, 1],
outputRange: ['0deg', '360deg'],
});

return (
<Animated.View
style={[
styles.container,
{
bottom: insets.bottom + 80, // Above tab bar
transform: [{ scale }],
opacity,
},
style,
]}
>
{/* Pulse ring 1 */}
{(effectiveStatus === 'listening' || effectiveStatus === 'speaking') && (
{/* Pulse ring when listening */}
{isListening && (
<Animated.View
style={[
styles.pulseRing,
{
backgroundColor: pulseColor,
transform: [{ scale: pulse1Scale }],
opacity: pulse1Opacity,
transform: [{ scale: pulseScale }],
opacity: pulseOpacity,
},
]}
/>
)}

{/* Pulse ring 2 (listening only) */}
{effectiveStatus === 'listening' && (
<Animated.View
style={[
styles.pulseRing,
{
backgroundColor: pulseColor,
transform: [{ scale: pulse2Scale }],
opacity: pulse2Opacity,
},
]}
/>
)}

<Animated.View
style={[
{ transform: [{ scale: effectiveStatus === 'speaking' ? glowScale : 1 }] },
]}
>
<TouchableOpacity
style={[
styles.fab,
{ backgroundColor: disabled ? AppColors.surface : fabBgColor },
isListening && styles.fabListening,
disabled && styles.fabDisabled,
]}
onPress={onPress}
@@ -264,38 +174,36 @@ export function VoiceFAB({ onPress, style, disabled = false, isListening = false
disabled={disabled}
activeOpacity={0.9}
>
{effectiveStatus === 'processing' ? (
<Animated.View style={{ transform: [{ rotate: spin }] }}>
<ActivityIndicator size="small" color={AppColors.white} />
</Animated.View>
) : (
<Ionicons
name={iconName}
name={isListening ? 'mic' : 'mic-outline'}
size={28}
color={disabled ? AppColors.textMuted : AppColors.white}
/>
)}
</TouchableOpacity>
</Animated.View>
</Animated.View>
);
}

const styles = StyleSheet.create({
container: {
position: 'absolute',
left: 0,
right: 0,
alignItems: 'center',
justifyContent: 'center',
zIndex: 100,
},
pulseRing: {
position: 'absolute',
width: FAB_SIZE,
height: FAB_SIZE,
borderRadius: FAB_SIZE / 2,
borderRadius: BorderRadius.full,
backgroundColor: AppColors.error,
},
fab: {
width: FAB_SIZE,
height: FAB_SIZE,
borderRadius: FAB_SIZE / 2,
borderRadius: BorderRadius.full,
backgroundColor: AppColors.success,
justifyContent: 'center',
alignItems: 'center',
shadowColor: '#000',
@@ -304,7 +212,11 @@ const styles = StyleSheet.create({
shadowRadius: 8,
elevation: 8,
},
fabListening: {
backgroundColor: AppColors.error,
},
fabDisabled: {
backgroundColor: AppColors.surface,
shadowOpacity: 0.1,
},
});

@ -1,51 +0,0 @@
|
||||
/**
|
||||
* Chat Context - Persists chat messages across tab navigation
|
||||
*
|
||||
* Without this context, messages are lost when switching tabs
|
||||
* because ChatScreen component unmounts and remounts.
|
||||
*/
|
||||
|
||||
import React, { createContext, useContext, useState, useCallback, ReactNode } from 'react';
|
||||
import type { Message } from '@/types';
|
||||
|
||||
interface ChatContextValue {
|
||||
messages: Message[];
|
||||
setMessages: React.Dispatch<React.SetStateAction<Message[]>>;
|
||||
addMessage: (message: Message) => void;
|
||||
clearMessages: (initialMessage: Message) => void;
|
||||
}
|
||||
|
||||
const ChatContext = createContext<ChatContextValue | undefined>(undefined);
|
||||
|
||||
export function ChatProvider({ children }: { children: ReactNode }) {
|
||||
const [messages, setMessages] = useState<Message[]>([
|
||||
{
|
||||
id: '1',
|
||||
role: 'assistant',
|
||||
content: "Hello! I'm Julia, your AI wellness companion.\n\nType a message below to chat with me.",
|
||||
timestamp: new Date(),
|
||||
},
|
||||
]);
|
||||
|
||||
const addMessage = useCallback((message: Message) => {
|
||||
setMessages(prev => [...prev, message]);
|
||||
}, []);
|
||||
|
||||
const clearMessages = useCallback((initialMessage: Message) => {
|
||||
setMessages([initialMessage]);
|
||||
}, []);
|
||||
|
||||
return (
|
||||
<ChatContext.Provider value={{ messages, setMessages, addMessage, clearMessages }}>
|
||||
{children}
|
||||
</ChatContext.Provider>
|
||||
);
|
||||
}
|
||||
|
||||
export function useChat() {
|
||||
const context = useContext(ChatContext);
|
||||
if (!context) {
|
||||
throw new Error('useChat must be used within ChatProvider');
|
||||
}
|
||||
return context;
|
||||
}
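
The context above exists so chat messages survive tab switches. A minimal usage sketch: mount ChatProvider once above the navigator, then call useChat from any screen. The component name and message content are illustrative; the Message shape follows the interface above.

import { Button } from 'react-native';

// Any component rendered under <ChatProvider> can read and append messages.
function SendDemo() {
  const { messages, addMessage } = useChat();
  return (
    <Button
      title={`Send (${messages.length} messages)`}
      onPress={() =>
        addMessage({
          id: String(Date.now()), // demo-only id; a real app might use a UUID
          role: 'user',
          content: 'Hi Julia',
          timestamp: new Date(),
        })
      }
    />
  );
}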
@ -153,12 +153,6 @@ export function VoiceProvider({ children }: { children: ReactNode }) {
  // API token cache
  const apiTokenRef = useRef<string | null>(null);

  // Abort controller for cancelling in-flight API requests
  const abortControllerRef = useRef<AbortController | null>(null);

  // Flag to prevent speak() after session stopped
  const sessionStoppedRef = useRef(false);

  // Deployment ID from settings
  const deploymentIdRef = useRef<string | null>(null);

@ -213,12 +207,6 @@ export function VoiceProvider({ children }: { children: ReactNode }) {
      return null;
    }

    // Don't send if session was stopped
    if (sessionStoppedRef.current) {
      console.log('[VoiceContext] Session stopped, skipping API call');
      return null;
    }

    console.log('[VoiceContext] Sending transcript to API:', trimmedText);
    setStatus('processing');
    setError(null);
@ -226,23 +214,10 @@ export function VoiceProvider({ children }: { children: ReactNode }) {
    // Add user message to transcript for chat display
    addTranscriptEntry('user', trimmedText);

    // Create abort controller for this request
    if (abortControllerRef.current) {
      abortControllerRef.current.abort();
    }
    const abortController = new AbortController();
    abortControllerRef.current = abortController;

    try {
      // Get API token
      const token = await getWellNuoToken();

      // Check if aborted
      if (abortController.signal.aborted || sessionStoppedRef.current) {
        console.log('[VoiceContext] Request aborted before API call');
        return null;
      }

      // Normalize question
      const normalizedQuestion = normalizeQuestion(trimmedText);

@ -269,17 +244,10 @@ export function VoiceProvider({ children }: { children: ReactNode }) {
        method: 'POST',
        headers: { 'Content-Type': 'application/x-www-form-urlencoded' },
        body: new URLSearchParams(requestParams).toString(),
        signal: abortController.signal,
      });

      const data = await response.json();

      // Check if session was stopped while waiting for response
      if (sessionStoppedRef.current) {
        console.log('[VoiceContext] Session stopped during API call, discarding response');
        return null;
      }

      if (data.ok && data.response?.body) {
        const responseText = data.response.body;
        console.log('[VoiceContext] API response:', responseText.slice(0, 100) + '...');
@ -288,7 +256,7 @@ export function VoiceProvider({ children }: { children: ReactNode }) {
        // Add Julia's response to transcript for chat display
        addTranscriptEntry('assistant', responseText);

        // Speak the response (will be skipped if session stopped)
        // Speak the response
        await speak(responseText);

        return responseText;
@ -301,13 +269,8 @@ export function VoiceProvider({ children }: { children: ReactNode }) {
        throw new Error(data.message || 'Could not get response');
      }
    } catch (err) {
      // Ignore abort errors
      if (err instanceof Error && err.name === 'AbortError') {
        console.log('[VoiceContext] API request aborted');
        return null;
      }
      const errorMsg = err instanceof Error ? err.message : 'Unknown error';
      console.warn('[VoiceContext] API error:', errorMsg);
      console.error('[VoiceContext] API error:', errorMsg);
      setError(errorMsg);
      setStatus('idle');
      return null;
@ -337,12 +300,6 @@ export function VoiceProvider({ children }: { children: ReactNode }) {
  const speak = useCallback(async (text: string): Promise<void> => {
    if (!text.trim()) return;

    // Don't speak if session was stopped
    if (sessionStoppedRef.current) {
      console.log('[VoiceContext] Session stopped, skipping TTS');
      return;
    }

    console.log('[VoiceContext] Speaking:', text.slice(0, 50) + '...');
    setStatus('speaking');
    setIsSpeaking(true);
@ -358,27 +315,20 @@ export function VoiceProvider({ children }: { children: ReactNode }) {
        onDone: () => {
          console.log('[VoiceContext] TTS completed');
          setIsSpeaking(false);
          // Return to listening state after speaking (if session wasn't stopped)
          if (!sessionStoppedRef.current) {
          // Return to listening state after speaking (if session is active)
            setStatus('listening');
          }
          resolve();
        },
        onError: (error) => {
          console.warn('[VoiceContext] TTS error:', error);
          console.error('[VoiceContext] TTS error:', error);
          setIsSpeaking(false);
          if (!sessionStoppedRef.current) {
            setStatus('listening');
          }
          resolve();
        },
        onStopped: () => {
          console.log('[VoiceContext] TTS stopped (interrupted)');
          setIsSpeaking(false);
          // Don't set status to listening if session was stopped by user
          if (!sessionStoppedRef.current) {
            setStatus('listening');
          }
          resolve();
        },
      });
@ -398,7 +348,6 @@ export function VoiceProvider({ children }: { children: ReactNode }) {
   */
  const startSession = useCallback(() => {
    console.log('[VoiceContext] Starting voice session');
    sessionStoppedRef.current = false;
    setStatus('listening');
    setIsListening(true);
    setError(null);
@ -411,16 +360,7 @@ export function VoiceProvider({ children }: { children: ReactNode }) {
   */
  const stopSession = useCallback(() => {
    console.log('[VoiceContext] Stopping voice session');
    // Mark session as stopped FIRST to prevent any pending callbacks
    sessionStoppedRef.current = true;
    // Abort any in-flight API requests
    if (abortControllerRef.current) {
      abortControllerRef.current.abort();
      abortControllerRef.current = null;
    }
    // Stop TTS
    Speech.stop();
    // Reset all state
    setStatus('idle');
    setIsListening(false);
    setIsSpeaking(false);

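The stopSession/sendTranscript pair above implements a small cancellation contract: set the stopped flag first, then abort the in-flight request, so any callback that fires in between cannot restart TTS or apply a stale response. A condensed sketch of that pattern (class and method names are illustrative; only fetch and AbortController are assumed):

// Illustrative condensation of the stop-flag-then-abort contract.
class VoiceSession {
  private stopped = false;
  private controller: AbortController | null = null;

  async send(url: string, body: string): Promise<string | null> {
    this.controller?.abort();                 // cancel any previous request
    this.controller = new AbortController();
    try {
      const res = await fetch(url, { method: 'POST', body, signal: this.controller.signal });
      const text = await res.text();
      return this.stopped ? null : text;      // discard responses that arrive after stop()
    } catch (err) {
      if (err instanceof Error && err.name === 'AbortError') return null; // expected on cancel
      throw err;
    }
  }

  stop(): void {
    this.stopped = true;                      // set the flag FIRST
    this.controller?.abort();                 // then cancel in-flight work
    this.controller = null;
  }
}
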
@ -104,8 +104,6 @@ export function useSpeechRecognition(
  const isStartingRef = useRef(false);
  // Track if voice has been detected in current session (for onVoiceDetected callback)
  const voiceDetectedRef = useRef(false);
  // Track last partial transcript for iOS fix (iOS never sends isFinal:true)
  const lastPartialRef = useRef('');

  // Check availability on mount
  useEffect(() => {
@ -122,7 +120,7 @@ export function useSpeechRecognition(
      setIsAvailable(true);
      console.log('[SpeechRecognition] Available, permission status:', status.status);
    } catch (err) {
      console.warn('[SpeechRecognition] Not available:', err);
      console.error('[SpeechRecognition] Not available:', err);
      setIsAvailable(false);
    }
  };
@ -142,16 +140,6 @@ export function useSpeechRecognition(
  // Event: Recognition ended
  useSpeechRecognitionEvent('end', () => {
    console.log('[SpeechRecognition] Ended');

    // iOS FIX: iOS never sends isFinal:true, so we send last partial as final when STT ends
    const lastPartial = lastPartialRef.current;
    if (lastPartial && lastPartial.trim().length > 0) {
      console.log('[SpeechRecognition] 🍎 iOS FIX - Sending last partial as final:', lastPartial);
      setRecognizedText(lastPartial);
      onResult?.(lastPartial, true); // Send as final=true
      lastPartialRef.current = ''; // Clear after sending
    }

    setIsListening(false);
    setPartialTranscript('');
    isStartingRef.current = false;
@ -179,10 +167,8 @@ export function useSpeechRecognition(
    if (isFinal) {
      setRecognizedText(transcript);
      setPartialTranscript('');
      lastPartialRef.current = ''; // Clear after final
    } else {
      setPartialTranscript(transcript);
      lastPartialRef.current = transcript; // Save for iOS fix
    }

    onResult?.(transcript, isFinal);
@ -191,20 +177,15 @@ export function useSpeechRecognition(

  // Event: Error occurred
  useSpeechRecognitionEvent('error', (event: any) => {
    const errorCode = event.error || '';
    const errorMessage = event.message || errorCode || 'Speech recognition error';
    const errorMessage = event.message || event.error || 'Speech recognition error';
    console.error('[SpeechRecognition] Error:', errorMessage);

    // "no-speech" is normal when user is silent — ignore completely
    if (errorCode === 'no-speech') {
      console.log('[SpeechRecognition] No speech detected (silence) - ignoring');
      setIsListening(false);
      isStartingRef.current = false;
      return;
    }

    console.warn('[SpeechRecognition] Error:', errorMessage);
    // Don't set error for "no-speech" - this is normal when user doesn't say anything
    if (event.error !== 'no-speech') {
      setError(errorMessage);
      onError?.(errorMessage);
    }

    setIsListening(false);
    isStartingRef.current = false;
  });
@ -226,7 +207,7 @@ export function useSpeechRecognition(

    if (!isAvailable) {
      const msg = 'Speech recognition is not available on this device';
      console.warn('[SpeechRecognition]', msg);
      console.error('[SpeechRecognition]', msg);
      setError(msg);
      onError?.(msg);
      return false;
@ -243,7 +224,7 @@ export function useSpeechRecognition(

    if (!permissionResult.granted) {
      const msg = 'Microphone permission denied';
      console.warn('[SpeechRecognition]', msg);
      console.error('[SpeechRecognition]', msg);
      setError(msg);
      onError?.(msg);
      isStartingRef.current = false;
@ -268,7 +249,7 @@ export function useSpeechRecognition(
      return true;
    } catch (err) {
      const msg = err instanceof Error ? err.message : 'Failed to start speech recognition';
      console.warn('[SpeechRecognition] Start error:', msg);
      console.error('[SpeechRecognition] Start error:', msg);
      setError(msg);
      onError?.(msg);
      isStartingRef.current = false;

@ -185,7 +185,7 @@ export function useTextToSpeech(
      },
      onError: (err) => {
        const errorMsg = typeof err === 'string' ? err : 'Speech synthesis error';
        console.warn('[TTS] Error:', errorMsg);
        console.error('[TTS] Error:', errorMsg);
        if (isMountedRef.current) {
          setIsSpeaking(false);
          setCurrentText(null);
@ -227,7 +227,7 @@ export function useTextToSpeech(
      console.log('[TTS] Available voices:', voices.length);
      return voices;
    } catch (err) {
      console.warn('[TTS] Could not get voices:', err);
      console.error('[TTS] Could not get voices:', err);
      return [];
    }
  }, []);

package-lock.json (generated · 34 lines changed)
@ -12,7 +12,6 @@
    "@expo/vector-icons": "^15.0.3",
    "@jamsch/expo-speech-recognition": "^0.2.15",
    "@notifee/react-native": "^9.1.8",
    "@react-native-async-storage/async-storage": "2.2.0",
    "@react-navigation/bottom-tabs": "^7.4.0",
    "@react-navigation/elements": "^2.6.3",
    "@react-navigation/native": "^7.1.8",
@ -3569,18 +3568,6 @@
      }
    }
  },
  "node_modules/@react-native-async-storage/async-storage": {
    "version": "2.2.0",
    "resolved": "https://registry.npmjs.org/@react-native-async-storage/async-storage/-/async-storage-2.2.0.tgz",
    "integrity": "sha512-gvRvjR5JAaUZF8tv2Kcq/Gbt3JHwbKFYfmb445rhOj6NUMx3qPLixmDx5pZAyb9at1bYvJ4/eTUipU5aki45xw==",
    "license": "MIT",
    "dependencies": {
      "merge-options": "^3.0.4"
    },
    "peerDependencies": {
      "react-native": "^0.0.0-0 || >=0.65 <1.0"
    }
  },
  "node_modules/@react-native/assets-registry": {
    "version": "0.81.5",
    "resolved": "https://registry.npmjs.org/@react-native/assets-registry/-/assets-registry-0.81.5.tgz",
@ -8767,15 +8754,6 @@
      "url": "https://github.com/sponsors/ljharb"
    }
  },
  "node_modules/is-plain-obj": {
    "version": "2.1.0",
    "resolved": "https://registry.npmjs.org/is-plain-obj/-/is-plain-obj-2.1.0.tgz",
    "integrity": "sha512-YWnfyRwxL/+SsrWYfOpUtz5b3YD+nyfkHvjbcanzk8zgyO4ASD67uVMRt8k5bM4lLMDnXfriRhOpemw+NfT1eA==",
    "license": "MIT",
    "engines": {
      "node": ">=8"
    }
  },
  "node_modules/is-regex": {
    "version": "1.2.1",
    "resolved": "https://registry.npmjs.org/is-regex/-/is-regex-1.2.1.tgz",
@ -9807,18 +9785,6 @@
    "integrity": "sha512-zYiwtZUcYyXKo/np96AGZAckk+FWWsUdJ3cHGGmld7+AhvcWmQyGCYUh1hc4Q/pkOhb65dQR/pqCyK0cOaHz4Q==",
    "license": "MIT"
  },
  "node_modules/merge-options": {
    "version": "3.0.4",
    "resolved": "https://registry.npmjs.org/merge-options/-/merge-options-3.0.4.tgz",
    "integrity": "sha512-2Sug1+knBjkaMsMgf1ctR1Ujx+Ayku4EdJN4Z+C2+JzoeF7A3OZ9KM2GY0CpQS51NR61LTurMJrRKPhSs3ZRTQ==",
    "license": "MIT",
    "dependencies": {
      "is-plain-obj": "^2.1.0"
    },
    "engines": {
      "node": ">=10"
    }
  },
  "node_modules/merge-stream": {
    "version": "2.0.0",
    "resolved": "https://registry.npmjs.org/merge-stream/-/merge-stream-2.0.0.tgz",

@ -15,7 +15,6 @@
    "@expo/vector-icons": "^15.0.3",
    "@jamsch/expo-speech-recognition": "^0.2.15",
    "@notifee/react-native": "^9.1.8",
    "@react-native-async-storage/async-storage": "2.2.0",
    "@react-navigation/bottom-tabs": "^7.4.0",
    "@react-navigation/elements": "^2.6.3",
    "@react-navigation/native": "^7.1.8",

@ -113,7 +113,7 @@ class ApiService {
      console.log('[API] refreshToken result:', result.ok ? 'SUCCESS' : result.error?.message);
      return result;
    } catch (error) {
      console.warn('[API] refreshToken error:', error);
      console.error('[API] refreshToken error:', error);
      return {
        ok: false,
        error: { message: 'Failed to refresh token', code: 'REFRESH_ERROR' }
@ -229,20 +229,6 @@ class ApiService {
    }
  }

  // Voice API Type management
  async setVoiceApiType(type: 'voice_ask' | 'ask_wellnuo_ai'): Promise<void> {
    await SecureStore.setItemAsync('voiceApiType', type);
  }

  async getVoiceApiType(): Promise<'voice_ask' | 'ask_wellnuo_ai'> {
    try {
      const saved = await SecureStore.getItemAsync('voiceApiType');
      return (saved as 'voice_ask' | 'ask_wellnuo_ai') || 'ask_wellnuo_ai';
    } catch {
      return 'ask_wellnuo_ai';
    }
  }
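
For illustration, one way a settings screen might flip the persisted voice API type through the two methods above. The function name is hypothetical, and the `api` parameter stands in for however the ApiService instance is exported:

// Hypothetical caller; `api` stands in for the exported ApiService instance.
async function toggleVoiceApiType(api: {
  getVoiceApiType(): Promise<'voice_ask' | 'ask_wellnuo_ai'>;
  setVoiceApiType(t: 'voice_ask' | 'ask_wellnuo_ai'): Promise<void>;
}): Promise<void> {
  const current = await api.getVoiceApiType();
  const next = current === 'voice_ask' ? 'ask_wellnuo_ai' : 'voice_ask';
  await api.setVoiceApiType(next); // persisted via SecureStore, as above
  console.log('[Settings] voice API type:', current, '->', next);
}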

  async validateDeploymentId(deploymentId: string): Promise<ApiResponse<{ valid: boolean; name?: string }>> {
    const token = await this.getToken();
    const userName = await this.getUserName();

@ -46,7 +46,7 @@ class CallManager {
        await this.disconnectCallback();
        console.log(`[CallManager] Previous call disconnected`);
      } catch (err) {
        console.warn(`[CallManager] Error disconnecting previous call:`, err);
        console.error(`[CallManager] Error disconnecting previous call:`, err);
      }
    }
  }
@ -98,7 +98,7 @@ class CallManager {
    try {
      await this.disconnectCallback();
    } catch (err) {
      console.warn(`[CallManager] Error force disconnecting:`, err);
      console.error(`[CallManager] Error force disconnecting:`, err);
    }
    this.activeCallId = null;
    this.disconnectCallback = null;

@ -228,7 +228,7 @@ export async function createCall(options: {

    if (!response.ok) {
      const errorData = await response.json().catch(() => ({}));
      console.warn('[Ultravox] API error:', response.status, errorData);
      console.error('[Ultravox] API error:', response.status, errorData);
      return {
        success: false,
        error: errorData.message || `API error: ${response.status}`,
@ -239,7 +239,7 @@ export async function createCall(options: {
    console.log('[Ultravox] Call created:', data.callId);
    return { success: true, data };
  } catch (error) {
    console.warn('[Ultravox] Create call error:', error);
    console.error('[Ultravox] Create call error:', error);
    return {
      success: false,
      error: error instanceof Error ? error.message : 'Failed to create call',
@ -265,7 +265,7 @@ export async function getCall(callId: string): Promise<CreateCallResponse | null

    return await response.json();
  } catch (error) {
    console.warn('[Ultravox] Get call error:', error);
    console.error('[Ultravox] Get call error:', error);
    return null;
  }
}
@ -284,7 +284,7 @@ export async function endCall(callId: string): Promise<boolean> {

    return response.ok;
  } catch (error) {
    console.warn('[Ultravox] End call error:', error);
    console.error('[Ultravox] End call error:', error);
    return false;
  }
}

@ -23,7 +23,7 @@ async function getNotifee() {
    try {
      notifee = (await import('@notifee/react-native')).default;
    } catch (e) {
      console.warn('[AndroidVoiceService] Failed to load notifee:', e);
      console.error('[AndroidVoiceService] Failed to load notifee:', e);
      return null;
    }
  }
@ -52,7 +52,7 @@ async function createNotificationChannel(): Promise<void> {
    });
    console.log('[AndroidVoiceService] Notification channel created');
  } catch (e) {
    console.warn('[AndroidVoiceService] Failed to create channel:', e);
    console.error('[AndroidVoiceService] Failed to create channel:', e);
  }
}

@ -102,7 +102,7 @@ export async function startVoiceCallService(): Promise<void> {

    console.log('[AndroidVoiceService] Foreground service started');
  } catch (e) {
    console.warn('[AndroidVoiceService] Failed to start foreground service:', e);
    console.error('[AndroidVoiceService] Failed to start foreground service:', e);
  }
}

@ -123,7 +123,7 @@ export async function stopVoiceCallService(): Promise<void> {
    await notifeeModule.cancelNotification(NOTIFICATION_ID);
    console.log('[AndroidVoiceService] Foreground service stopped');
  } catch (e) {
    console.warn('[AndroidVoiceService] Failed to stop foreground service:', e);
    console.error('[AndroidVoiceService] Failed to stop foreground service:', e);
  }
}

@ -178,7 +178,7 @@ export async function openBatteryOptimizationSettings(): Promise<void> {
    // Try generic battery settings
    await Linking.openSettings();
  } catch (e) {
    console.warn('[AndroidVoiceService] Failed to open settings:', e);
    console.error('[AndroidVoiceService] Failed to open settings:', e);
  }
}

@ -262,7 +262,7 @@ export async function requestNotificationPermission(): Promise<boolean> {
    console.log('[AndroidVoiceService] Notification permission:', granted ? 'granted' : 'denied');
    return granted;
  } catch (e) {
    console.warn('[AndroidVoiceService] Failed to request notification permission:', e);
    console.error('[AndroidVoiceService] Failed to request notification permission:', e);
    return false;
  }
}