Compare commits

...

2 Commits

Author SHA1 Message Date
d6353c8533 2026-01-29: Stable version with voice debug and iOS STT fix
Добавлено:
- Voice Debug tab - real-time логи STT/API/TTS/Timer
- iOS STT fix - отправка последнего partial как final при onEnd
- iOS auto-stop - автоматическая остановка STT после 2s тишины
- Voice API selector в Profile (voice_ask / ask_wellnuo_ai)

Исправлено:
- iOS никогда не отправлял isFinal:true - теперь отправляет через onEnd
- STT не останавливался после тишины - добавлен auto-stop таймер
- Profile Voice API selector восстановлен после rollback

Известные issues:
- TypeScript ошибки (setTimeout type) - не критично
- updateVoiceApiType отсутствует в VoiceContext - нужно добавить

Стабильная версия для тестирования на iPhone.
2026-01-28 19:45:40 -08:00
05f872d067 fix: voice session improvements - FAB stop, echo prevention, chat TTS
- FAB button now correctly stops session during speaking/processing states
- Echo prevention: STT stopped during TTS playback, results ignored during speaking
- Chat TTS only speaks when voice session is active (no auto-speak for text chat)
- Session stop now aborts in-flight API requests and prevents race conditions
- STT restarts after TTS with 800ms delay for audio focus release
- Pending interrupt transcript processed after TTS completion
- ChatContext added for message persistence across tab navigation
- VoiceFAB redesigned with state-based animations
- console.error replaced with console.warn across voice pipeline
- no-speech STT errors silenced (normal silence behavior)
2026-01-27 22:59:55 -08:00
19 changed files with 1260 additions and 253 deletions

1
REVIEW_REPORT.md Normal file
View File

@ -0,0 +1 @@
Limit reached · resets 1pm (America/Los_Angeles) · turn on /extra-usage

View File

@ -1,6 +1,6 @@
import { Tabs } from 'expo-router';
import React, { useCallback, useEffect, useRef } from 'react';
import { Platform, View, AppState, AppStateStatus } from 'react-native';
import { Platform, View, AppState, AppStateStatus, TouchableOpacity, StyleSheet } from 'react-native';
import { Feather } from '@expo/vector-icons';
import { useSafeAreaInsets } from 'react-native-safe-area-context';
@ -29,6 +29,7 @@ export default function TabLayout() {
interruptIfSpeaking,
setTranscript,
setPartialTranscript,
partialTranscript, // for iOS auto-stop timer
sendTranscript,
} = useVoice();
@ -40,14 +41,13 @@ export default function TabLayout() {
const pendingInterruptTranscriptRef = useRef<string | null>(null);
// Callback for voice detection - interrupt TTS when user speaks
// NOTE: On Android, STT doesn't run during TTS (shared audio focus),
// so interruption on Android happens via FAB press instead.
// On iOS, STT can run alongside TTS, so voice detection works.
const handleVoiceDetected = useCallback(() => {
// Interrupt TTS when user starts speaking during 'speaking' state
if (status === 'speaking' || isSpeaking) {
console.log('[TabLayout] Voice detected during TTS playback - INTERRUPTING Julia');
const wasInterrupted = interruptIfSpeaking();
if (wasInterrupted) {
console.log('[TabLayout] TTS interrupted successfully, now listening to user');
}
if (Platform.OS === 'ios' && (status === 'speaking' || isSpeaking)) {
console.log('[TabLayout] Voice detected during TTS (iOS) - INTERRUPTING Julia');
interruptIfSpeaking();
}
}, [status, isSpeaking, interruptIfSpeaking]);
@ -63,21 +63,24 @@ export default function TabLayout() {
// Callback for STT results
const handleSpeechResult = useCallback((transcript: string, isFinal: boolean) => {
// Ignore any STT results during TTS playback or processing (echo prevention)
if (status === 'speaking' || status === 'processing') {
if (isFinal) {
// Check if we're still in speaking mode (user interrupted Julia)
if (isSpeaking || status === 'speaking') {
// Store the transcript to send after TTS fully stops
console.log('[TabLayout] Got final result while TTS playing - storing for after interruption:', transcript);
// User interrupted Julia with speech — store to send after TTS stops
console.log('[TabLayout] Got final result during TTS/processing - storing for after interruption:', transcript);
pendingInterruptTranscriptRef.current = transcript;
} else {
// Normal case: not speaking, send immediately
}
// Ignore partial transcripts during TTS (they're likely echo)
return;
}
if (isFinal) {
setTranscript(transcript);
sendTranscript(transcript);
}
} else {
setPartialTranscript(transcript);
}
}, [setTranscript, setPartialTranscript, sendTranscript, isSpeaking, status]);
}, [setTranscript, setPartialTranscript, sendTranscript, status]);
// Speech recognition with voice detection callback
const {
@ -85,7 +88,7 @@ export default function TabLayout() {
stopListening,
isListening: sttIsListening,
} = useSpeechRecognition({
lang: 'ru-RU',
lang: 'en-US',
continuous: true,
interimResults: true,
onVoiceDetected: handleVoiceDetected,
@ -93,6 +96,65 @@ export default function TabLayout() {
onEnd: handleSTTEnd,
});
// Ref to prevent concurrent startListening calls
const sttStartingRef = useRef(false);
// Ref to track last partial transcript for iOS auto-stop
const lastPartialTextRef = useRef('');
const silenceTimerRef = useRef<NodeJS.Timeout | null>(null);
// iOS AUTO-STOP: Stop STT after 2 seconds of silence (no new partial transcripts)
// This triggers onEnd → iOS fix sends lastPartial as final
useEffect(() => {
// Clear existing timer
if (silenceTimerRef.current) {
clearTimeout(silenceTimerRef.current);
silenceTimerRef.current = null;
}
// Only track silence when STT is listening (not during processing/speaking)
if (sttIsListening && status !== 'processing' && status !== 'speaking') {
// Get current partial from VoiceContext (set by handleSpeechResult)
const currentPartial = partialTranscript;
// If partial changed, update ref and set new 2s timer
if (currentPartial !== lastPartialTextRef.current) {
lastPartialTextRef.current = currentPartial;
// Start 2-second silence timer
silenceTimerRef.current = setTimeout(() => {
if (sttIsListening && sessionActiveRef.current) {
console.log('[TabLayout] 🍎 iOS AUTO-STOP: 2s silence - stopping STT to trigger onEnd → iOS fix');
stopListening();
}
}, 2000);
}
}
return () => {
if (silenceTimerRef.current) {
clearTimeout(silenceTimerRef.current);
silenceTimerRef.current = null;
}
};
}, [sttIsListening, status, partialTranscript, stopListening]);
// Safe wrapper to start STT with debounce protection
const safeStartSTT = useCallback(() => {
if (sttIsListening || sttStartingRef.current) {
return; // Already listening or starting
}
// Don't start STT during TTS on Android - they share audio focus
if (Platform.OS === 'android' && (status === 'speaking' || isSpeaking)) {
console.log('[TabLayout] Skipping STT start - TTS is playing (Android audio focus)');
return;
}
sttStartingRef.current = true;
console.log('[TabLayout] Starting STT...');
startListening().finally(() => {
sttStartingRef.current = false;
});
}, [sttIsListening, status, isSpeaking, startListening]);
// Update session active ref when isListening changes
useEffect(() => {
sessionActiveRef.current = isListening;
@ -104,42 +166,32 @@ export default function TabLayout() {
// Start/stop STT when voice session starts/stops
useEffect(() => {
if (isListening) {
console.log('[TabLayout] Starting STT for voice session');
startListening();
console.log('[TabLayout] Voice session started - starting STT');
safeStartSTT();
} else {
console.log('[TabLayout] Stopping STT - session ended');
console.log('[TabLayout] Voice session ended - stopping STT');
stopListening();
}
}, [isListening, startListening, stopListening]);
// Restart STT if it ended while session is still active
// This ensures continuous listening even during/after TTS playback
useEffect(() => {
if (shouldRestartSTTRef.current && sessionActiveRef.current && !sttIsListening) {
console.log('[TabLayout] Restarting STT - session still active');
shouldRestartSTTRef.current = false;
// Small delay to ensure clean restart
const timer = setTimeout(() => {
if (sessionActiveRef.current) {
startListening();
}
}, 100);
return () => clearTimeout(timer);
}
}, [sttIsListening, startListening]);
}, [isListening]); // eslint-disable-line react-hooks/exhaustive-deps
// Track previous status to detect transition from speaking to listening
const prevStatusRef = useRef<typeof status>('idle');
// Auto-restart STT when TTS finishes (status changes from 'speaking' to 'listening')
// Also process any pending transcript from user interruption
// Stop STT when entering processing or speaking state (prevent echo)
// Restart STT when TTS finishes (speaking → listening)
useEffect(() => {
const prevStatus = prevStatusRef.current;
prevStatusRef.current = status;
// When transitioning from speaking to listening, handle pending interrupt transcript
// Stop STT when processing starts or TTS starts (prevent Julia hearing herself)
if ((status === 'processing' || status === 'speaking') && sttIsListening) {
console.log('[TabLayout] Stopping STT during', status, '(echo prevention)');
stopListening();
}
// When TTS finishes (speaking → listening), restart STT
if (prevStatus === 'speaking' && status === 'listening' && sessionActiveRef.current) {
console.log('[TabLayout] TTS finished/interrupted - checking for pending transcript');
console.log('[TabLayout] TTS finished - restarting STT');
// Process pending transcript from interruption if any
const pendingTranscript = pendingInterruptTranscriptRef.current;
@ -150,76 +202,74 @@ export default function TabLayout() {
sendTranscript(pendingTranscript);
}
// Small delay to ensure TTS cleanup is complete, then restart STT
// Delay to let TTS fully release audio focus, then restart STT
const timer = setTimeout(() => {
if (sessionActiveRef.current && !sttIsListening) {
startListening();
if (sessionActiveRef.current) {
safeStartSTT();
}
}, 200);
}, 800); // 800ms to ensure TTS audio fully fades
return () => clearTimeout(timer);
}
}, [status, sttIsListening, startListening, setTranscript, sendTranscript]);
// ============================================================================
// TAB NAVIGATION PERSISTENCE
// Ensure voice session continues when user switches between tabs.
// The session state is in VoiceContext (root level), but STT may stop due to:
// 1. Native audio session changes
// 2. Tab unmount/remount (though tabs layout doesn't unmount)
// 3. AppState changes (background/foreground)
// ============================================================================
// When processing finishes and goes to speaking, STT is already stopped (above)
// When speaking finishes and goes to listening, STT restarts (above)
}, [status]); // eslint-disable-line react-hooks/exhaustive-deps
// Monitor and recover STT state during tab navigation
// If session is active but STT stopped unexpectedly, restart it
// IMPORTANT: STT should run DURING TTS playback to detect user interruption!
// When STT ends unexpectedly during active session, restart it (but not during TTS)
useEffect(() => {
// Check every 500ms if STT needs to be restarted
const intervalId = setInterval(() => {
// Only act if session should be active (isListening from VoiceContext)
// but STT is not actually listening
// Note: We DO want STT running during 'speaking' to detect interruption!
// Only skip during 'processing' (API call in progress)
if (
shouldRestartSTTRef.current &&
sessionActiveRef.current &&
!sttIsListening &&
status !== 'processing'
status !== 'processing' &&
status !== 'speaking'
) {
console.log('[TabLayout] STT watchdog: restarting STT (session active but STT stopped, status:', status, ')');
startListening();
shouldRestartSTTRef.current = false;
console.log('[TabLayout] STT ended unexpectedly - restarting');
const timer = setTimeout(() => {
if (sessionActiveRef.current) {
safeStartSTT();
}
}, 500);
return () => clearInterval(intervalId);
}, [sttIsListening, status, startListening]);
}, 300);
return () => clearTimeout(timer);
}
}, [sttIsListening]); // eslint-disable-line react-hooks/exhaustive-deps
// Handle app state changes (background/foreground)
// When app comes back to foreground, restart STT if session was active
useEffect(() => {
const handleAppStateChange = (nextAppState: AppStateStatus) => {
if (nextAppState === 'active' && sessionActiveRef.current) {
// App came to foreground, give it a moment then check STT
// STT should run even during 'speaking' to detect user interruption
setTimeout(() => {
if (sessionActiveRef.current && !sttIsListening && status !== 'processing') {
if (sessionActiveRef.current && !sttIsListening && status !== 'processing' && status !== 'speaking') {
console.log('[TabLayout] App foregrounded - restarting STT');
startListening();
safeStartSTT();
}
}, 300);
}, 500);
}
};
const subscription = AppState.addEventListener('change', handleAppStateChange);
return () => subscription.remove();
}, [sttIsListening, status, startListening]);
}, [sttIsListening, status, safeStartSTT]);
// Handle voice FAB press - toggle listening mode
// Must check ALL active states (listening, processing, speaking), not just isListening
const handleVoiceFABPress = useCallback(() => {
if (isListening) {
const isSessionActive = isListening || status === 'speaking' || status === 'processing';
console.log('[TabLayout] FAB pressed, isSessionActive:', isSessionActive, 'status:', status, 'isListening:', isListening);
if (isSessionActive) {
// Force-stop everything: STT, TTS, and session state
console.log('[TabLayout] Force-stopping everything');
stopListening();
stopSession();
sessionActiveRef.current = false;
shouldRestartSTTRef.current = false;
pendingInterruptTranscriptRef.current = null;
} else {
startSession();
}
}, [isListening, startSession, stopSession]);
}, [isListening, status, startSession, stopSession, stopListening]);
// Calculate tab bar height based on safe area
// On iOS with home indicator, insets.bottom is ~34px
@ -278,6 +328,28 @@ export default function TabLayout() {
),
}}
/>
{/* Voice FAB - center tab button */}
<Tabs.Screen
name="explore"
options={{
title: '',
tabBarButton: () => (
<View style={tabFABStyles.fabWrapper}>
<VoiceFAB onPress={handleVoiceFABPress} isListening={isListening || status === 'speaking' || status === 'processing'} />
</View>
),
}}
/>
{/* Voice Debug - visible tab */}
<Tabs.Screen
name="voice-debug"
options={{
title: 'Debug',
tabBarIcon: ({ color, size }) => (
<Feather name="activity" size={22} color={color} />
),
}}
/>
<Tabs.Screen
name="profile"
options={{
@ -287,13 +359,6 @@ export default function TabLayout() {
),
}}
/>
{/* Hide explore tab */}
<Tabs.Screen
name="explore"
options={{
href: null,
}}
/>
{/* Audio Debug - hidden */}
<Tabs.Screen
name="audio-debug"
@ -309,9 +374,15 @@ export default function TabLayout() {
}}
/>
</Tabs>
{/* Voice FAB - toggle listening mode */}
<VoiceFAB onPress={handleVoiceFABPress} isListening={isListening} />
</View>
);
}
const tabFABStyles = StyleSheet.create({
fabWrapper: {
flex: 1,
alignItems: 'center',
justifyContent: 'center',
top: -20,
},
});

View File

@ -40,7 +40,7 @@ export default function BeneficiaryDashboardScreen() {
setUserId(uid);
console.log('Loaded credentials for WebView:', { hasToken: !!token, user, uid });
} catch (err) {
console.error('Failed to load credentials:', err);
console.warn('Failed to load credentials:', err);
} finally {
setIsTokenLoaded(true);
}
@ -80,7 +80,7 @@ export default function BeneficiaryDashboardScreen() {
console.log('MobileAppLogin timeout - function not found');
}, 5000);
} catch(e) {
console.error('Failed to call MobileAppLogin:', e);
console.warn('Failed to call MobileAppLogin:', e);
}
})();
true;

View File

@ -25,6 +25,8 @@ import { useRouter, useFocusEffect } from 'expo-router';
import { api } from '@/services/api';
import { useBeneficiary } from '@/contexts/BeneficiaryContext';
import { useVoiceTranscript } from '@/contexts/VoiceTranscriptContext';
import { useVoice } from '@/contexts/VoiceContext';
import { useChat } from '@/contexts/ChatContext';
import { useTextToSpeech } from '@/hooks/useTextToSpeech';
import { AppColors, BorderRadius, FontSizes, Spacing } from '@/constants/theme';
import type { Message, Beneficiary } from '@/types';
@ -113,6 +115,9 @@ export default function ChatScreen() {
const { currentBeneficiary, setCurrentBeneficiary } = useBeneficiary();
const { transcript, hasNewTranscript, markTranscriptAsShown, getTranscriptAsMessages } = useVoiceTranscript();
// Voice context for real-time transcript display and checking if voice session is active
const { partialTranscript: voicePartial, isListening: voiceIsListening, status: voiceStatus, isActive: voiceIsActive } = useVoice();
// TTS for reading Julia's responses aloud
const { speak, stop: stopTTS, isSpeaking } = useTextToSpeech({
language: 'ru-RU',
@ -131,8 +136,8 @@ export default function ChatScreen() {
const [customDeploymentId, setCustomDeploymentId] = useState<string | null>(null);
const [deploymentName, setDeploymentName] = useState<string | null>(null);
// Chat state - initialized after deployment ID is loaded
const [messages, setMessages] = useState<Message[]>([createInitialMessage(null)]);
// Chat state - stored in context to persist across tab navigation
const { messages, setMessages } = useChat();
const [sortNewestFirst, setSortNewestFirst] = useState(false);
const [input, setInput] = useState('');
@ -145,6 +150,23 @@ export default function ChatScreen() {
inputRef.current = input;
}, [input]);
// Show partial voice transcript in input field in real-time
useEffect(() => {
if (voiceIsListening && voicePartial) {
setInput(voicePartial);
}
}, [voicePartial, voiceIsListening]);
// Clear input when voice switches to processing (transcript was sent)
const prevVoiceStatusRef = useRef(voiceStatus);
useEffect(() => {
const prev = prevVoiceStatusRef.current;
prevVoiceStatusRef.current = voiceStatus;
if (prev === 'listening' && voiceStatus === 'processing') {
setInput('');
}
}, [voiceStatus]);
// Beneficiary picker
const [showBeneficiaryPicker, setShowBeneficiaryPicker] = useState(false);
const [beneficiaries, setBeneficiaries] = useState<Beneficiary[]>([]);
@ -235,7 +257,7 @@ export default function ChatScreen() {
}
return [];
} catch (error) {
console.error('Failed to load beneficiaries:', error);
console.warn('Failed to load beneficiaries:', error);
return [];
} finally {
setLoadingBeneficiaries(false);
@ -392,8 +414,11 @@ export default function ChatScreen() {
};
setMessages(prev => [...prev, assistantMessage]);
// Speak the response using TTS
// Only speak the response if voice session is active (FAB pressed)
// Don't auto-speak for text-only chat messages
if (voiceIsActive) {
speak(responseText);
}
} else {
// Token might be expired, clear and retry once
if (data.status === '401 Unauthorized') {
@ -413,7 +438,7 @@ export default function ChatScreen() {
} finally {
setIsSending(false);
}
}, [isSending, getWellNuoToken, customDeploymentId, currentBeneficiary, beneficiaries, speak]);
}, [isSending, getWellNuoToken, customDeploymentId, currentBeneficiary, beneficiaries, speak, voiceIsActive]);
// Render message bubble
const renderMessage = ({ item }: { item: Message }) => {
@ -610,9 +635,9 @@ export default function ChatScreen() {
{/* Input */}
<View style={styles.inputContainer}>
<TextInput
style={styles.input}
placeholder="Type a message..."
placeholderTextColor={AppColors.textMuted}
style={[styles.input, voiceIsListening && styles.inputListening]}
placeholder={voiceIsListening ? "Listening..." : "Type a message..."}
placeholderTextColor={voiceIsListening ? AppColors.error : AppColors.textMuted}
value={input}
onChangeText={setInput}
multiline
@ -774,6 +799,10 @@ const styles = StyleSheet.create({
maxHeight: 100,
marginRight: Spacing.sm,
},
inputListening: {
borderWidth: 1.5,
borderColor: AppColors.error,
},
sendButton: {
width: 44,
height: 44,

View File

@ -51,7 +51,7 @@ export default function HomeScreen() {
});
}
} catch (err) {
console.error('Failed to load credentials:', err);
console.warn('Failed to load credentials:', err);
} finally {
setIsTokenLoaded(true);
}
@ -126,11 +126,11 @@ export default function HomeScreen() {
webViewRef.current?.injectJavaScript(injectScript);
}
} else {
console.error('Token refresh failed');
console.warn('Token refresh failed');
setError('Session expired. Please restart the app.');
}
} catch (err) {
console.error('Error refreshing token:', err);
console.warn('Error refreshing token:', err);
} finally {
setIsRefreshingToken(false);
}
@ -178,7 +178,7 @@ export default function HomeScreen() {
observer.observe(document.body, { childList: true, subtree: true });
}
} catch(e) {
console.error('Failed to inject token:', e);
console.warn('Failed to inject token:', e);
}
})();
true;

View File

@ -15,6 +15,7 @@ import { router } from 'expo-router';
import { Ionicons } from '@expo/vector-icons';
import { SafeAreaView } from 'react-native-safe-area-context';
import { useAuth } from '@/contexts/AuthContext';
import { useVoice } from '@/contexts/VoiceContext';
import { api } from '@/services/api';
import { AppColors, BorderRadius, FontSizes, Spacing } from '@/constants/theme';
@ -55,6 +56,7 @@ function MenuItem({
export default function ProfileScreen() {
const { user, logout } = useAuth();
const { updateVoiceApiType } = useVoice();
const [deploymentId, setDeploymentId] = useState<string>('');
const [deploymentName, setDeploymentName] = useState<string>('');
const [showDeploymentModal, setShowDeploymentModal] = useState(false);
@ -62,6 +64,11 @@ export default function ProfileScreen() {
const [isValidating, setIsValidating] = useState(false);
const [validationError, setValidationError] = useState<string | null>(null);
// Voice API Type state
const [voiceApiType, setVoiceApiType] = useState<'voice_ask' | 'ask_wellnuo_ai'>('ask_wellnuo_ai');
const [showVoiceApiModal, setShowVoiceApiModal] = useState(false);
const [tempVoiceApiType, setTempVoiceApiType] = useState<'voice_ask' | 'ask_wellnuo_ai'>('ask_wellnuo_ai');
// Load saved deployment ID or auto-populate from first available
useEffect(() => {
const loadDeploymentId = async () => {
@ -88,12 +95,26 @@ export default function ProfileScreen() {
loadDeploymentId();
}, []);
// Load saved Voice API type
useEffect(() => {
const loadVoiceApiType = async () => {
const saved = await api.getVoiceApiType();
setVoiceApiType(saved);
};
loadVoiceApiType();
}, []);
const openDeploymentModal = useCallback(() => {
setTempDeploymentId(deploymentId);
setValidationError(null);
setShowDeploymentModal(true);
}, [deploymentId]);
const openVoiceApiModal = useCallback(() => {
setTempVoiceApiType(voiceApiType);
setShowVoiceApiModal(true);
}, [voiceApiType]);
const saveDeploymentId = useCallback(async () => {
const trimmed = tempDeploymentId.trim();
setValidationError(null);
@ -128,6 +149,13 @@ export default function ProfileScreen() {
}
}, [tempDeploymentId]);
const saveVoiceApiType = useCallback(async () => {
await api.setVoiceApiType(tempVoiceApiType);
setVoiceApiType(tempVoiceApiType);
updateVoiceApiType(tempVoiceApiType);
setShowVoiceApiModal(false);
}, [tempVoiceApiType, updateVoiceApiType]);
const openTerms = () => {
router.push('/terms');
};
@ -185,6 +213,15 @@ export default function ProfileScreen() {
subtitle={deploymentId ? (deploymentName || `ID: ${deploymentId}`) : 'Auto'}
onPress={openDeploymentModal}
/>
<View style={styles.menuDivider} />
<MenuItem
icon="radio-outline"
iconColor="#9333EA"
iconBgColor="#F3E8FF"
title="Voice API"
subtitle={voiceApiType === 'voice_ask' ? 'voice_ask' : 'ask_wellnuo_ai (LLaMA)'}
onPress={openVoiceApiModal}
/>
</View>
</View>
@ -271,6 +308,65 @@ export default function ProfileScreen() {
</View>
</KeyboardAvoidingView>
</Modal>
{/* Voice API Modal */}
<Modal
visible={showVoiceApiModal}
transparent
animationType="fade"
onRequestClose={() => setShowVoiceApiModal(false)}
>
<View style={styles.modalOverlay}>
<View style={styles.modalContent}>
<Text style={styles.modalTitle}>Voice API</Text>
<Text style={styles.modalDescription}>
Choose which API function to use for voice requests.
</Text>
{/* Radio buttons */}
<TouchableOpacity
style={styles.radioOption}
onPress={() => setTempVoiceApiType('ask_wellnuo_ai')}
>
<View style={styles.radioCircle}>
{tempVoiceApiType === 'ask_wellnuo_ai' && <View style={styles.radioCircleSelected} />}
</View>
<View style={styles.radioTextContainer}>
<Text style={styles.radioLabel}>ask_wellnuo_ai</Text>
<Text style={styles.radioDescription}>LLaMA with WellNuo data</Text>
</View>
</TouchableOpacity>
<TouchableOpacity
style={styles.radioOption}
onPress={() => setTempVoiceApiType('voice_ask')}
>
<View style={styles.radioCircle}>
{tempVoiceApiType === 'voice_ask' && <View style={styles.radioCircleSelected} />}
</View>
<View style={styles.radioTextContainer}>
<Text style={styles.radioLabel}>voice_ask</Text>
<Text style={styles.radioDescription}>Alternative voice API</Text>
</View>
</TouchableOpacity>
<View style={styles.modalButtons}>
<TouchableOpacity
style={styles.modalButtonCancel}
onPress={() => setShowVoiceApiModal(false)}
>
<Text style={styles.modalButtonCancelText}>Cancel</Text>
</TouchableOpacity>
<TouchableOpacity
style={styles.modalButtonSave}
onPress={saveVoiceApiType}
>
<Text style={styles.modalButtonSaveText}>Save</Text>
</TouchableOpacity>
</View>
</View>
</View>
</Modal>
</SafeAreaView>
);
}
@ -472,4 +568,40 @@ const styles = StyleSheet.create({
disabledText: {
opacity: 0.5,
},
// Radio button styles
radioOption: {
flexDirection: 'row',
alignItems: 'center',
paddingVertical: Spacing.sm + 4,
marginBottom: Spacing.xs,
},
radioCircle: {
width: 24,
height: 24,
borderRadius: 12,
borderWidth: 2,
borderColor: AppColors.primary,
alignItems: 'center',
justifyContent: 'center',
marginRight: Spacing.md,
},
radioCircleSelected: {
width: 12,
height: 12,
borderRadius: 6,
backgroundColor: AppColors.primary,
},
radioTextContainer: {
flex: 1,
},
radioLabel: {
fontSize: FontSizes.base,
fontWeight: '500',
color: AppColors.textPrimary,
marginBottom: 2,
},
radioDescription: {
fontSize: FontSizes.xs,
color: AppColors.textSecondary,
},
});

504
app/(tabs)/voice-debug.tsx Normal file
View File

@ -0,0 +1,504 @@
/**
* Voice Debug Screen
*
* Real-time debugging interface for voice recognition pipeline.
* Shows all events, timers, API calls, and state changes.
*/
import React, { useState, useEffect, useRef, useCallback } from 'react';
import {
View,
Text,
ScrollView,
StyleSheet,
TouchableOpacity,
} from 'react-native';
import { useSafeAreaInsets } from 'react-native-safe-area-context';
import { Feather } from '@expo/vector-icons';
import { useVoice } from '@/contexts/VoiceContext';
import { useSpeechRecognition } from '@/hooks/useSpeechRecognition';
import { AppColors } from '@/constants/theme';
import { useColorScheme } from '@/hooks/use-color-scheme';
interface LogEntry {
id: string;
timestamp: number;
category: 'stt' | 'api' | 'tts' | 'timer' | 'system';
message: string;
level: 'info' | 'warning' | 'error' | 'success';
data?: any;
}
export default function VoiceDebugScreen() {
const colorScheme = useColorScheme();
const isDark = colorScheme === 'dark';
const insets = useSafeAreaInsets();
const {
isListening,
isSpeaking,
status,
startSession,
stopSession,
} = useVoice();
const {
isListening: sttIsListening,
partialTranscript,
recognizedText,
} = useSpeechRecognition({
lang: 'en-US',
continuous: true,
interimResults: true,
});
const [logs, setLogs] = useState<LogEntry[]>([]);
const [silenceTimer, setSilenceTimer] = useState(0);
const scrollViewRef = useRef<ScrollView>(null);
const logIdCounter = useRef(0);
const lastPartialRef = useRef('');
// Add log entry
const addLog = useCallback((
category: LogEntry['category'],
message: string,
level: LogEntry['level'] = 'info',
data?: any
) => {
const entry: LogEntry = {
id: `log-${logIdCounter.current++}`,
timestamp: Date.now(),
category,
message,
level,
data,
};
console.log(`[VoiceDebug:${category}]`, message, data || '');
setLogs(prev => {
const updated = [...prev, entry];
// Keep only last 100 logs
return updated.slice(-100);
});
setTimeout(() => {
scrollViewRef.current?.scrollToEnd({ animated: true });
}, 50);
}, []);
// Clear logs
const clearLogs = useCallback(() => {
setLogs([]);
logIdCounter.current = 0;
addLog('system', 'Logs cleared', 'info');
}, [addLog]);
// Monitor voice session state
useEffect(() => {
if (isListening) {
addLog('system', '🎤 Voice session STARTED', 'success');
} else {
addLog('system', '⏹️ Voice session STOPPED', 'info');
setSilenceTimer(0);
}
}, [isListening, addLog]);
// Monitor STT state
useEffect(() => {
if (sttIsListening) {
addLog('stt', '▶️ STT listening started', 'success');
} else if (isListening) {
addLog('stt', '⏸️ STT stopped (but session active)', 'warning');
}
}, [sttIsListening, isListening, addLog]);
// Monitor status changes
useEffect(() => {
if (status === 'processing') {
addLog('api', '⚙️ Processing transcript → sending to API', 'info');
} else if (status === 'speaking') {
addLog('tts', '🔊 TTS playing (Julia speaking)', 'info');
} else if (status === 'listening') {
addLog('system', '👂 Ready to listen', 'info');
}
}, [status, addLog]);
// Monitor partial transcripts
useEffect(() => {
if (partialTranscript && partialTranscript !== lastPartialRef.current) {
lastPartialRef.current = partialTranscript;
addLog('stt', `📝 Partial: "${partialTranscript.slice(0, 40)}${partialTranscript.length > 40 ? '...' : ''}"`, 'info');
// Reset silence timer
setSilenceTimer(0);
addLog('timer', '🔄 Silence timer RESET', 'warning');
}
}, [partialTranscript, addLog]);
// Monitor final transcripts
useEffect(() => {
if (recognizedText && recognizedText !== lastPartialRef.current) {
addLog('stt', `✅ FINAL: "${recognizedText.slice(0, 40)}${recognizedText.length > 40 ? '...' : ''}"`, 'success', {
length: recognizedText.length,
transcript: recognizedText
});
addLog('api', '📤 Sending to API...', 'info');
}
}, [recognizedText, addLog]);
// Silence timer (only when STT is listening and not processing/speaking)
useEffect(() => {
let interval: NodeJS.Timeout | null = null;
if (sttIsListening && status !== 'processing' && status !== 'speaking') {
interval = setInterval(() => {
setSilenceTimer(prev => {
const next = prev + 100;
// Log milestones
if (next === 1000) {
addLog('timer', '⏱️ Silence: 1.0s', 'info');
} else if (next === 1500) {
addLog('timer', '⏱️ Silence: 1.5s', 'warning');
} else if (next === 2000) {
addLog('timer', '🛑 Silence: 2.0s → AUTO-STOP triggered', 'error');
}
return next;
});
}, 100);
} else {
setSilenceTimer(0);
}
return () => {
if (interval) clearInterval(interval);
};
}, [sttIsListening, status, addLog]);
// Get status indicator
const getStatusDisplay = () => {
if (status === 'speaking' || isSpeaking) {
return { color: '#9333EA', icon: '🔊', text: 'Speaking' };
}
if (status === 'processing') {
return { color: '#F59E0B', icon: '⚙️', text: 'Processing' };
}
if (isListening && sttIsListening) {
return { color: '#10B981', icon: '🟢', text: 'Listening' };
}
if (isListening && !sttIsListening) {
return { color: '#F59E0B', icon: '🟡', text: 'Session Active (STT Off)' };
}
return { color: '#6B7280', icon: '⚪', text: 'Idle' };
};
const statusDisplay = getStatusDisplay();
const silenceProgress = Math.min(silenceTimer / 2000, 1);
const silenceSeconds = (silenceTimer / 1000).toFixed(1);
// Log level colors
const getLogColor = (level: LogEntry['level']) => {
switch (level) {
case 'error': return '#EF4444';
case 'warning': return '#F59E0B';
case 'success': return '#10B981';
default: return isDark ? '#D1D5DB' : '#374151';
}
};
// Category icons
const getCategoryIcon = (category: LogEntry['category']) => {
switch (category) {
case 'stt': return '🎤';
case 'api': return '📡';
case 'tts': return '🔊';
case 'timer': return '⏱️';
case 'system': return '⚙️';
default: return '•';
}
};
return (
<View style={[styles.container, { backgroundColor: isDark ? '#0A0A0A' : '#FFFFFF' }]}>
{/* Header */}
<View style={[styles.header, { paddingTop: insets.top + 16 }]}>
<Text style={[styles.headerTitle, { color: isDark ? '#FFFFFF' : '#000000' }]}>
Voice Debug
</Text>
<TouchableOpacity onPress={clearLogs} style={styles.clearButton}>
<Feather name="trash-2" size={20} color={isDark ? '#9CA3AF' : '#6B7280'} />
</TouchableOpacity>
</View>
{/* Status Card */}
<View style={[styles.statusCard, {
backgroundColor: isDark ? '#1F2937' : '#F3F4F6',
borderColor: statusDisplay.color,
}]}>
<View style={styles.statusRow}>
<Text style={styles.statusIcon}>{statusDisplay.icon}</Text>
<View style={styles.statusTextContainer}>
<Text style={[styles.statusLabel, { color: isDark ? '#9CA3AF' : '#6B7280' }]}>
Status
</Text>
<Text style={[styles.statusText, { color: statusDisplay.color }]}>
{statusDisplay.text}
</Text>
</View>
</View>
{/* Silence Timer */}
{sttIsListening && status !== 'processing' && status !== 'speaking' && (
<View style={styles.timerContainer}>
<Text style={[styles.timerLabel, { color: isDark ? '#9CA3AF' : '#6B7280' }]}>
Silence Timer (iOS auto-stop at 2.0s)
</Text>
<View style={styles.timerRow}>
<Text style={[styles.timerText, {
color: silenceTimer >= 2000 ? '#EF4444' : silenceTimer >= 1500 ? '#F59E0B' : isDark ? '#D1D5DB' : '#374151'
}]}>
{silenceSeconds}s / 2.0s
</Text>
</View>
<View style={[styles.progressBarContainer, { backgroundColor: isDark ? '#374151' : '#E5E7EB' }]}>
<View style={[styles.progressBarFill, {
width: `${silenceProgress * 100}%`,
backgroundColor: silenceTimer >= 2000 ? '#EF4444' : silenceTimer >= 1500 ? '#F59E0B' : '#10B981'
}]} />
</View>
</View>
)}
{/* Current Transcripts */}
{partialTranscript && (
<View style={styles.transcriptContainer}>
<Text style={[styles.transcriptLabel, { color: isDark ? '#9CA3AF' : '#6B7280' }]}>
Partial:
</Text>
<Text style={[styles.transcriptText, { color: isDark ? '#F59E0B' : '#D97706' }]}>
"{partialTranscript}"
</Text>
</View>
)}
{recognizedText && (
<View style={styles.transcriptContainer}>
<Text style={[styles.transcriptLabel, { color: isDark ? '#9CA3AF' : '#6B7280' }]}>
Final:
</Text>
<Text style={[styles.transcriptText, { color: isDark ? '#10B981' : '#059669' }]}>
"{recognizedText}"
</Text>
</View>
)}
</View>
{/* Logs */}
<View style={styles.logsContainer}>
<Text style={[styles.logsTitle, { color: isDark ? '#FFFFFF' : '#000000' }]}>
Event Log
</Text>
<ScrollView
ref={scrollViewRef}
style={[styles.logsScrollView, { backgroundColor: isDark ? '#111827' : '#F9FAFB' }]}
contentContainerStyle={styles.logsContent}
>
{logs.length === 0 ? (
<Text style={[styles.emptyText, { color: isDark ? '#6B7280' : '#9CA3AF' }]}>
No events yet. Press FAB to start.
</Text>
) : (
logs.map(log => {
const time = new Date(log.timestamp);
const timeStr = `${String(time.getHours()).padStart(2, '0')}:${String(time.getMinutes()).padStart(2, '0')}:${String(time.getSeconds()).padStart(2, '0')}.${String(time.getMilliseconds()).padStart(3, '0')}`;
return (
<View key={log.id} style={styles.logEntry}>
<Text style={[styles.logTimestamp, { color: isDark ? '#6B7280' : '#9CA3AF' }]}>
{timeStr}
</Text>
<Text style={styles.logIcon}>{getCategoryIcon(log.category)}</Text>
<Text style={[styles.logMessage, { color: getLogColor(log.level) }]}>
{log.message}
</Text>
</View>
);
})
)}
</ScrollView>
</View>
{/* FAB */}
<TouchableOpacity
style={[styles.fab, {
backgroundColor: isListening ? '#EF4444' : AppColors.primary,
bottom: insets.bottom + 80,
}]}
onPress={() => {
if (isListening) {
addLog('system', '🛑 User stopped session', 'warning');
stopSession();
} else {
clearLogs();
addLog('system', '▶️ User started session', 'success');
startSession();
}
}}
>
<Feather
name={isListening ? 'square' : 'mic'}
size={28}
color="#FFFFFF"
/>
</TouchableOpacity>
</View>
);
}
const styles = StyleSheet.create({
container: {
flex: 1,
},
header: {
flexDirection: 'row',
alignItems: 'center',
justifyContent: 'space-between',
paddingHorizontal: 20,
paddingBottom: 16,
},
headerTitle: {
fontSize: 28,
fontWeight: '700',
},
clearButton: {
padding: 8,
},
statusCard: {
marginHorizontal: 20,
marginBottom: 16,
padding: 16,
borderRadius: 12,
borderLeftWidth: 4,
},
statusRow: {
flexDirection: 'row',
alignItems: 'center',
},
statusIcon: {
fontSize: 32,
marginRight: 12,
},
statusTextContainer: {
flex: 1,
},
statusLabel: {
fontSize: 12,
fontWeight: '500',
marginBottom: 2,
},
statusText: {
fontSize: 18,
fontWeight: '700',
},
timerContainer: {
marginTop: 16,
paddingTop: 16,
borderTopWidth: 1,
borderTopColor: 'rgba(156, 163, 175, 0.2)',
},
timerLabel: {
fontSize: 12,
fontWeight: '500',
marginBottom: 8,
},
timerRow: {
marginBottom: 8,
},
timerText: {
fontSize: 24,
fontWeight: '700',
fontVariant: ['tabular-nums'],
},
progressBarContainer: {
height: 8,
borderRadius: 4,
overflow: 'hidden',
},
progressBarFill: {
height: '100%',
borderRadius: 4,
},
transcriptContainer: {
marginTop: 12,
paddingTop: 12,
borderTopWidth: 1,
borderTopColor: 'rgba(156, 163, 175, 0.2)',
},
transcriptLabel: {
fontSize: 12,
fontWeight: '500',
marginBottom: 4,
},
transcriptText: {
fontSize: 14,
fontStyle: 'italic',
},
logsContainer: {
flex: 1,
marginHorizontal: 20,
},
logsTitle: {
fontSize: 16,
fontWeight: '700',
marginBottom: 8,
},
logsScrollView: {
flex: 1,
borderRadius: 8,
},
logsContent: {
padding: 12,
},
emptyText: {
textAlign: 'center',
fontSize: 14,
fontStyle: 'italic',
paddingVertical: 20,
},
logEntry: {
flexDirection: 'row',
marginBottom: 8,
alignItems: 'flex-start',
},
logTimestamp: {
fontSize: 11,
fontVariant: ['tabular-nums'],
marginRight: 8,
width: 80,
},
logIcon: {
fontSize: 14,
marginRight: 6,
},
logMessage: {
fontSize: 13,
flex: 1,
lineHeight: 18,
},
fab: {
position: 'absolute',
right: 20,
width: 64,
height: 64,
borderRadius: 32,
alignItems: 'center',
justifyContent: 'center',
shadowColor: '#000',
shadowOffset: { width: 0, height: 4 },
shadowOpacity: 0.3,
shadowRadius: 8,
elevation: 8,
},
});

View File

@ -13,6 +13,7 @@ import { BeneficiaryProvider } from '@/contexts/BeneficiaryContext';
import { VoiceTranscriptProvider } from '@/contexts/VoiceTranscriptContext';
import { VoiceCallProvider } from '@/contexts/VoiceCallContext';
import { VoiceProvider } from '@/contexts/VoiceContext';
import { ChatProvider } from '@/contexts/ChatContext';
import { LoadingSpinner } from '@/components/ui/LoadingSpinner';
import { FloatingCallBubble } from '@/components/FloatingCallBubble';
@ -69,7 +70,9 @@ export default function RootLayout() {
<VoiceTranscriptProvider>
<VoiceCallProvider>
<VoiceProvider>
<ChatProvider>
<RootLayoutNav />
</ChatProvider>
</VoiceProvider>
</VoiceCallProvider>
</VoiceTranscriptProvider>

View File

@ -1,9 +1,12 @@
/**
* Voice Floating Action Button Component
*
* A floating action button for toggling voice listening mode.
* Tap to start/stop listening.
* Hidden when a call is already active.
* Positioned at the center of the tab bar.
* Shows different animations for each voice state:
* - idle: white mic icon, green background
* - listening: red background, expanding pulse rings
* - processing: blue background, spinning indicator
* - speaking: green background, wave-like pulse
*/
import React, { useRef, useEffect } from 'react';
@ -12,12 +15,13 @@ import {
TouchableOpacity,
Animated,
ViewStyle,
ActivityIndicator,
} from 'react-native';
import { Ionicons } from '@expo/vector-icons';
import { useSafeAreaInsets } from 'react-native-safe-area-context';
import * as Haptics from 'expo-haptics';
import { AppColors, BorderRadius } from '@/constants/theme';
import { useVoiceCall } from '@/contexts/VoiceCallContext';
import { useVoice } from '@/contexts/VoiceContext';
interface VoiceFABProps {
onPress: () => void;
@ -26,146 +30,232 @@ interface VoiceFABProps {
isListening?: boolean;
}
const FAB_SIZE = 56;
const FAB_SIZE = 60;
export function VoiceFAB({ onPress, style, disabled = false, isListening = false }: VoiceFABProps) {
const { isCallActive } = useVoiceCall();
const insets = useSafeAreaInsets();
const { status: voiceStatus } = useVoice();
// Animation values
const scale = useRef(new Animated.Value(1)).current;
const opacity = useRef(new Animated.Value(1)).current;
const pulseScale = useRef(new Animated.Value(1)).current;
const pulseOpacity = useRef(new Animated.Value(0)).current;
// Pulse ring 1 (main expanding ring)
const pulse1Scale = useRef(new Animated.Value(1)).current;
const pulse1Opacity = useRef(new Animated.Value(0)).current;
// Pulse ring 2 (second ring, offset timing)
const pulse2Scale = useRef(new Animated.Value(1)).current;
const pulse2Opacity = useRef(new Animated.Value(0)).current;
// Speaking glow animation
const glowScale = useRef(new Animated.Value(1)).current;
// Processing rotation
const rotation = useRef(new Animated.Value(0)).current;
// Store animation refs for cleanup
const animationRef = useRef<Animated.CompositeAnimation | null>(null);
// Determine effective state
const effectiveStatus = isListening
? (voiceStatus === 'processing' ? 'processing' : voiceStatus === 'speaking' ? 'speaking' : 'listening')
: 'idle';
// Hide FAB when call is active
useEffect(() => {
if (isCallActive) {
Animated.parallel([
Animated.timing(scale, {
toValue: 0,
duration: 200,
useNativeDriver: true,
}),
Animated.timing(opacity, {
toValue: 0,
duration: 200,
useNativeDriver: true,
}),
Animated.timing(scale, { toValue: 0, duration: 200, useNativeDriver: true }),
Animated.timing(opacity, { toValue: 0, duration: 200, useNativeDriver: true }),
]).start();
} else {
Animated.parallel([
Animated.spring(scale, {
toValue: 1,
friction: 5,
tension: 40,
useNativeDriver: true,
}),
Animated.timing(opacity, {
toValue: 1,
duration: 200,
useNativeDriver: true,
}),
Animated.spring(scale, { toValue: 1, friction: 5, tension: 40, useNativeDriver: true }),
Animated.timing(opacity, { toValue: 1, duration: 200, useNativeDriver: true }),
]).start();
}
}, [isCallActive, scale, opacity]);
// Pulse animation when listening
// Animations based on voice status
useEffect(() => {
if (isListening && !isCallActive) {
// Start pulsing animation
const pulseAnimation = Animated.loop(
Animated.sequence([
// Stop previous animation
if (animationRef.current) {
animationRef.current.stop();
animationRef.current = null;
}
// Reset all animation values
pulse1Scale.setValue(1);
pulse1Opacity.setValue(0);
pulse2Scale.setValue(1);
pulse2Opacity.setValue(0);
glowScale.setValue(1);
rotation.setValue(0);
if (effectiveStatus === 'listening') {
// Double pulse ring animation - more active/dynamic
const pulseAnim = Animated.loop(
Animated.stagger(500, [
Animated.parallel([
Animated.timing(pulseScale, {
toValue: 1.8,
duration: 1000,
useNativeDriver: true,
}),
Animated.timing(pulseOpacity, {
toValue: 0,
duration: 1000,
useNativeDriver: true,
}),
Animated.timing(pulse1Scale, { toValue: 2.0, duration: 1200, useNativeDriver: true }),
Animated.timing(pulse1Opacity, { toValue: 0, duration: 1200, useNativeDriver: true }),
]),
Animated.parallel([
Animated.timing(pulseScale, {
toValue: 1,
duration: 0,
useNativeDriver: true,
}),
Animated.timing(pulseOpacity, {
toValue: 0.6,
duration: 0,
useNativeDriver: true,
}),
Animated.timing(pulse1Scale, { toValue: 1, duration: 0, useNativeDriver: true }),
Animated.timing(pulse1Opacity, { toValue: 0.5, duration: 0, useNativeDriver: true }),
]),
])
);
pulseAnimation.start();
const pulse2Anim = Animated.loop(
Animated.sequence([
Animated.delay(400),
Animated.parallel([
Animated.timing(pulse2Scale, { toValue: 1.8, duration: 1200, useNativeDriver: true }),
Animated.timing(pulse2Opacity, { toValue: 0, duration: 1200, useNativeDriver: true }),
]),
Animated.parallel([
Animated.timing(pulse2Scale, { toValue: 1, duration: 0, useNativeDriver: true }),
Animated.timing(pulse2Opacity, { toValue: 0.4, duration: 0, useNativeDriver: true }),
]),
])
);
const combined = Animated.parallel([pulseAnim, pulse2Anim]);
animationRef.current = combined;
combined.start();
} else if (effectiveStatus === 'speaking') {
// Gentle breathing glow when speaking
const glowAnim = Animated.loop(
Animated.sequence([
Animated.timing(glowScale, { toValue: 1.15, duration: 600, useNativeDriver: true }),
Animated.timing(glowScale, { toValue: 1.0, duration: 600, useNativeDriver: true }),
])
);
// Soft outer glow
const softPulse = Animated.loop(
Animated.sequence([
Animated.parallel([
Animated.timing(pulse1Scale, { toValue: 1.4, duration: 800, useNativeDriver: true }),
Animated.timing(pulse1Opacity, { toValue: 0.3, duration: 400, useNativeDriver: true }),
]),
Animated.parallel([
Animated.timing(pulse1Scale, { toValue: 1.0, duration: 800, useNativeDriver: true }),
Animated.timing(pulse1Opacity, { toValue: 0, duration: 400, useNativeDriver: true }),
]),
])
);
const combined = Animated.parallel([glowAnim, softPulse]);
animationRef.current = combined;
combined.start();
} else if (effectiveStatus === 'processing') {
// Spinning rotation for processing
const spinAnim = Animated.loop(
Animated.timing(rotation, { toValue: 1, duration: 1500, useNativeDriver: true })
);
animationRef.current = spinAnim;
spinAnim.start();
}
return () => {
pulseAnimation.stop();
pulseScale.setValue(1);
pulseOpacity.setValue(0);
};
} else {
pulseScale.setValue(1);
pulseOpacity.setValue(0);
if (animationRef.current) {
animationRef.current.stop();
animationRef.current = null;
}
}, [isListening, isCallActive, pulseScale, pulseOpacity]);
};
}, [effectiveStatus]); // eslint-disable-line react-hooks/exhaustive-deps
// Press animation with haptic feedback
const handlePressIn = () => {
Haptics.impactAsync(Haptics.ImpactFeedbackStyle.Medium);
Animated.spring(scale, {
toValue: 0.9,
friction: 5,
useNativeDriver: true,
}).start();
Animated.spring(scale, { toValue: 0.85, friction: 5, useNativeDriver: true }).start();
};
const handlePressOut = () => {
Animated.spring(scale, {
toValue: 1,
friction: 5,
useNativeDriver: true,
}).start();
Animated.spring(scale, { toValue: 1, friction: 5, useNativeDriver: true }).start();
};
// Don't render if call is active
if (isCallActive) {
return null;
}
// Determine colors and icon based on state
let fabBgColor = AppColors.success; // idle: green
let iconName: 'mic-outline' | 'mic' | 'volume-high' = 'mic-outline';
let pulseColor = AppColors.error;
if (effectiveStatus === 'listening') {
fabBgColor = '#FF3B30'; // red
iconName = 'mic';
pulseColor = '#FF3B30';
} else if (effectiveStatus === 'processing') {
fabBgColor = AppColors.primary; // blue
iconName = 'mic';
pulseColor = AppColors.primary;
} else if (effectiveStatus === 'speaking') {
fabBgColor = '#34C759'; // green
iconName = 'volume-high';
pulseColor = '#34C759';
}
const spin = rotation.interpolate({
inputRange: [0, 1],
outputRange: ['0deg', '360deg'],
});
return (
<Animated.View
style={[
styles.container,
{
bottom: insets.bottom + 80, // Above tab bar
transform: [{ scale }],
opacity,
},
style,
]}
>
{/* Pulse ring when listening */}
{isListening && (
{/* Pulse ring 1 */}
{(effectiveStatus === 'listening' || effectiveStatus === 'speaking') && (
<Animated.View
style={[
styles.pulseRing,
{
transform: [{ scale: pulseScale }],
opacity: pulseOpacity,
backgroundColor: pulseColor,
transform: [{ scale: pulse1Scale }],
opacity: pulse1Opacity,
},
]}
/>
)}
{/* Pulse ring 2 (listening only) */}
{effectiveStatus === 'listening' && (
<Animated.View
style={[
styles.pulseRing,
{
backgroundColor: pulseColor,
transform: [{ scale: pulse2Scale }],
opacity: pulse2Opacity,
},
]}
/>
)}
<Animated.View
style={[
{ transform: [{ scale: effectiveStatus === 'speaking' ? glowScale : 1 }] },
]}
>
<TouchableOpacity
style={[
styles.fab,
isListening && styles.fabListening,
{ backgroundColor: disabled ? AppColors.surface : fabBgColor },
disabled && styles.fabDisabled,
]}
onPress={onPress}
@ -174,36 +264,38 @@ export function VoiceFAB({ onPress, style, disabled = false, isListening = false
disabled={disabled}
activeOpacity={0.9}
>
{effectiveStatus === 'processing' ? (
<Animated.View style={{ transform: [{ rotate: spin }] }}>
<ActivityIndicator size="small" color={AppColors.white} />
</Animated.View>
) : (
<Ionicons
name={isListening ? 'mic' : 'mic-outline'}
name={iconName}
size={28}
color={disabled ? AppColors.textMuted : AppColors.white}
/>
)}
</TouchableOpacity>
</Animated.View>
</Animated.View>
);
}
const styles = StyleSheet.create({
container: {
position: 'absolute',
left: 0,
right: 0,
alignItems: 'center',
zIndex: 100,
justifyContent: 'center',
},
pulseRing: {
position: 'absolute',
width: FAB_SIZE,
height: FAB_SIZE,
borderRadius: BorderRadius.full,
backgroundColor: AppColors.error,
borderRadius: FAB_SIZE / 2,
},
fab: {
width: FAB_SIZE,
height: FAB_SIZE,
borderRadius: BorderRadius.full,
backgroundColor: AppColors.success,
borderRadius: FAB_SIZE / 2,
justifyContent: 'center',
alignItems: 'center',
shadowColor: '#000',
@ -212,11 +304,7 @@ const styles = StyleSheet.create({
shadowRadius: 8,
elevation: 8,
},
fabListening: {
backgroundColor: AppColors.error,
},
fabDisabled: {
backgroundColor: AppColors.surface,
shadowOpacity: 0.1,
},
});

51
contexts/ChatContext.tsx Normal file
View File

@ -0,0 +1,51 @@
/**
* Chat Context - Persists chat messages across tab navigation
*
* Without this context, messages are lost when switching tabs
* because ChatScreen component unmounts and remounts.
*/
import React, { createContext, useContext, useState, useCallback, ReactNode } from 'react';
import type { Message } from '@/types';
interface ChatContextValue {
messages: Message[];
setMessages: React.Dispatch<React.SetStateAction<Message[]>>;
addMessage: (message: Message) => void;
clearMessages: (initialMessage: Message) => void;
}
const ChatContext = createContext<ChatContextValue | undefined>(undefined);
export function ChatProvider({ children }: { children: ReactNode }) {
const [messages, setMessages] = useState<Message[]>([
{
id: '1',
role: 'assistant',
content: "Hello! I'm Julia, your AI wellness companion.\n\nType a message below to chat with me.",
timestamp: new Date(),
},
]);
const addMessage = useCallback((message: Message) => {
setMessages(prev => [...prev, message]);
}, []);
const clearMessages = useCallback((initialMessage: Message) => {
setMessages([initialMessage]);
}, []);
return (
<ChatContext.Provider value={{ messages, setMessages, addMessage, clearMessages }}>
{children}
</ChatContext.Provider>
);
}
export function useChat() {
const context = useContext(ChatContext);
if (!context) {
throw new Error('useChat must be used within ChatProvider');
}
return context;
}

View File

@ -153,6 +153,12 @@ export function VoiceProvider({ children }: { children: ReactNode }) {
// API token cache
const apiTokenRef = useRef<string | null>(null);
// Abort controller for cancelling in-flight API requests
const abortControllerRef = useRef<AbortController | null>(null);
// Flag to prevent speak() after session stopped
const sessionStoppedRef = useRef(false);
// Deployment ID from settings
const deploymentIdRef = useRef<string | null>(null);
@ -207,6 +213,12 @@ export function VoiceProvider({ children }: { children: ReactNode }) {
return null;
}
// Don't send if session was stopped
if (sessionStoppedRef.current) {
console.log('[VoiceContext] Session stopped, skipping API call');
return null;
}
console.log('[VoiceContext] Sending transcript to API:', trimmedText);
setStatus('processing');
setError(null);
@ -214,10 +226,23 @@ export function VoiceProvider({ children }: { children: ReactNode }) {
// Add user message to transcript for chat display
addTranscriptEntry('user', trimmedText);
// Create abort controller for this request
if (abortControllerRef.current) {
abortControllerRef.current.abort();
}
const abortController = new AbortController();
abortControllerRef.current = abortController;
try {
// Get API token
const token = await getWellNuoToken();
// Check if aborted
if (abortController.signal.aborted || sessionStoppedRef.current) {
console.log('[VoiceContext] Request aborted before API call');
return null;
}
// Normalize question
const normalizedQuestion = normalizeQuestion(trimmedText);
@ -244,10 +269,17 @@ export function VoiceProvider({ children }: { children: ReactNode }) {
method: 'POST',
headers: { 'Content-Type': 'application/x-www-form-urlencoded' },
body: new URLSearchParams(requestParams).toString(),
signal: abortController.signal,
});
const data = await response.json();
// Check if session was stopped while waiting for response
if (sessionStoppedRef.current) {
console.log('[VoiceContext] Session stopped during API call, discarding response');
return null;
}
if (data.ok && data.response?.body) {
const responseText = data.response.body;
console.log('[VoiceContext] API response:', responseText.slice(0, 100) + '...');
@ -256,7 +288,7 @@ export function VoiceProvider({ children }: { children: ReactNode }) {
// Add Julia's response to transcript for chat display
addTranscriptEntry('assistant', responseText);
// Speak the response
// Speak the response (will be skipped if session stopped)
await speak(responseText);
return responseText;
@ -269,8 +301,13 @@ export function VoiceProvider({ children }: { children: ReactNode }) {
throw new Error(data.message || 'Could not get response');
}
} catch (err) {
// Ignore abort errors
if (err instanceof Error && err.name === 'AbortError') {
console.log('[VoiceContext] API request aborted');
return null;
}
const errorMsg = err instanceof Error ? err.message : 'Unknown error';
console.error('[VoiceContext] API error:', errorMsg);
console.warn('[VoiceContext] API error:', errorMsg);
setError(errorMsg);
setStatus('idle');
return null;
@ -300,6 +337,12 @@ export function VoiceProvider({ children }: { children: ReactNode }) {
const speak = useCallback(async (text: string): Promise<void> => {
if (!text.trim()) return;
// Don't speak if session was stopped
if (sessionStoppedRef.current) {
console.log('[VoiceContext] Session stopped, skipping TTS');
return;
}
console.log('[VoiceContext] Speaking:', text.slice(0, 50) + '...');
setStatus('speaking');
setIsSpeaking(true);
@ -315,20 +358,27 @@ export function VoiceProvider({ children }: { children: ReactNode }) {
onDone: () => {
console.log('[VoiceContext] TTS completed');
setIsSpeaking(false);
// Return to listening state after speaking (if session is active)
// Return to listening state after speaking (if session wasn't stopped)
if (!sessionStoppedRef.current) {
setStatus('listening');
}
resolve();
},
onError: (error) => {
console.error('[VoiceContext] TTS error:', error);
console.warn('[VoiceContext] TTS error:', error);
setIsSpeaking(false);
if (!sessionStoppedRef.current) {
setStatus('listening');
}
resolve();
},
onStopped: () => {
console.log('[VoiceContext] TTS stopped (interrupted)');
setIsSpeaking(false);
// Don't set status to listening if session was stopped by user
if (!sessionStoppedRef.current) {
setStatus('listening');
}
resolve();
},
});
@ -348,6 +398,7 @@ export function VoiceProvider({ children }: { children: ReactNode }) {
*/
const startSession = useCallback(() => {
console.log('[VoiceContext] Starting voice session');
sessionStoppedRef.current = false;
setStatus('listening');
setIsListening(true);
setError(null);
@ -360,7 +411,16 @@ export function VoiceProvider({ children }: { children: ReactNode }) {
*/
const stopSession = useCallback(() => {
console.log('[VoiceContext] Stopping voice session');
// Mark session as stopped FIRST to prevent any pending callbacks
sessionStoppedRef.current = true;
// Abort any in-flight API requests
if (abortControllerRef.current) {
abortControllerRef.current.abort();
abortControllerRef.current = null;
}
// Stop TTS
Speech.stop();
// Reset all state
setStatus('idle');
setIsListening(false);
setIsSpeaking(false);

View File

@ -104,6 +104,8 @@ export function useSpeechRecognition(
const isStartingRef = useRef(false);
// Track if voice has been detected in current session (for onVoiceDetected callback)
const voiceDetectedRef = useRef(false);
// Track last partial transcript for iOS fix (iOS never sends isFinal:true)
const lastPartialRef = useRef('');
// Check availability on mount
useEffect(() => {
@ -120,7 +122,7 @@ export function useSpeechRecognition(
setIsAvailable(true);
console.log('[SpeechRecognition] Available, permission status:', status.status);
} catch (err) {
console.error('[SpeechRecognition] Not available:', err);
console.warn('[SpeechRecognition] Not available:', err);
setIsAvailable(false);
}
};
@ -140,6 +142,16 @@ export function useSpeechRecognition(
// Event: Recognition ended
useSpeechRecognitionEvent('end', () => {
console.log('[SpeechRecognition] Ended');
// iOS FIX: iOS never sends isFinal:true, so we send last partial as final when STT ends
const lastPartial = lastPartialRef.current;
if (lastPartial && lastPartial.trim().length > 0) {
console.log('[SpeechRecognition] 🍎 iOS FIX - Sending last partial as final:', lastPartial);
setRecognizedText(lastPartial);
onResult?.(lastPartial, true); // Send as final=true
lastPartialRef.current = ''; // Clear after sending
}
setIsListening(false);
setPartialTranscript('');
isStartingRef.current = false;
@ -167,8 +179,10 @@ export function useSpeechRecognition(
if (isFinal) {
setRecognizedText(transcript);
setPartialTranscript('');
lastPartialRef.current = ''; // Clear after final
} else {
setPartialTranscript(transcript);
lastPartialRef.current = transcript; // Save for iOS fix
}
onResult?.(transcript, isFinal);
@ -177,15 +191,20 @@ export function useSpeechRecognition(
// Event: Error occurred
useSpeechRecognitionEvent('error', (event: any) => {
const errorMessage = event.message || event.error || 'Speech recognition error';
console.error('[SpeechRecognition] Error:', errorMessage);
const errorCode = event.error || '';
const errorMessage = event.message || errorCode || 'Speech recognition error';
// Don't set error for "no-speech" - this is normal when user doesn't say anything
if (event.error !== 'no-speech') {
setError(errorMessage);
onError?.(errorMessage);
// "no-speech" is normal when user is silent — ignore completely
if (errorCode === 'no-speech') {
console.log('[SpeechRecognition] No speech detected (silence) - ignoring');
setIsListening(false);
isStartingRef.current = false;
return;
}
console.warn('[SpeechRecognition] Error:', errorMessage);
setError(errorMessage);
onError?.(errorMessage);
setIsListening(false);
isStartingRef.current = false;
});
@ -207,7 +226,7 @@ export function useSpeechRecognition(
if (!isAvailable) {
const msg = 'Speech recognition is not available on this device';
console.error('[SpeechRecognition]', msg);
console.warn('[SpeechRecognition]', msg);
setError(msg);
onError?.(msg);
return false;
@ -224,7 +243,7 @@ export function useSpeechRecognition(
if (!permissionResult.granted) {
const msg = 'Microphone permission denied';
console.error('[SpeechRecognition]', msg);
console.warn('[SpeechRecognition]', msg);
setError(msg);
onError?.(msg);
isStartingRef.current = false;
@ -249,7 +268,7 @@ export function useSpeechRecognition(
return true;
} catch (err) {
const msg = err instanceof Error ? err.message : 'Failed to start speech recognition';
console.error('[SpeechRecognition] Start error:', msg);
console.warn('[SpeechRecognition] Start error:', msg);
setError(msg);
onError?.(msg);
isStartingRef.current = false;

View File

@ -185,7 +185,7 @@ export function useTextToSpeech(
},
onError: (err) => {
const errorMsg = typeof err === 'string' ? err : 'Speech synthesis error';
console.error('[TTS] Error:', errorMsg);
console.warn('[TTS] Error:', errorMsg);
if (isMountedRef.current) {
setIsSpeaking(false);
setCurrentText(null);
@ -227,7 +227,7 @@ export function useTextToSpeech(
console.log('[TTS] Available voices:', voices.length);
return voices;
} catch (err) {
console.error('[TTS] Could not get voices:', err);
console.warn('[TTS] Could not get voices:', err);
return [];
}
}, []);

34
package-lock.json generated
View File

@ -12,6 +12,7 @@
"@expo/vector-icons": "^15.0.3",
"@jamsch/expo-speech-recognition": "^0.2.15",
"@notifee/react-native": "^9.1.8",
"@react-native-async-storage/async-storage": "2.2.0",
"@react-navigation/bottom-tabs": "^7.4.0",
"@react-navigation/elements": "^2.6.3",
"@react-navigation/native": "^7.1.8",
@ -3568,6 +3569,18 @@
}
}
},
"node_modules/@react-native-async-storage/async-storage": {
"version": "2.2.0",
"resolved": "https://registry.npmjs.org/@react-native-async-storage/async-storage/-/async-storage-2.2.0.tgz",
"integrity": "sha512-gvRvjR5JAaUZF8tv2Kcq/Gbt3JHwbKFYfmb445rhOj6NUMx3qPLixmDx5pZAyb9at1bYvJ4/eTUipU5aki45xw==",
"license": "MIT",
"dependencies": {
"merge-options": "^3.0.4"
},
"peerDependencies": {
"react-native": "^0.0.0-0 || >=0.65 <1.0"
}
},
"node_modules/@react-native/assets-registry": {
"version": "0.81.5",
"resolved": "https://registry.npmjs.org/@react-native/assets-registry/-/assets-registry-0.81.5.tgz",
@ -8754,6 +8767,15 @@
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/is-plain-obj": {
"version": "2.1.0",
"resolved": "https://registry.npmjs.org/is-plain-obj/-/is-plain-obj-2.1.0.tgz",
"integrity": "sha512-YWnfyRwxL/+SsrWYfOpUtz5b3YD+nyfkHvjbcanzk8zgyO4ASD67uVMRt8k5bM4lLMDnXfriRhOpemw+NfT1eA==",
"license": "MIT",
"engines": {
"node": ">=8"
}
},
"node_modules/is-regex": {
"version": "1.2.1",
"resolved": "https://registry.npmjs.org/is-regex/-/is-regex-1.2.1.tgz",
@ -9785,6 +9807,18 @@
"integrity": "sha512-zYiwtZUcYyXKo/np96AGZAckk+FWWsUdJ3cHGGmld7+AhvcWmQyGCYUh1hc4Q/pkOhb65dQR/pqCyK0cOaHz4Q==",
"license": "MIT"
},
"node_modules/merge-options": {
"version": "3.0.4",
"resolved": "https://registry.npmjs.org/merge-options/-/merge-options-3.0.4.tgz",
"integrity": "sha512-2Sug1+knBjkaMsMgf1ctR1Ujx+Ayku4EdJN4Z+C2+JzoeF7A3OZ9KM2GY0CpQS51NR61LTurMJrRKPhSs3ZRTQ==",
"license": "MIT",
"dependencies": {
"is-plain-obj": "^2.1.0"
},
"engines": {
"node": ">=10"
}
},
"node_modules/merge-stream": {
"version": "2.0.0",
"resolved": "https://registry.npmjs.org/merge-stream/-/merge-stream-2.0.0.tgz",

View File

@ -15,6 +15,7 @@
"@expo/vector-icons": "^15.0.3",
"@jamsch/expo-speech-recognition": "^0.2.15",
"@notifee/react-native": "^9.1.8",
"@react-native-async-storage/async-storage": "2.2.0",
"@react-navigation/bottom-tabs": "^7.4.0",
"@react-navigation/elements": "^2.6.3",
"@react-navigation/native": "^7.1.8",

View File

@ -113,7 +113,7 @@ class ApiService {
console.log('[API] refreshToken result:', result.ok ? 'SUCCESS' : result.error?.message);
return result;
} catch (error) {
console.error('[API] refreshToken error:', error);
console.warn('[API] refreshToken error:', error);
return {
ok: false,
error: { message: 'Failed to refresh token', code: 'REFRESH_ERROR' }
@ -229,6 +229,20 @@ class ApiService {
}
}
// Voice API Type management
async setVoiceApiType(type: 'voice_ask' | 'ask_wellnuo_ai'): Promise<void> {
await SecureStore.setItemAsync('voiceApiType', type);
}
async getVoiceApiType(): Promise<'voice_ask' | 'ask_wellnuo_ai'> {
try {
const saved = await SecureStore.getItemAsync('voiceApiType');
return (saved as 'voice_ask' | 'ask_wellnuo_ai') || 'ask_wellnuo_ai';
} catch {
return 'ask_wellnuo_ai';
}
}
async validateDeploymentId(deploymentId: string): Promise<ApiResponse<{ valid: boolean; name?: string }>> {
const token = await this.getToken();
const userName = await this.getUserName();

View File

@ -46,7 +46,7 @@ class CallManager {
await this.disconnectCallback();
console.log(`[CallManager] Previous call disconnected`);
} catch (err) {
console.error(`[CallManager] Error disconnecting previous call:`, err);
console.warn(`[CallManager] Error disconnecting previous call:`, err);
}
}
}
@ -98,7 +98,7 @@ class CallManager {
try {
await this.disconnectCallback();
} catch (err) {
console.error(`[CallManager] Error force disconnecting:`, err);
console.warn(`[CallManager] Error force disconnecting:`, err);
}
this.activeCallId = null;
this.disconnectCallback = null;

View File

@ -228,7 +228,7 @@ export async function createCall(options: {
if (!response.ok) {
const errorData = await response.json().catch(() => ({}));
console.error('[Ultravox] API error:', response.status, errorData);
console.warn('[Ultravox] API error:', response.status, errorData);
return {
success: false,
error: errorData.message || `API error: ${response.status}`,
@ -239,7 +239,7 @@ export async function createCall(options: {
console.log('[Ultravox] Call created:', data.callId);
return { success: true, data };
} catch (error) {
console.error('[Ultravox] Create call error:', error);
console.warn('[Ultravox] Create call error:', error);
return {
success: false,
error: error instanceof Error ? error.message : 'Failed to create call',
@ -265,7 +265,7 @@ export async function getCall(callId: string): Promise<CreateCallResponse | null
return await response.json();
} catch (error) {
console.error('[Ultravox] Get call error:', error);
console.warn('[Ultravox] Get call error:', error);
return null;
}
}
@ -284,7 +284,7 @@ export async function endCall(callId: string): Promise<boolean> {
return response.ok;
} catch (error) {
console.error('[Ultravox] End call error:', error);
console.warn('[Ultravox] End call error:', error);
return false;
}
}

View File

@ -23,7 +23,7 @@ async function getNotifee() {
try {
notifee = (await import('@notifee/react-native')).default;
} catch (e) {
console.error('[AndroidVoiceService] Failed to load notifee:', e);
console.warn('[AndroidVoiceService] Failed to load notifee:', e);
return null;
}
}
@ -52,7 +52,7 @@ async function createNotificationChannel(): Promise<void> {
});
console.log('[AndroidVoiceService] Notification channel created');
} catch (e) {
console.error('[AndroidVoiceService] Failed to create channel:', e);
console.warn('[AndroidVoiceService] Failed to create channel:', e);
}
}
@ -102,7 +102,7 @@ export async function startVoiceCallService(): Promise<void> {
console.log('[AndroidVoiceService] Foreground service started');
} catch (e) {
console.error('[AndroidVoiceService] Failed to start foreground service:', e);
console.warn('[AndroidVoiceService] Failed to start foreground service:', e);
}
}
@ -123,7 +123,7 @@ export async function stopVoiceCallService(): Promise<void> {
await notifeeModule.cancelNotification(NOTIFICATION_ID);
console.log('[AndroidVoiceService] Foreground service stopped');
} catch (e) {
console.error('[AndroidVoiceService] Failed to stop foreground service:', e);
console.warn('[AndroidVoiceService] Failed to stop foreground service:', e);
}
}
@ -178,7 +178,7 @@ export async function openBatteryOptimizationSettings(): Promise<void> {
// Try generic battery settings
await Linking.openSettings();
} catch (e) {
console.error('[AndroidVoiceService] Failed to open settings:', e);
console.warn('[AndroidVoiceService] Failed to open settings:', e);
}
}
@ -262,7 +262,7 @@ export async function requestNotificationPermission(): Promise<boolean> {
console.log('[AndroidVoiceService] Notification permission:', granted ? 'granted' : 'denied');
return granted;
} catch (e) {
console.error('[AndroidVoiceService] Failed to request notification permission:', e);
console.warn('[AndroidVoiceService] Failed to request notification permission:', e);
return false;
}
}