Compare commits: f2803ca5db ... d6353c8533

2 commits: d6353c8533, 05f872d067

REVIEW_REPORT.md (new file, 1 line)

@@ -0,0 +1 @@
+Limit reached · resets 1pm (America/Los_Angeles) · turn on /extra-usage

@@ -1,6 +1,6 @@
 import { Tabs } from 'expo-router';
 import React, { useCallback, useEffect, useRef } from 'react';
-import { Platform, View, AppState, AppStateStatus } from 'react-native';
+import { Platform, View, AppState, AppStateStatus, TouchableOpacity, StyleSheet } from 'react-native';
 import { Feather } from '@expo/vector-icons';
 import { useSafeAreaInsets } from 'react-native-safe-area-context';

@@ -29,6 +29,7 @@ export default function TabLayout() {
     interruptIfSpeaking,
     setTranscript,
     setPartialTranscript,
+    partialTranscript, // for iOS auto-stop timer
     sendTranscript,
   } = useVoice();

@@ -40,14 +41,13 @@
   const pendingInterruptTranscriptRef = useRef<string | null>(null);

   // Callback for voice detection - interrupt TTS when user speaks
-  const handleVoiceDetected = useCallback(() => {
-    // Interrupt TTS when user starts speaking during 'speaking' state
-    if (status === 'speaking' || isSpeaking) {
-      console.log('[TabLayout] Voice detected during TTS playback - INTERRUPTING Julia');
-      const wasInterrupted = interruptIfSpeaking();
-      if (wasInterrupted) {
-        console.log('[TabLayout] TTS interrupted successfully, now listening to user');
-      }
-    }
-  }, [status, isSpeaking, interruptIfSpeaking]);
+  // NOTE: On Android, STT doesn't run during TTS (shared audio focus),
+  // so interruption on Android happens via FAB press instead.
+  // On iOS, STT can run alongside TTS, so voice detection works.
+  const handleVoiceDetected = useCallback(() => {
+    if (Platform.OS === 'ios' && (status === 'speaking' || isSpeaking)) {
+      console.log('[TabLayout] Voice detected during TTS (iOS) - INTERRUPTING Julia');
+      interruptIfSpeaking();
+    }
+  }, [status, isSpeaking, interruptIfSpeaking]);
@@ -63,21 +63,24 @@

   // Callback for STT results
-  const handleSpeechResult = useCallback((transcript: string, isFinal: boolean) => {
-    if (isFinal) {
-      // Check if we're still in speaking mode (user interrupted Julia)
-      if (isSpeaking || status === 'speaking') {
-        // Store the transcript to send after TTS fully stops
-        console.log('[TabLayout] Got final result while TTS playing - storing for after interruption:', transcript);
-        pendingInterruptTranscriptRef.current = transcript;
-      } else {
-        // Normal case: not speaking, send immediately
-        setTranscript(transcript);
-        sendTranscript(transcript);
-      }
-    } else {
-      setPartialTranscript(transcript);
-    }
-  }, [setTranscript, setPartialTranscript, sendTranscript, isSpeaking, status]);
+  const handleSpeechResult = useCallback((transcript: string, isFinal: boolean) => {
+    // Ignore any STT results during TTS playback or processing (echo prevention)
+    if (status === 'speaking' || status === 'processing') {
+      if (isFinal) {
+        // User interrupted Julia with speech — store to send after TTS stops
+        console.log('[TabLayout] Got final result during TTS/processing - storing for after interruption:', transcript);
+        pendingInterruptTranscriptRef.current = transcript;
+      }
+      // Ignore partial transcripts during TTS (they're likely echo)
+      return;
+    }
+
+    if (isFinal) {
+      setTranscript(transcript);
+      sendTranscript(transcript);
+    } else {
+      setPartialTranscript(transcript);
+    }
+  }, [setTranscript, setPartialTranscript, sendTranscript, status]);
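An aside on the handoff above: a final transcript that arrives while TTS is still playing is parked in `pendingInterruptTranscriptRef` and flushed later by the status effect further down in this file. A reduced, self-contained sketch of that ref-based handoff pattern (the hook and its names are illustrative, not part of this codebase):

```tsx
import { useRef, useCallback, useEffect } from 'react';

// Park a value produced while the system is busy, flush it once idle again.
function usePendingHandoff(send: (text: string) => void, busy: boolean) {
  const pendingRef = useRef<string | null>(null);

  // Producer side: store instead of sending while busy.
  const offer = useCallback((text: string) => {
    if (busy) {
      pendingRef.current = text;
    } else {
      send(text);
    }
  }, [busy, send]);

  // Consumer side: flush the parked value when no longer busy.
  useEffect(() => {
    if (!busy && pendingRef.current) {
      const text = pendingRef.current;
      pendingRef.current = null;
      send(text);
    }
  }, [busy, send]);

  return offer;
}
```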
@@ -85,7 +88,7 @@
     stopListening,
     isListening: sttIsListening,
   } = useSpeechRecognition({
-    lang: 'ru-RU',
+    lang: 'en-US',
     continuous: true,
     interimResults: true,
     onVoiceDetected: handleVoiceDetected,
@@ -93,6 +96,65 @@
     onEnd: handleSTTEnd,
   });

+  // Ref to prevent concurrent startListening calls
+  const sttStartingRef = useRef(false);
+  // Ref to track last partial transcript for iOS auto-stop
+  const lastPartialTextRef = useRef('');
+  const silenceTimerRef = useRef<NodeJS.Timeout | null>(null);
+
+  // iOS AUTO-STOP: Stop STT after 2 seconds of silence (no new partial transcripts)
+  // This triggers onEnd → iOS fix sends lastPartial as final
+  useEffect(() => {
+    // Clear existing timer
+    if (silenceTimerRef.current) {
+      clearTimeout(silenceTimerRef.current);
+      silenceTimerRef.current = null;
+    }
+
+    // Only track silence when STT is listening (not during processing/speaking)
+    if (sttIsListening && status !== 'processing' && status !== 'speaking') {
+      // Get current partial from VoiceContext (set by handleSpeechResult)
+      const currentPartial = partialTranscript;
+
+      // If partial changed, update ref and set new 2s timer
+      if (currentPartial !== lastPartialTextRef.current) {
+        lastPartialTextRef.current = currentPartial;
+
+        // Start 2-second silence timer
+        silenceTimerRef.current = setTimeout(() => {
+          if (sttIsListening && sessionActiveRef.current) {
+            console.log('[TabLayout] 🍎 iOS AUTO-STOP: 2s silence - stopping STT to trigger onEnd → iOS fix');
+            stopListening();
+          }
+        }, 2000);
+      }
+    }
+
+    return () => {
+      if (silenceTimerRef.current) {
+        clearTimeout(silenceTimerRef.current);
+        silenceTimerRef.current = null;
+      }
+    };
+  }, [sttIsListening, status, partialTranscript, stopListening]);
+
+  // Safe wrapper to start STT with debounce protection
+  const safeStartSTT = useCallback(() => {
+    if (sttIsListening || sttStartingRef.current) {
+      return; // Already listening or starting
+    }
+    // Don't start STT during TTS on Android - they share audio focus
+    if (Platform.OS === 'android' && (status === 'speaking' || isSpeaking)) {
+      console.log('[TabLayout] Skipping STT start - TTS is playing (Android audio focus)');
+      return;
+    }
+    sttStartingRef.current = true;
+    console.log('[TabLayout] Starting STT...');
+    startListening().finally(() => {
+      sttStartingRef.current = false;
+    });
+  }, [sttIsListening, status, isSpeaking, startListening]);
+
   // Update session active ref when isListening changes
   useEffect(() => {
     sessionActiveRef.current = isListening;
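The auto-stop effect above is an inactivity watchdog keyed on a changing value: the 2 s timer is re-armed only when a new partial transcript arrives, so an unchanged partial for 2 s means silence. A reduced sketch of that idea (illustrative names; `onIdle` should be memoized by the caller so the effect does not rerun needlessly):

```tsx
import { useEffect, useRef } from 'react';

// Fire `onIdle` once `value` has not changed for `ms` milliseconds.
function useInactivityWatchdog<T>(value: T, ms: number, onIdle: () => void) {
  const lastValueRef = useRef(value);
  const timerRef = useRef<ReturnType<typeof setTimeout> | null>(null);

  useEffect(() => {
    // Re-arm the timer only on change; no change means the timer runs out.
    if (value !== lastValueRef.current) {
      lastValueRef.current = value;
      if (timerRef.current) clearTimeout(timerRef.current);
      timerRef.current = setTimeout(onIdle, ms);
    }
    return () => {
      if (timerRef.current) clearTimeout(timerRef.current);
    };
  }, [value, ms, onIdle]);
}
```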
@@ -104,42 +166,32 @@
   // Start/stop STT when voice session starts/stops
   useEffect(() => {
     if (isListening) {
-      console.log('[TabLayout] Starting STT for voice session');
-      startListening();
+      console.log('[TabLayout] Voice session started - starting STT');
+      safeStartSTT();
     } else {
-      console.log('[TabLayout] Stopping STT - session ended');
+      console.log('[TabLayout] Voice session ended - stopping STT');
       stopListening();
     }
-  }, [isListening, startListening, stopListening]);
+  }, [isListening]); // eslint-disable-line react-hooks/exhaustive-deps

-  // Restart STT if it ended while session is still active
-  // This ensures continuous listening even during/after TTS playback
-  useEffect(() => {
-    if (shouldRestartSTTRef.current && sessionActiveRef.current && !sttIsListening) {
-      console.log('[TabLayout] Restarting STT - session still active');
-      shouldRestartSTTRef.current = false;
-      // Small delay to ensure clean restart
-      const timer = setTimeout(() => {
-        if (sessionActiveRef.current) {
-          startListening();
-        }
-      }, 100);
-      return () => clearTimeout(timer);
-    }
-  }, [sttIsListening, startListening]);
-
   // Track previous status to detect transition from speaking to listening
   const prevStatusRef = useRef<typeof status>('idle');

-  // Auto-restart STT when TTS finishes (status changes from 'speaking' to 'listening')
-  // Also process any pending transcript from user interruption
+  // Stop STT when entering processing or speaking state (prevent echo)
+  // Restart STT when TTS finishes (speaking → listening)
   useEffect(() => {
     const prevStatus = prevStatusRef.current;
     prevStatusRef.current = status;

-    // When transitioning from speaking to listening, handle pending interrupt transcript
+    // Stop STT when processing starts or TTS starts (prevent Julia hearing herself)
+    if ((status === 'processing' || status === 'speaking') && sttIsListening) {
+      console.log('[TabLayout] Stopping STT during', status, '(echo prevention)');
+      stopListening();
+    }
+
+    // When TTS finishes (speaking → listening), restart STT
     if (prevStatus === 'speaking' && status === 'listening' && sessionActiveRef.current) {
-      console.log('[TabLayout] TTS finished/interrupted - checking for pending transcript');
+      console.log('[TabLayout] TTS finished - restarting STT');

       // Process pending transcript from interruption if any
       const pendingTranscript = pendingInterruptTranscriptRef.current;
@@ -150,76 +202,74 @@
         sendTranscript(pendingTranscript);
       }

-      // Small delay to ensure TTS cleanup is complete, then restart STT
+      // Delay to let TTS fully release audio focus, then restart STT
       const timer = setTimeout(() => {
-        if (sessionActiveRef.current && !sttIsListening) {
-          startListening();
+        if (sessionActiveRef.current) {
+          safeStartSTT();
         }
-      }, 200);
+      }, 800); // 800ms to ensure TTS audio fully fades
       return () => clearTimeout(timer);
     }
-  }, [status, sttIsListening, startListening, setTranscript, sendTranscript]);
-
-  // ============================================================================
-  // TAB NAVIGATION PERSISTENCE
-  // Ensure voice session continues when user switches between tabs.
-  // The session state is in VoiceContext (root level), but STT may stop due to:
-  // 1. Native audio session changes
-  // 2. Tab unmount/remount (though tabs layout doesn't unmount)
-  // 3. AppState changes (background/foreground)
-  // ============================================================================
-
-  // Monitor and recover STT state during tab navigation
-  // If session is active but STT stopped unexpectedly, restart it
-  // IMPORTANT: STT should run DURING TTS playback to detect user interruption!
-  useEffect(() => {
-    // Check every 500ms if STT needs to be restarted
-    const intervalId = setInterval(() => {
-      // Only act if session should be active (isListening from VoiceContext)
-      // but STT is not actually listening
-      // Note: We DO want STT running during 'speaking' to detect interruption!
-      // Only skip during 'processing' (API call in progress)
-      if (
-        sessionActiveRef.current &&
-        !sttIsListening &&
-        status !== 'processing'
-      ) {
-        console.log('[TabLayout] STT watchdog: restarting STT (session active but STT stopped, status:', status, ')');
-        startListening();
-      }
-    }, 500);
-
-    return () => clearInterval(intervalId);
-  }, [sttIsListening, status, startListening]);
+
+    // When processing finishes and goes to speaking, STT is already stopped (above)
+    // When speaking finishes and goes to listening, STT restarts (above)
+  }, [status]); // eslint-disable-line react-hooks/exhaustive-deps
+
+  // When STT ends unexpectedly during active session, restart it (but not during TTS)
+  useEffect(() => {
+    if (
+      shouldRestartSTTRef.current &&
+      sessionActiveRef.current &&
+      !sttIsListening &&
+      status !== 'processing' &&
+      status !== 'speaking'
+    ) {
+      shouldRestartSTTRef.current = false;
+      console.log('[TabLayout] STT ended unexpectedly - restarting');
+      const timer = setTimeout(() => {
+        if (sessionActiveRef.current) {
+          safeStartSTT();
+        }
+      }, 300);
+      return () => clearTimeout(timer);
+    }
+  }, [sttIsListening]); // eslint-disable-line react-hooks/exhaustive-deps

   // Handle app state changes (background/foreground)
-  // When app comes back to foreground, restart STT if session was active
   useEffect(() => {
     const handleAppStateChange = (nextAppState: AppStateStatus) => {
       if (nextAppState === 'active' && sessionActiveRef.current) {
-        // App came to foreground, give it a moment then check STT
-        // STT should run even during 'speaking' to detect user interruption
         setTimeout(() => {
-          if (sessionActiveRef.current && !sttIsListening && status !== 'processing') {
+          if (sessionActiveRef.current && !sttIsListening && status !== 'processing' && status !== 'speaking') {
             console.log('[TabLayout] App foregrounded - restarting STT');
-            startListening();
+            safeStartSTT();
           }
-        }, 300);
+        }, 500);
       }
     };

     const subscription = AppState.addEventListener('change', handleAppStateChange);
     return () => subscription.remove();
-  }, [sttIsListening, status, startListening]);
+  }, [sttIsListening, status, safeStartSTT]);

   // Handle voice FAB press - toggle listening mode
+  // Must check ALL active states (listening, processing, speaking), not just isListening
   const handleVoiceFABPress = useCallback(() => {
-    if (isListening) {
+    const isSessionActive = isListening || status === 'speaking' || status === 'processing';
+    console.log('[TabLayout] FAB pressed, isSessionActive:', isSessionActive, 'status:', status, 'isListening:', isListening);
+
+    if (isSessionActive) {
+      // Force-stop everything: STT, TTS, and session state
+      console.log('[TabLayout] Force-stopping everything');
+      stopListening();
       stopSession();
+      sessionActiveRef.current = false;
+      shouldRestartSTTRef.current = false;
+      pendingInterruptTranscriptRef.current = null;
     } else {
       startSession();
     }
-  }, [isListening, startSession, stopSession]);
+  }, [isListening, status, startSession, stopSession, stopListening]);

   // Calculate tab bar height based on safe area
   // On iOS with home indicator, insets.bottom is ~34px
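The status effect above uses a previous-value ref so it reacts to the speaking → listening edge rather than to the listening level. A minimal sketch of that edge-detection idiom (names are illustrative):

```tsx
import { useEffect, useRef } from 'react';

type Status = 'idle' | 'listening' | 'processing' | 'speaking';

// Run `onEdge` only on a specific transition, not while a state merely holds.
function useStatusEdge(status: Status, from: Status, to: Status, onEdge: () => void) {
  const prevRef = useRef<Status>('idle');

  useEffect(() => {
    const prev = prevRef.current;
    prevRef.current = status;
    if (prev === from && status === to) {
      onEdge();
    }
  }, [status, from, to, onEdge]);
}
```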
@@ -278,6 +328,28 @@ export default function TabLayout() {
           ),
         }}
       />
+      {/* Voice FAB - center tab button */}
+      <Tabs.Screen
+        name="explore"
+        options={{
+          title: '',
+          tabBarButton: () => (
+            <View style={tabFABStyles.fabWrapper}>
+              <VoiceFAB onPress={handleVoiceFABPress} isListening={isListening || status === 'speaking' || status === 'processing'} />
+            </View>
+          ),
+        }}
+      />
+      {/* Voice Debug - visible tab */}
+      <Tabs.Screen
+        name="voice-debug"
+        options={{
+          title: 'Debug',
+          tabBarIcon: ({ color, size }) => (
+            <Feather name="activity" size={22} color={color} />
+          ),
+        }}
+      />
       <Tabs.Screen
         name="profile"
         options={{
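The center-FAB placement above reuses a registered route's slot: `tabBarButton` replaces the default tab button with a custom view, and a negative `top` lifts it above the bar. A standalone sketch of those options (component names and styles here are illustrative, not the app's):

```tsx
import React from 'react';
import { View, TouchableOpacity, Text, StyleSheet } from 'react-native';

// Illustrative stand-in for the FAB; any component works here.
function Fab({ onPress }: { onPress: () => void }) {
  return (
    <TouchableOpacity onPress={onPress} style={styles.fab}>
      <Text>🎤</Text>
    </TouchableOpacity>
  );
}

// Options for a Tabs.Screen whose slot hosts the FAB instead of a tab button.
// The negative `top` lifts the button above the bar, as in the diff above.
export const fabTabOptions = {
  title: '',
  tabBarButton: () => (
    <View style={styles.wrapper}>
      <Fab onPress={() => console.log('fab pressed')} />
    </View>
  ),
};

const styles = StyleSheet.create({
  wrapper: { flex: 1, alignItems: 'center', justifyContent: 'center', top: -20 },
  fab: { width: 60, height: 60, borderRadius: 30, alignItems: 'center', justifyContent: 'center', backgroundColor: '#10B981' },
});
```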
@@ -287,13 +359,6 @@ export default function TabLayout() {
           ),
         }}
       />
-      {/* Hide explore tab */}
-      <Tabs.Screen
-        name="explore"
-        options={{
-          href: null,
-        }}
-      />
       {/* Audio Debug - hidden */}
       <Tabs.Screen
         name="audio-debug"
@@ -309,9 +374,15 @@ export default function TabLayout() {
         }}
       />
     </Tabs>

-      {/* Voice FAB - toggle listening mode */}
-      <VoiceFAB onPress={handleVoiceFABPress} isListening={isListening} />
     </View>
   );
 }

+const tabFABStyles = StyleSheet.create({
+  fabWrapper: {
+    flex: 1,
+    alignItems: 'center',
+    justifyContent: 'center',
+    top: -20,
+  },
+});

@@ -40,7 +40,7 @@ export default function BeneficiaryDashboardScreen() {
       setUserId(uid);
       console.log('Loaded credentials for WebView:', { hasToken: !!token, user, uid });
     } catch (err) {
-      console.error('Failed to load credentials:', err);
+      console.warn('Failed to load credentials:', err);
     } finally {
       setIsTokenLoaded(true);
     }
@@ -80,7 +80,7 @@ export default function BeneficiaryDashboardScreen() {
           console.log('MobileAppLogin timeout - function not found');
         }, 5000);
       } catch(e) {
-        console.error('Failed to call MobileAppLogin:', e);
+        console.warn('Failed to call MobileAppLogin:', e);
       }
     })();
     true;

@@ -25,6 +25,8 @@ import { useRouter, useFocusEffect } from 'expo-router';
 import { api } from '@/services/api';
 import { useBeneficiary } from '@/contexts/BeneficiaryContext';
 import { useVoiceTranscript } from '@/contexts/VoiceTranscriptContext';
+import { useVoice } from '@/contexts/VoiceContext';
+import { useChat } from '@/contexts/ChatContext';
 import { useTextToSpeech } from '@/hooks/useTextToSpeech';
 import { AppColors, BorderRadius, FontSizes, Spacing } from '@/constants/theme';
 import type { Message, Beneficiary } from '@/types';
@@ -113,6 +115,9 @@ export default function ChatScreen() {
   const { currentBeneficiary, setCurrentBeneficiary } = useBeneficiary();
   const { transcript, hasNewTranscript, markTranscriptAsShown, getTranscriptAsMessages } = useVoiceTranscript();
+
+  // Voice context for real-time transcript display and checking if voice session is active
+  const { partialTranscript: voicePartial, isListening: voiceIsListening, status: voiceStatus, isActive: voiceIsActive } = useVoice();

   // TTS for reading Julia's responses aloud
   const { speak, stop: stopTTS, isSpeaking } = useTextToSpeech({
     language: 'ru-RU',
@@ -131,8 +136,8 @@ export default function ChatScreen() {
   const [customDeploymentId, setCustomDeploymentId] = useState<string | null>(null);
   const [deploymentName, setDeploymentName] = useState<string | null>(null);

-  // Chat state - initialized after deployment ID is loaded
-  const [messages, setMessages] = useState<Message[]>([createInitialMessage(null)]);
+  // Chat state - stored in context to persist across tab navigation
+  const { messages, setMessages } = useChat();
   const [sortNewestFirst, setSortNewestFirst] = useState(false);

   const [input, setInput] = useState('');
@@ -145,6 +150,23 @@ export default function ChatScreen() {
     inputRef.current = input;
   }, [input]);

+  // Show partial voice transcript in input field in real-time
+  useEffect(() => {
+    if (voiceIsListening && voicePartial) {
+      setInput(voicePartial);
+    }
+  }, [voicePartial, voiceIsListening]);
+
+  // Clear input when voice switches to processing (transcript was sent)
+  const prevVoiceStatusRef = useRef(voiceStatus);
+  useEffect(() => {
+    const prev = prevVoiceStatusRef.current;
+    prevVoiceStatusRef.current = voiceStatus;
+    if (prev === 'listening' && voiceStatus === 'processing') {
+      setInput('');
+    }
+  }, [voiceStatus]);
+
   // Beneficiary picker
   const [showBeneficiaryPicker, setShowBeneficiaryPicker] = useState(false);
   const [beneficiaries, setBeneficiaries] = useState<Beneficiary[]>([]);
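A detail worth noting in the block above: the keyboard and the recognizer share one controlled value, so while listening the live partial simply overwrites the same `input` state the user would otherwise type into. A reduced sketch of that sharing (illustrative names):

```tsx
import React, { useEffect, useState } from 'react';
import { TextInput } from 'react-native';

// One controlled value serves both the keyboard and the live transcript.
export function SharedInput({ partial, listening }: { partial: string; listening: boolean }) {
  const [text, setText] = useState('');

  // While listening, mirror the recognizer's partial into the field.
  useEffect(() => {
    if (listening && partial) setText(partial);
  }, [partial, listening]);

  return (
    <TextInput
      value={text}
      onChangeText={setText}
      placeholder={listening ? 'Listening...' : 'Type a message...'}
    />
  );
}
```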
@@ -235,7 +257,7 @@ export default function ChatScreen() {
       }
       return [];
     } catch (error) {
-      console.error('Failed to load beneficiaries:', error);
+      console.warn('Failed to load beneficiaries:', error);
       return [];
     } finally {
       setLoadingBeneficiaries(false);
@@ -392,8 +414,11 @@ export default function ChatScreen() {
       };
       setMessages(prev => [...prev, assistantMessage]);

-      // Speak the response using TTS
+      // Only speak the response if voice session is active (FAB pressed)
+      // Don't auto-speak for text-only chat messages
+      if (voiceIsActive) {
         speak(responseText);
+      }
     } else {
       // Token might be expired, clear and retry once
       if (data.status === '401 Unauthorized') {
@@ -413,7 +438,7 @@ export default function ChatScreen() {
     } finally {
       setIsSending(false);
     }
-  }, [isSending, getWellNuoToken, customDeploymentId, currentBeneficiary, beneficiaries, speak]);
+  }, [isSending, getWellNuoToken, customDeploymentId, currentBeneficiary, beneficiaries, speak, voiceIsActive]);

   // Render message bubble
   const renderMessage = ({ item }: { item: Message }) => {
@@ -610,9 +635,9 @@ export default function ChatScreen() {
         {/* Input */}
         <View style={styles.inputContainer}>
           <TextInput
-            style={styles.input}
-            placeholder="Type a message..."
-            placeholderTextColor={AppColors.textMuted}
+            style={[styles.input, voiceIsListening && styles.inputListening]}
+            placeholder={voiceIsListening ? "Listening..." : "Type a message..."}
+            placeholderTextColor={voiceIsListening ? AppColors.error : AppColors.textMuted}
             value={input}
             onChangeText={setInput}
             multiline
@@ -774,6 +799,10 @@ const styles = StyleSheet.create({
     maxHeight: 100,
     marginRight: Spacing.sm,
   },
+  inputListening: {
+    borderWidth: 1.5,
+    borderColor: AppColors.error,
+  },
   sendButton: {
     width: 44,
     height: 44,

@@ -51,7 +51,7 @@ export default function HomeScreen() {
         });
       }
     } catch (err) {
-      console.error('Failed to load credentials:', err);
+      console.warn('Failed to load credentials:', err);
     } finally {
       setIsTokenLoaded(true);
     }
@@ -126,11 +126,11 @@ export default function HomeScreen() {
         webViewRef.current?.injectJavaScript(injectScript);
       }
     } else {
-      console.error('Token refresh failed');
+      console.warn('Token refresh failed');
       setError('Session expired. Please restart the app.');
     }
   } catch (err) {
-    console.error('Error refreshing token:', err);
+    console.warn('Error refreshing token:', err);
   } finally {
     setIsRefreshingToken(false);
   }
@@ -178,7 +178,7 @@ export default function HomeScreen() {
         observer.observe(document.body, { childList: true, subtree: true });
       }
     } catch(e) {
-      console.error('Failed to inject token:', e);
+      console.warn('Failed to inject token:', e);
     }
   })();
   true;

@@ -15,6 +15,7 @@ import { router } from 'expo-router';
 import { Ionicons } from '@expo/vector-icons';
 import { SafeAreaView } from 'react-native-safe-area-context';
 import { useAuth } from '@/contexts/AuthContext';
+import { useVoice } from '@/contexts/VoiceContext';
 import { api } from '@/services/api';
 import { AppColors, BorderRadius, FontSizes, Spacing } from '@/constants/theme';

@@ -55,6 +56,7 @@ function MenuItem({

 export default function ProfileScreen() {
   const { user, logout } = useAuth();
+  const { updateVoiceApiType } = useVoice();
   const [deploymentId, setDeploymentId] = useState<string>('');
   const [deploymentName, setDeploymentName] = useState<string>('');
   const [showDeploymentModal, setShowDeploymentModal] = useState(false);
@@ -62,6 +64,11 @@ export default function ProfileScreen() {
   const [isValidating, setIsValidating] = useState(false);
   const [validationError, setValidationError] = useState<string | null>(null);

+  // Voice API Type state
+  const [voiceApiType, setVoiceApiType] = useState<'voice_ask' | 'ask_wellnuo_ai'>('ask_wellnuo_ai');
+  const [showVoiceApiModal, setShowVoiceApiModal] = useState(false);
+  const [tempVoiceApiType, setTempVoiceApiType] = useState<'voice_ask' | 'ask_wellnuo_ai'>('ask_wellnuo_ai');
+
   // Load saved deployment ID or auto-populate from first available
   useEffect(() => {
     const loadDeploymentId = async () => {
@@ -88,12 +95,26 @@ export default function ProfileScreen() {
     loadDeploymentId();
   }, []);

+  // Load saved Voice API type
+  useEffect(() => {
+    const loadVoiceApiType = async () => {
+      const saved = await api.getVoiceApiType();
+      setVoiceApiType(saved);
+    };
+    loadVoiceApiType();
+  }, []);
+
   const openDeploymentModal = useCallback(() => {
     setTempDeploymentId(deploymentId);
     setValidationError(null);
     setShowDeploymentModal(true);
   }, [deploymentId]);

+  const openVoiceApiModal = useCallback(() => {
+    setTempVoiceApiType(voiceApiType);
+    setShowVoiceApiModal(true);
+  }, [voiceApiType]);
+
   const saveDeploymentId = useCallback(async () => {
     const trimmed = tempDeploymentId.trim();
     setValidationError(null);
@@ -128,6 +149,13 @@ export default function ProfileScreen() {
     }
   }, [tempDeploymentId]);

+  const saveVoiceApiType = useCallback(async () => {
+    await api.setVoiceApiType(tempVoiceApiType);
+    setVoiceApiType(tempVoiceApiType);
+    updateVoiceApiType(tempVoiceApiType);
+    setShowVoiceApiModal(false);
+  }, [tempVoiceApiType, updateVoiceApiType]);
+
   const openTerms = () => {
     router.push('/terms');
   };
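`api.getVoiceApiType` / `api.setVoiceApiType` are called here but not defined in this diff. A plausible sketch of what they might look like if they wrap a single string key in AsyncStorage; the key name is a guess, and the default mirrors the state initializer above:

```ts
import AsyncStorage from '@react-native-async-storage/async-storage';

export type VoiceApiType = 'voice_ask' | 'ask_wellnuo_ai';

const KEY = 'voice_api_type'; // assumed storage key

// Hypothetical implementations of api.getVoiceApiType / api.setVoiceApiType.
export async function getVoiceApiType(): Promise<VoiceApiType> {
  const saved = await AsyncStorage.getItem(KEY);
  return saved === 'voice_ask' ? 'voice_ask' : 'ask_wellnuo_ai'; // default per the diff
}

export async function setVoiceApiType(value: VoiceApiType): Promise<void> {
  await AsyncStorage.setItem(KEY, value);
}
```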
@@ -185,6 +213,15 @@ export default function ProfileScreen() {
             subtitle={deploymentId ? (deploymentName || `ID: ${deploymentId}`) : 'Auto'}
             onPress={openDeploymentModal}
           />
+          <View style={styles.menuDivider} />
+          <MenuItem
+            icon="radio-outline"
+            iconColor="#9333EA"
+            iconBgColor="#F3E8FF"
+            title="Voice API"
+            subtitle={voiceApiType === 'voice_ask' ? 'voice_ask' : 'ask_wellnuo_ai (LLaMA)'}
+            onPress={openVoiceApiModal}
+          />
         </View>
       </View>

@@ -271,6 +308,65 @@ export default function ProfileScreen() {
           </View>
         </KeyboardAvoidingView>
       </Modal>
+
+      {/* Voice API Modal */}
+      <Modal
+        visible={showVoiceApiModal}
+        transparent
+        animationType="fade"
+        onRequestClose={() => setShowVoiceApiModal(false)}
+      >
+        <View style={styles.modalOverlay}>
+          <View style={styles.modalContent}>
+            <Text style={styles.modalTitle}>Voice API</Text>
+            <Text style={styles.modalDescription}>
+              Choose which API function to use for voice requests.
+            </Text>
+
+            {/* Radio buttons */}
+            <TouchableOpacity
+              style={styles.radioOption}
+              onPress={() => setTempVoiceApiType('ask_wellnuo_ai')}
+            >
+              <View style={styles.radioCircle}>
+                {tempVoiceApiType === 'ask_wellnuo_ai' && <View style={styles.radioCircleSelected} />}
+              </View>
+              <View style={styles.radioTextContainer}>
+                <Text style={styles.radioLabel}>ask_wellnuo_ai</Text>
+                <Text style={styles.radioDescription}>LLaMA with WellNuo data</Text>
+              </View>
+            </TouchableOpacity>
+
+            <TouchableOpacity
+              style={styles.radioOption}
+              onPress={() => setTempVoiceApiType('voice_ask')}
+            >
+              <View style={styles.radioCircle}>
+                {tempVoiceApiType === 'voice_ask' && <View style={styles.radioCircleSelected} />}
+              </View>
+              <View style={styles.radioTextContainer}>
+                <Text style={styles.radioLabel}>voice_ask</Text>
+                <Text style={styles.radioDescription}>Alternative voice API</Text>
+              </View>
+            </TouchableOpacity>
+
+            <View style={styles.modalButtons}>
+              <TouchableOpacity
+                style={styles.modalButtonCancel}
+                onPress={() => setShowVoiceApiModal(false)}
+              >
+                <Text style={styles.modalButtonCancelText}>Cancel</Text>
+              </TouchableOpacity>
+              <TouchableOpacity
+                style={styles.modalButtonSave}
+                onPress={saveVoiceApiType}
+              >
+                <Text style={styles.modalButtonSaveText}>Save</Text>
+              </TouchableOpacity>
+            </View>
+          </View>
+        </View>
+      </Modal>
     </SafeAreaView>
   );
 }
@@ -472,4 +568,40 @@ const styles = StyleSheet.create({
   disabledText: {
     opacity: 0.5,
   },
+  // Radio button styles
+  radioOption: {
+    flexDirection: 'row',
+    alignItems: 'center',
+    paddingVertical: Spacing.sm + 4,
+    marginBottom: Spacing.xs,
+  },
+  radioCircle: {
+    width: 24,
+    height: 24,
+    borderRadius: 12,
+    borderWidth: 2,
+    borderColor: AppColors.primary,
+    alignItems: 'center',
+    justifyContent: 'center',
+    marginRight: Spacing.md,
+  },
+  radioCircleSelected: {
+    width: 12,
+    height: 12,
+    borderRadius: 6,
+    backgroundColor: AppColors.primary,
+  },
+  radioTextContainer: {
+    flex: 1,
+  },
+  radioLabel: {
+    fontSize: FontSizes.base,
+    fontWeight: '500',
+    color: AppColors.textPrimary,
+    marginBottom: 2,
+  },
+  radioDescription: {
+    fontSize: FontSizes.xs,
+    color: AppColors.textSecondary,
+  },
 });

app/(tabs)/voice-debug.tsx (new file, 504 lines)

@@ -0,0 +1,504 @@
/**
 * Voice Debug Screen
 *
 * Real-time debugging interface for voice recognition pipeline.
 * Shows all events, timers, API calls, and state changes.
 */

import React, { useState, useEffect, useRef, useCallback } from 'react';
import {
  View,
  Text,
  ScrollView,
  StyleSheet,
  TouchableOpacity,
} from 'react-native';
import { useSafeAreaInsets } from 'react-native-safe-area-context';
import { Feather } from '@expo/vector-icons';

import { useVoice } from '@/contexts/VoiceContext';
import { useSpeechRecognition } from '@/hooks/useSpeechRecognition';
import { AppColors } from '@/constants/theme';
import { useColorScheme } from '@/hooks/use-color-scheme';

interface LogEntry {
  id: string;
  timestamp: number;
  category: 'stt' | 'api' | 'tts' | 'timer' | 'system';
  message: string;
  level: 'info' | 'warning' | 'error' | 'success';
  data?: any;
}

export default function VoiceDebugScreen() {
  const colorScheme = useColorScheme();
  const isDark = colorScheme === 'dark';
  const insets = useSafeAreaInsets();

  const {
    isListening,
    isSpeaking,
    status,
    startSession,
    stopSession,
  } = useVoice();

  const {
    isListening: sttIsListening,
    partialTranscript,
    recognizedText,
  } = useSpeechRecognition({
    lang: 'en-US',
    continuous: true,
    interimResults: true,
  });

  const [logs, setLogs] = useState<LogEntry[]>([]);
  const [silenceTimer, setSilenceTimer] = useState(0);
  const scrollViewRef = useRef<ScrollView>(null);
  const logIdCounter = useRef(0);
  const lastPartialRef = useRef('');

  // Add log entry
  const addLog = useCallback((
    category: LogEntry['category'],
    message: string,
    level: LogEntry['level'] = 'info',
    data?: any
  ) => {
    const entry: LogEntry = {
      id: `log-${logIdCounter.current++}`,
      timestamp: Date.now(),
      category,
      message,
      level,
      data,
    };

    console.log(`[VoiceDebug:${category}]`, message, data || '');

    setLogs(prev => {
      const updated = [...prev, entry];
      // Keep only last 100 logs
      return updated.slice(-100);
    });

    setTimeout(() => {
      scrollViewRef.current?.scrollToEnd({ animated: true });
    }, 50);
  }, []);

  // Clear logs
  const clearLogs = useCallback(() => {
    setLogs([]);
    logIdCounter.current = 0;
    addLog('system', 'Logs cleared', 'info');
  }, [addLog]);

  // Monitor voice session state
  useEffect(() => {
    if (isListening) {
      addLog('system', '🎤 Voice session STARTED', 'success');
    } else {
      addLog('system', '⏹️ Voice session STOPPED', 'info');
      setSilenceTimer(0);
    }
  }, [isListening, addLog]);

  // Monitor STT state
  useEffect(() => {
    if (sttIsListening) {
      addLog('stt', '▶️ STT listening started', 'success');
    } else if (isListening) {
      addLog('stt', '⏸️ STT stopped (but session active)', 'warning');
    }
  }, [sttIsListening, isListening, addLog]);

  // Monitor status changes
  useEffect(() => {
    if (status === 'processing') {
      addLog('api', '⚙️ Processing transcript → sending to API', 'info');
    } else if (status === 'speaking') {
      addLog('tts', '🔊 TTS playing (Julia speaking)', 'info');
    } else if (status === 'listening') {
      addLog('system', '👂 Ready to listen', 'info');
    }
  }, [status, addLog]);

  // Monitor partial transcripts
  useEffect(() => {
    if (partialTranscript && partialTranscript !== lastPartialRef.current) {
      lastPartialRef.current = partialTranscript;
      addLog('stt', `📝 Partial: "${partialTranscript.slice(0, 40)}${partialTranscript.length > 40 ? '...' : ''}"`, 'info');

      // Reset silence timer
      setSilenceTimer(0);
      addLog('timer', '🔄 Silence timer RESET', 'warning');
    }
  }, [partialTranscript, addLog]);

  // Monitor final transcripts
  useEffect(() => {
    if (recognizedText && recognizedText !== lastPartialRef.current) {
      addLog('stt', `✅ FINAL: "${recognizedText.slice(0, 40)}${recognizedText.length > 40 ? '...' : ''}"`, 'success', {
        length: recognizedText.length,
        transcript: recognizedText
      });
      addLog('api', '📤 Sending to API...', 'info');
    }
  }, [recognizedText, addLog]);

  // Silence timer (only when STT is listening and not processing/speaking)
  useEffect(() => {
    let interval: NodeJS.Timeout | null = null;

    if (sttIsListening && status !== 'processing' && status !== 'speaking') {
      interval = setInterval(() => {
        setSilenceTimer(prev => {
          const next = prev + 100;

          // Log milestones
          if (next === 1000) {
            addLog('timer', '⏱️ Silence: 1.0s', 'info');
          } else if (next === 1500) {
            addLog('timer', '⏱️ Silence: 1.5s', 'warning');
          } else if (next === 2000) {
            addLog('timer', '🛑 Silence: 2.0s → AUTO-STOP triggered', 'error');
          }

          return next;
        });
      }, 100);
    } else {
      setSilenceTimer(0);
    }

    return () => {
      if (interval) clearInterval(interval);
    };
  }, [sttIsListening, status, addLog]);

  // Get status indicator
  const getStatusDisplay = () => {
    if (status === 'speaking' || isSpeaking) {
      return { color: '#9333EA', icon: '🔊', text: 'Speaking' };
    }
    if (status === 'processing') {
      return { color: '#F59E0B', icon: '⚙️', text: 'Processing' };
    }
    if (isListening && sttIsListening) {
      return { color: '#10B981', icon: '🟢', text: 'Listening' };
    }
    if (isListening && !sttIsListening) {
      return { color: '#F59E0B', icon: '🟡', text: 'Session Active (STT Off)' };
    }
    return { color: '#6B7280', icon: '⚪', text: 'Idle' };
  };

  const statusDisplay = getStatusDisplay();
  const silenceProgress = Math.min(silenceTimer / 2000, 1);
  const silenceSeconds = (silenceTimer / 1000).toFixed(1);

  // Log level colors
  const getLogColor = (level: LogEntry['level']) => {
    switch (level) {
      case 'error': return '#EF4444';
      case 'warning': return '#F59E0B';
      case 'success': return '#10B981';
      default: return isDark ? '#D1D5DB' : '#374151';
    }
  };

  // Category icons
  const getCategoryIcon = (category: LogEntry['category']) => {
    switch (category) {
      case 'stt': return '🎤';
      case 'api': return '📡';
      case 'tts': return '🔊';
      case 'timer': return '⏱️';
      case 'system': return '⚙️';
      default: return '•';
    }
  };

  return (
    <View style={[styles.container, { backgroundColor: isDark ? '#0A0A0A' : '#FFFFFF' }]}>
      {/* Header */}
      <View style={[styles.header, { paddingTop: insets.top + 16 }]}>
        <Text style={[styles.headerTitle, { color: isDark ? '#FFFFFF' : '#000000' }]}>
          Voice Debug
        </Text>
        <TouchableOpacity onPress={clearLogs} style={styles.clearButton}>
          <Feather name="trash-2" size={20} color={isDark ? '#9CA3AF' : '#6B7280'} />
        </TouchableOpacity>
      </View>

      {/* Status Card */}
      <View style={[styles.statusCard, {
        backgroundColor: isDark ? '#1F2937' : '#F3F4F6',
        borderColor: statusDisplay.color,
      }]}>
        <View style={styles.statusRow}>
          <Text style={styles.statusIcon}>{statusDisplay.icon}</Text>
          <View style={styles.statusTextContainer}>
            <Text style={[styles.statusLabel, { color: isDark ? '#9CA3AF' : '#6B7280' }]}>
              Status
            </Text>
            <Text style={[styles.statusText, { color: statusDisplay.color }]}>
              {statusDisplay.text}
            </Text>
          </View>
        </View>

        {/* Silence Timer */}
        {sttIsListening && status !== 'processing' && status !== 'speaking' && (
          <View style={styles.timerContainer}>
            <Text style={[styles.timerLabel, { color: isDark ? '#9CA3AF' : '#6B7280' }]}>
              Silence Timer (iOS auto-stop at 2.0s)
            </Text>
            <View style={styles.timerRow}>
              <Text style={[styles.timerText, {
                color: silenceTimer >= 2000 ? '#EF4444' : silenceTimer >= 1500 ? '#F59E0B' : isDark ? '#D1D5DB' : '#374151'
              }]}>
                {silenceSeconds}s / 2.0s
              </Text>
            </View>
            <View style={[styles.progressBarContainer, { backgroundColor: isDark ? '#374151' : '#E5E7EB' }]}>
              <View style={[styles.progressBarFill, {
                width: `${silenceProgress * 100}%`,
                backgroundColor: silenceTimer >= 2000 ? '#EF4444' : silenceTimer >= 1500 ? '#F59E0B' : '#10B981'
              }]} />
            </View>
          </View>
        )}

        {/* Current Transcripts */}
        {partialTranscript && (
          <View style={styles.transcriptContainer}>
            <Text style={[styles.transcriptLabel, { color: isDark ? '#9CA3AF' : '#6B7280' }]}>
              Partial:
            </Text>
            <Text style={[styles.transcriptText, { color: isDark ? '#F59E0B' : '#D97706' }]}>
              "{partialTranscript}"
            </Text>
          </View>
        )}
        {recognizedText && (
          <View style={styles.transcriptContainer}>
            <Text style={[styles.transcriptLabel, { color: isDark ? '#9CA3AF' : '#6B7280' }]}>
              Final:
            </Text>
            <Text style={[styles.transcriptText, { color: isDark ? '#10B981' : '#059669' }]}>
              "{recognizedText}"
            </Text>
          </View>
        )}
      </View>

      {/* Logs */}
      <View style={styles.logsContainer}>
        <Text style={[styles.logsTitle, { color: isDark ? '#FFFFFF' : '#000000' }]}>
          Event Log
        </Text>
        <ScrollView
          ref={scrollViewRef}
          style={[styles.logsScrollView, { backgroundColor: isDark ? '#111827' : '#F9FAFB' }]}
          contentContainerStyle={styles.logsContent}
        >
          {logs.length === 0 ? (
            <Text style={[styles.emptyText, { color: isDark ? '#6B7280' : '#9CA3AF' }]}>
              No events yet. Press FAB to start.
            </Text>
          ) : (
            logs.map(log => {
              const time = new Date(log.timestamp);
              const timeStr = `${String(time.getHours()).padStart(2, '0')}:${String(time.getMinutes()).padStart(2, '0')}:${String(time.getSeconds()).padStart(2, '0')}.${String(time.getMilliseconds()).padStart(3, '0')}`;

              return (
                <View key={log.id} style={styles.logEntry}>
                  <Text style={[styles.logTimestamp, { color: isDark ? '#6B7280' : '#9CA3AF' }]}>
                    {timeStr}
                  </Text>
                  <Text style={styles.logIcon}>{getCategoryIcon(log.category)}</Text>
                  <Text style={[styles.logMessage, { color: getLogColor(log.level) }]}>
                    {log.message}
                  </Text>
                </View>
              );
            })
          )}
        </ScrollView>
      </View>

      {/* FAB */}
      <TouchableOpacity
        style={[styles.fab, {
          backgroundColor: isListening ? '#EF4444' : AppColors.primary,
          bottom: insets.bottom + 80,
        }]}
        onPress={() => {
          if (isListening) {
            addLog('system', '🛑 User stopped session', 'warning');
            stopSession();
          } else {
            clearLogs();
            addLog('system', '▶️ User started session', 'success');
            startSession();
          }
        }}
      >
        <Feather
          name={isListening ? 'square' : 'mic'}
          size={28}
          color="#FFFFFF"
        />
      </TouchableOpacity>
    </View>
  );
}

const styles = StyleSheet.create({
  container: {
    flex: 1,
  },
  header: {
    flexDirection: 'row',
    alignItems: 'center',
    justifyContent: 'space-between',
    paddingHorizontal: 20,
    paddingBottom: 16,
  },
  headerTitle: {
    fontSize: 28,
    fontWeight: '700',
  },
  clearButton: {
    padding: 8,
  },
  statusCard: {
    marginHorizontal: 20,
    marginBottom: 16,
    padding: 16,
    borderRadius: 12,
    borderLeftWidth: 4,
  },
  statusRow: {
    flexDirection: 'row',
    alignItems: 'center',
  },
  statusIcon: {
    fontSize: 32,
    marginRight: 12,
  },
  statusTextContainer: {
    flex: 1,
  },
  statusLabel: {
    fontSize: 12,
    fontWeight: '500',
    marginBottom: 2,
  },
  statusText: {
    fontSize: 18,
    fontWeight: '700',
  },
  timerContainer: {
    marginTop: 16,
    paddingTop: 16,
    borderTopWidth: 1,
    borderTopColor: 'rgba(156, 163, 175, 0.2)',
  },
  timerLabel: {
    fontSize: 12,
    fontWeight: '500',
    marginBottom: 8,
  },
  timerRow: {
    marginBottom: 8,
  },
  timerText: {
    fontSize: 24,
    fontWeight: '700',
    fontVariant: ['tabular-nums'],
  },
  progressBarContainer: {
    height: 8,
    borderRadius: 4,
    overflow: 'hidden',
  },
  progressBarFill: {
    height: '100%',
    borderRadius: 4,
  },
  transcriptContainer: {
    marginTop: 12,
    paddingTop: 12,
    borderTopWidth: 1,
    borderTopColor: 'rgba(156, 163, 175, 0.2)',
  },
  transcriptLabel: {
    fontSize: 12,
    fontWeight: '500',
    marginBottom: 4,
  },
  transcriptText: {
    fontSize: 14,
    fontStyle: 'italic',
  },
  logsContainer: {
    flex: 1,
    marginHorizontal: 20,
  },
  logsTitle: {
    fontSize: 16,
    fontWeight: '700',
    marginBottom: 8,
  },
  logsScrollView: {
    flex: 1,
    borderRadius: 8,
  },
  logsContent: {
    padding: 12,
  },
  emptyText: {
    textAlign: 'center',
    fontSize: 14,
    fontStyle: 'italic',
    paddingVertical: 20,
  },
  logEntry: {
    flexDirection: 'row',
    marginBottom: 8,
    alignItems: 'flex-start',
  },
  logTimestamp: {
    fontSize: 11,
    fontVariant: ['tabular-nums'],
    marginRight: 8,
    width: 80,
  },
  logIcon: {
    fontSize: 14,
    marginRight: 6,
  },
  logMessage: {
    fontSize: 13,
    flex: 1,
    lineHeight: 18,
  },
  fab: {
    position: 'absolute',
    right: 20,
    width: 64,
    height: 64,
    borderRadius: 32,
    alignItems: 'center',
    justifyContent: 'center',
    shadowColor: '#000',
    shadowOffset: { width: 0, height: 4 },
    shadowOpacity: 0.3,
    shadowRadius: 8,
    elevation: 8,
  },
});

@@ -13,6 +13,7 @@ import { BeneficiaryProvider } from '@/contexts/BeneficiaryContext';
 import { VoiceTranscriptProvider } from '@/contexts/VoiceTranscriptContext';
 import { VoiceCallProvider } from '@/contexts/VoiceCallContext';
 import { VoiceProvider } from '@/contexts/VoiceContext';
+import { ChatProvider } from '@/contexts/ChatContext';
 import { LoadingSpinner } from '@/components/ui/LoadingSpinner';
 import { FloatingCallBubble } from '@/components/FloatingCallBubble';

@@ -69,7 +70,9 @@ export default function RootLayout() {
         <VoiceTranscriptProvider>
           <VoiceCallProvider>
             <VoiceProvider>
+              <ChatProvider>
                 <RootLayoutNav />
+              </ChatProvider>
             </VoiceProvider>
           </VoiceCallProvider>
         </VoiceTranscriptProvider>
|
|||||||
@@ -1,9 +1,12 @@
 /**
  * Voice Floating Action Button Component
  *
- * A floating action button for toggling voice listening mode.
- * Tap to start/stop listening.
- * Hidden when a call is already active.
+ * Positioned at the center of the tab bar.
+ * Shows different animations for each voice state:
+ * - idle: white mic icon, green background
+ * - listening: red background, expanding pulse rings
+ * - processing: blue background, spinning indicator
+ * - speaking: green background, wave-like pulse
  */
 
 import React, { useRef, useEffect } from 'react';
@@ -12,12 +15,13 @@ import {
   TouchableOpacity,
   Animated,
   ViewStyle,
+  ActivityIndicator,
 } from 'react-native';
 import { Ionicons } from '@expo/vector-icons';
-import { useSafeAreaInsets } from 'react-native-safe-area-context';
 import * as Haptics from 'expo-haptics';
 import { AppColors, BorderRadius } from '@/constants/theme';
 import { useVoiceCall } from '@/contexts/VoiceCallContext';
+import { useVoice } from '@/contexts/VoiceContext';
 
 interface VoiceFABProps {
   onPress: () => void;
@@ -26,146 +30,232 @@ interface VoiceFABProps {
   isListening?: boolean;
 }
 
-const FAB_SIZE = 56;
+const FAB_SIZE = 60;
 
 export function VoiceFAB({ onPress, style, disabled = false, isListening = false }: VoiceFABProps) {
   const { isCallActive } = useVoiceCall();
-  const insets = useSafeAreaInsets();
+  const { status: voiceStatus } = useVoice();
 
   // Animation values
   const scale = useRef(new Animated.Value(1)).current;
   const opacity = useRef(new Animated.Value(1)).current;
-  const pulseScale = useRef(new Animated.Value(1)).current;
-  const pulseOpacity = useRef(new Animated.Value(0)).current;
+
+  // Pulse ring 1 (main expanding ring)
+  const pulse1Scale = useRef(new Animated.Value(1)).current;
+  const pulse1Opacity = useRef(new Animated.Value(0)).current;
+
+  // Pulse ring 2 (second ring, offset timing)
+  const pulse2Scale = useRef(new Animated.Value(1)).current;
+  const pulse2Opacity = useRef(new Animated.Value(0)).current;
+
+  // Speaking glow animation
+  const glowScale = useRef(new Animated.Value(1)).current;
+
+  // Processing rotation
+  const rotation = useRef(new Animated.Value(0)).current;
+
+  // Store animation refs for cleanup
+  const animationRef = useRef<Animated.CompositeAnimation | null>(null);
+
+  // Determine effective state
+  const effectiveStatus = isListening
+    ? (voiceStatus === 'processing' ? 'processing' : voiceStatus === 'speaking' ? 'speaking' : 'listening')
+    : 'idle';
 
   // Hide FAB when call is active
   useEffect(() => {
     if (isCallActive) {
       Animated.parallel([
-        Animated.timing(scale, {
-          toValue: 0,
-          duration: 200,
-          useNativeDriver: true,
-        }),
-        Animated.timing(opacity, {
-          toValue: 0,
-          duration: 200,
-          useNativeDriver: true,
-        }),
+        Animated.timing(scale, { toValue: 0, duration: 200, useNativeDriver: true }),
+        Animated.timing(opacity, { toValue: 0, duration: 200, useNativeDriver: true }),
       ]).start();
     } else {
       Animated.parallel([
-        Animated.spring(scale, {
-          toValue: 1,
-          friction: 5,
-          tension: 40,
-          useNativeDriver: true,
-        }),
-        Animated.timing(opacity, {
-          toValue: 1,
-          duration: 200,
-          useNativeDriver: true,
-        }),
+        Animated.spring(scale, { toValue: 1, friction: 5, tension: 40, useNativeDriver: true }),
+        Animated.timing(opacity, { toValue: 1, duration: 200, useNativeDriver: true }),
       ]).start();
     }
   }, [isCallActive, scale, opacity]);
 
-  // Pulse animation when listening
+  // Animations based on voice status
   useEffect(() => {
-    if (isListening && !isCallActive) {
-      // Start pulsing animation
-      const pulseAnimation = Animated.loop(
-        Animated.sequence([
+    // Stop previous animation
+    if (animationRef.current) {
+      animationRef.current.stop();
+      animationRef.current = null;
+    }
+
+    // Reset all animation values
+    pulse1Scale.setValue(1);
+    pulse1Opacity.setValue(0);
+    pulse2Scale.setValue(1);
+    pulse2Opacity.setValue(0);
+    glowScale.setValue(1);
+    rotation.setValue(0);
+
+    if (effectiveStatus === 'listening') {
+      // Double pulse ring animation - more active/dynamic
+      const pulseAnim = Animated.loop(
+        Animated.stagger(500, [
           Animated.parallel([
-            Animated.timing(pulseScale, {
-              toValue: 1.8,
-              duration: 1000,
-              useNativeDriver: true,
-            }),
-            Animated.timing(pulseOpacity, {
-              toValue: 0,
-              duration: 1000,
-              useNativeDriver: true,
-            }),
+            Animated.timing(pulse1Scale, { toValue: 2.0, duration: 1200, useNativeDriver: true }),
+            Animated.timing(pulse1Opacity, { toValue: 0, duration: 1200, useNativeDriver: true }),
           ]),
           Animated.parallel([
-            Animated.timing(pulseScale, {
-              toValue: 1,
-              duration: 0,
-              useNativeDriver: true,
-            }),
-            Animated.timing(pulseOpacity, {
-              toValue: 0.6,
-              duration: 0,
-              useNativeDriver: true,
-            }),
+            Animated.timing(pulse1Scale, { toValue: 1, duration: 0, useNativeDriver: true }),
+            Animated.timing(pulse1Opacity, { toValue: 0.5, duration: 0, useNativeDriver: true }),
           ]),
         ])
       );
-      pulseAnimation.start();
+
+      const pulse2Anim = Animated.loop(
+        Animated.sequence([
+          Animated.delay(400),
+          Animated.parallel([
+            Animated.timing(pulse2Scale, { toValue: 1.8, duration: 1200, useNativeDriver: true }),
+            Animated.timing(pulse2Opacity, { toValue: 0, duration: 1200, useNativeDriver: true }),
+          ]),
+          Animated.parallel([
+            Animated.timing(pulse2Scale, { toValue: 1, duration: 0, useNativeDriver: true }),
+            Animated.timing(pulse2Opacity, { toValue: 0.4, duration: 0, useNativeDriver: true }),
+          ]),
+        ])
+      );
+
+      const combined = Animated.parallel([pulseAnim, pulse2Anim]);
+      animationRef.current = combined;
+      combined.start();
+
+    } else if (effectiveStatus === 'speaking') {
+      // Gentle breathing glow when speaking
+      const glowAnim = Animated.loop(
+        Animated.sequence([
+          Animated.timing(glowScale, { toValue: 1.15, duration: 600, useNativeDriver: true }),
+          Animated.timing(glowScale, { toValue: 1.0, duration: 600, useNativeDriver: true }),
+        ])
+      );
+
+      // Soft outer glow
+      const softPulse = Animated.loop(
+        Animated.sequence([
+          Animated.parallel([
+            Animated.timing(pulse1Scale, { toValue: 1.4, duration: 800, useNativeDriver: true }),
+            Animated.timing(pulse1Opacity, { toValue: 0.3, duration: 400, useNativeDriver: true }),
+          ]),
+          Animated.parallel([
+            Animated.timing(pulse1Scale, { toValue: 1.0, duration: 800, useNativeDriver: true }),
+            Animated.timing(pulse1Opacity, { toValue: 0, duration: 400, useNativeDriver: true }),
+          ]),
+        ])
+      );
+
+      const combined = Animated.parallel([glowAnim, softPulse]);
+      animationRef.current = combined;
+      combined.start();
+
+    } else if (effectiveStatus === 'processing') {
+      // Spinning rotation for processing
+      const spinAnim = Animated.loop(
+        Animated.timing(rotation, { toValue: 1, duration: 1500, useNativeDriver: true })
+      );
+      animationRef.current = spinAnim;
+      spinAnim.start();
+    }
+
     return () => {
-      pulseAnimation.stop();
-      pulseScale.setValue(1);
-      pulseOpacity.setValue(0);
-      };
-    } else {
-      pulseScale.setValue(1);
-      pulseOpacity.setValue(0);
-    }
-  }, [isListening, isCallActive, pulseScale, pulseOpacity]);
+      if (animationRef.current) {
+        animationRef.current.stop();
+        animationRef.current = null;
+      }
+    };
+  }, [effectiveStatus]); // eslint-disable-line react-hooks/exhaustive-deps
 
   // Press animation with haptic feedback
   const handlePressIn = () => {
     Haptics.impactAsync(Haptics.ImpactFeedbackStyle.Medium);
-    Animated.spring(scale, {
-      toValue: 0.9,
-      friction: 5,
-      useNativeDriver: true,
-    }).start();
+    Animated.spring(scale, { toValue: 0.85, friction: 5, useNativeDriver: true }).start();
   };
 
   const handlePressOut = () => {
-    Animated.spring(scale, {
-      toValue: 1,
-      friction: 5,
-      useNativeDriver: true,
-    }).start();
+    Animated.spring(scale, { toValue: 1, friction: 5, useNativeDriver: true }).start();
   };
 
-  // Don't render if call is active
   if (isCallActive) {
     return null;
   }
 
+  // Determine colors and icon based on state
+  let fabBgColor = AppColors.success; // idle: green
+  let iconName: 'mic-outline' | 'mic' | 'volume-high' = 'mic-outline';
+  let pulseColor = AppColors.error;
+
+  if (effectiveStatus === 'listening') {
+    fabBgColor = '#FF3B30'; // red
+    iconName = 'mic';
+    pulseColor = '#FF3B30';
+  } else if (effectiveStatus === 'processing') {
+    fabBgColor = AppColors.primary; // blue
+    iconName = 'mic';
+    pulseColor = AppColors.primary;
+  } else if (effectiveStatus === 'speaking') {
+    fabBgColor = '#34C759'; // green
+    iconName = 'volume-high';
+    pulseColor = '#34C759';
+  }
+
+  const spin = rotation.interpolate({
+    inputRange: [0, 1],
+    outputRange: ['0deg', '360deg'],
+  });
+
   return (
     <Animated.View
       style={[
         styles.container,
         {
-          bottom: insets.bottom + 80, // Above tab bar
           transform: [{ scale }],
           opacity,
         },
         style,
       ]}
     >
-      {/* Pulse ring when listening */}
-      {isListening && (
+      {/* Pulse ring 1 */}
+      {(effectiveStatus === 'listening' || effectiveStatus === 'speaking') && (
         <Animated.View
           style={[
             styles.pulseRing,
             {
-              transform: [{ scale: pulseScale }],
-              opacity: pulseOpacity,
+              backgroundColor: pulseColor,
+              transform: [{ scale: pulse1Scale }],
+              opacity: pulse1Opacity,
            },
          ]}
        />
      )}
 
+      {/* Pulse ring 2 (listening only) */}
+      {effectiveStatus === 'listening' && (
+        <Animated.View
+          style={[
+            styles.pulseRing,
+            {
+              backgroundColor: pulseColor,
+              transform: [{ scale: pulse2Scale }],
+              opacity: pulse2Opacity,
+            },
+          ]}
+        />
+      )}
+
+      <Animated.View
+        style={[
+          { transform: [{ scale: effectiveStatus === 'speaking' ? glowScale : 1 }] },
+        ]}
+      >
       <TouchableOpacity
         style={[
           styles.fab,
-          isListening && styles.fabListening,
+          { backgroundColor: disabled ? AppColors.surface : fabBgColor },
           disabled && styles.fabDisabled,
         ]}
         onPress={onPress}
@@ -174,36 +264,38 @@ export function VoiceFAB({ onPress, style, disabled = false, isListening = false }: VoiceFABProps) {
         disabled={disabled}
         activeOpacity={0.9}
       >
+        {effectiveStatus === 'processing' ? (
+          <Animated.View style={{ transform: [{ rotate: spin }] }}>
+            <ActivityIndicator size="small" color={AppColors.white} />
+          </Animated.View>
+        ) : (
         <Ionicons
-          name={isListening ? 'mic' : 'mic-outline'}
+          name={iconName}
           size={28}
           color={disabled ? AppColors.textMuted : AppColors.white}
         />
+        )}
       </TouchableOpacity>
     </Animated.View>
+    </Animated.View>
   );
 }
 
 const styles = StyleSheet.create({
   container: {
-    position: 'absolute',
-    left: 0,
-    right: 0,
     alignItems: 'center',
-    zIndex: 100,
+    justifyContent: 'center',
   },
   pulseRing: {
     position: 'absolute',
     width: FAB_SIZE,
     height: FAB_SIZE,
-    borderRadius: BorderRadius.full,
-    backgroundColor: AppColors.error,
+    borderRadius: FAB_SIZE / 2,
   },
   fab: {
     width: FAB_SIZE,
     height: FAB_SIZE,
-    borderRadius: BorderRadius.full,
-    backgroundColor: AppColors.success,
+    borderRadius: FAB_SIZE / 2,
     justifyContent: 'center',
     alignItems: 'center',
     shadowColor: '#000',
@@ -212,11 +304,7 @@ const styles = StyleSheet.create({
     shadowRadius: 8,
     elevation: 8,
   },
-  fabListening: {
-    backgroundColor: AppColors.error,
-  },
   fabDisabled: {
-    backgroundColor: AppColors.surface,
     shadowOpacity: 0.1,
   },
 });
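A note on the animation handling above: keeping a single `animationRef` to the running `Animated.CompositeAnimation`, then stopping it and resetting the values before starting the next one, is what prevents orphaned loops when the status changes quickly. The same pattern as a standalone hook, as a minimal sketch — the hook name and `factory` signature are illustrative, not part of this diff:

```ts
import { useEffect, useRef } from 'react';
import { Animated } from 'react-native';

// Builds and runs one animation per state value; stops the previous
// one (and the last one on unmount) so no loop outlives its state.
export function useStateAnimation<S>(
  state: S,
  factory: (s: S) => Animated.CompositeAnimation | null,
) {
  const runningRef = useRef<Animated.CompositeAnimation | null>(null);

  useEffect(() => {
    runningRef.current?.stop();
    runningRef.current = factory(state);
    runningRef.current?.start();
    return () => {
      runningRef.current?.stop();
      runningRef.current = null;
    };
    // eslint-disable-next-line react-hooks/exhaustive-deps
  }, [state]);
}
```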
contexts/ChatContext.tsx — new file, 51 lines
@@ -0,0 +1,51 @@
+/**
+ * Chat Context - Persists chat messages across tab navigation
+ *
+ * Without this context, messages are lost when switching tabs
+ * because ChatScreen component unmounts and remounts.
+ */
+
+import React, { createContext, useContext, useState, useCallback, ReactNode } from 'react';
+import type { Message } from '@/types';
+
+interface ChatContextValue {
+  messages: Message[];
+  setMessages: React.Dispatch<React.SetStateAction<Message[]>>;
+  addMessage: (message: Message) => void;
+  clearMessages: (initialMessage: Message) => void;
+}
+
+const ChatContext = createContext<ChatContextValue | undefined>(undefined);
+
+export function ChatProvider({ children }: { children: ReactNode }) {
+  const [messages, setMessages] = useState<Message[]>([
+    {
+      id: '1',
+      role: 'assistant',
+      content: "Hello! I'm Julia, your AI wellness companion.\n\nType a message below to chat with me.",
+      timestamp: new Date(),
+    },
+  ]);
+
+  const addMessage = useCallback((message: Message) => {
+    setMessages(prev => [...prev, message]);
+  }, []);
+
+  const clearMessages = useCallback((initialMessage: Message) => {
+    setMessages([initialMessage]);
+  }, []);
+
+  return (
+    <ChatContext.Provider value={{ messages, setMessages, addMessage, clearMessages }}>
+      {children}
+    </ChatContext.Provider>
+  );
+}
+
+export function useChat() {
+  const context = useContext(ChatContext);
+  if (!context) {
+    throw new Error('useChat must be used within ChatProvider');
+  }
+  return context;
+}
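For reference, a consumer of this provider looks roughly like the sketch below; only `useChat` and its fields come from the file above, the screen component itself is hypothetical:

```tsx
import React from 'react';
import { FlatList, Text } from 'react-native';
import { useChat } from '@/contexts/ChatContext';

// State lives in ChatProvider, so the list survives this component
// unmounting and remounting on tab switches.
export function ChatMessageList() {
  const { messages } = useChat();
  return (
    <FlatList
      data={messages}
      keyExtractor={(m) => m.id}
      renderItem={({ item }) => <Text>{item.content}</Text>}
    />
  );
}
```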
@@ -153,6 +153,12 @@ export function VoiceProvider({ children }: { children: ReactNode }) {
   // API token cache
   const apiTokenRef = useRef<string | null>(null);
 
+  // Abort controller for cancelling in-flight API requests
+  const abortControllerRef = useRef<AbortController | null>(null);
+
+  // Flag to prevent speak() after session stopped
+  const sessionStoppedRef = useRef(false);
+
   // Deployment ID from settings
   const deploymentIdRef = useRef<string | null>(null);
 
@@ -207,6 +213,12 @@ export function VoiceProvider({ children }: { children: ReactNode }) {
       return null;
     }
 
+    // Don't send if session was stopped
+    if (sessionStoppedRef.current) {
+      console.log('[VoiceContext] Session stopped, skipping API call');
+      return null;
+    }
+
     console.log('[VoiceContext] Sending transcript to API:', trimmedText);
     setStatus('processing');
     setError(null);
@@ -214,10 +226,23 @@ export function VoiceProvider({ children }: { children: ReactNode }) {
     // Add user message to transcript for chat display
     addTranscriptEntry('user', trimmedText);
 
+    // Create abort controller for this request
+    if (abortControllerRef.current) {
+      abortControllerRef.current.abort();
+    }
+    const abortController = new AbortController();
+    abortControllerRef.current = abortController;
+
     try {
       // Get API token
       const token = await getWellNuoToken();
+
+      // Check if aborted
+      if (abortController.signal.aborted || sessionStoppedRef.current) {
+        console.log('[VoiceContext] Request aborted before API call');
+        return null;
+      }
 
       // Normalize question
       const normalizedQuestion = normalizeQuestion(trimmedText);
 
@@ -244,10 +269,17 @@ export function VoiceProvider({ children }: { children: ReactNode }) {
         method: 'POST',
         headers: { 'Content-Type': 'application/x-www-form-urlencoded' },
         body: new URLSearchParams(requestParams).toString(),
+        signal: abortController.signal,
       });
 
       const data = await response.json();
+
+      // Check if session was stopped while waiting for response
+      if (sessionStoppedRef.current) {
+        console.log('[VoiceContext] Session stopped during API call, discarding response');
+        return null;
+      }
 
       if (data.ok && data.response?.body) {
         const responseText = data.response.body;
         console.log('[VoiceContext] API response:', responseText.slice(0, 100) + '...');
@@ -256,7 +288,7 @@ export function VoiceProvider({ children }: { children: ReactNode }) {
         // Add Julia's response to transcript for chat display
         addTranscriptEntry('assistant', responseText);
 
-        // Speak the response
+        // Speak the response (will be skipped if session stopped)
         await speak(responseText);
 
         return responseText;
@@ -269,8 +301,13 @@ export function VoiceProvider({ children }: { children: ReactNode }) {
         throw new Error(data.message || 'Could not get response');
       }
     } catch (err) {
+      // Ignore abort errors
+      if (err instanceof Error && err.name === 'AbortError') {
+        console.log('[VoiceContext] API request aborted');
+        return null;
+      }
       const errorMsg = err instanceof Error ? err.message : 'Unknown error';
-      console.error('[VoiceContext] API error:', errorMsg);
+      console.warn('[VoiceContext] API error:', errorMsg);
       setError(errorMsg);
       setStatus('idle');
       return null;
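The cancellation added in these hunks is the standard `AbortController` flow: abort the previous request, pass the new controller's `signal` to `fetch`, and swallow the resulting `AbortError`. Reduced to its skeleton (a sketch — `API_URL` is a placeholder, not the app's real endpoint):

```ts
const API_URL = 'https://example.invalid/ask'; // placeholder

let inFlight: AbortController | null = null;

async function ask(question: string): Promise<string | null> {
  inFlight?.abort(); // cancel any previous request
  const controller = new AbortController();
  inFlight = controller;

  try {
    const res = await fetch(API_URL, {
      method: 'POST',
      headers: { 'Content-Type': 'application/x-www-form-urlencoded' },
      body: new URLSearchParams({ question }).toString(),
      signal: controller.signal, // fetch rejects with AbortError on abort()
    });
    return await res.text();
  } catch (err) {
    if (err instanceof Error && err.name === 'AbortError') {
      return null; // aborted on purpose — not a failure
    }
    throw err;
  }
}
```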
@@ -300,6 +337,12 @@ export function VoiceProvider({ children }: { children: ReactNode }) {
   const speak = useCallback(async (text: string): Promise<void> => {
     if (!text.trim()) return;
 
+    // Don't speak if session was stopped
+    if (sessionStoppedRef.current) {
+      console.log('[VoiceContext] Session stopped, skipping TTS');
+      return;
+    }
+
     console.log('[VoiceContext] Speaking:', text.slice(0, 50) + '...');
     setStatus('speaking');
     setIsSpeaking(true);
@@ -315,20 +358,27 @@ export function VoiceProvider({ children }: { children: ReactNode }) {
         onDone: () => {
           console.log('[VoiceContext] TTS completed');
           setIsSpeaking(false);
-          // Return to listening state after speaking (if session is active)
-          setStatus('listening');
+          // Return to listening state after speaking (if session wasn't stopped)
+          if (!sessionStoppedRef.current) {
+            setStatus('listening');
+          }
           resolve();
         },
         onError: (error) => {
-          console.error('[VoiceContext] TTS error:', error);
+          console.warn('[VoiceContext] TTS error:', error);
           setIsSpeaking(false);
-          setStatus('listening');
+          if (!sessionStoppedRef.current) {
+            setStatus('listening');
+          }
           resolve();
         },
         onStopped: () => {
           console.log('[VoiceContext] TTS stopped (interrupted)');
           setIsSpeaking(false);
-          setStatus('listening');
+          // Don't set status to listening if session was stopped by user
+          if (!sessionStoppedRef.current) {
+            setStatus('listening');
+          }
           resolve();
         },
       });
@@ -348,6 +398,7 @@ export function VoiceProvider({ children }: { children: ReactNode }) {
    */
   const startSession = useCallback(() => {
     console.log('[VoiceContext] Starting voice session');
+    sessionStoppedRef.current = false;
     setStatus('listening');
     setIsListening(true);
     setError(null);
@@ -360,7 +411,16 @@ export function VoiceProvider({ children }: { children: ReactNode }) {
    */
   const stopSession = useCallback(() => {
     console.log('[VoiceContext] Stopping voice session');
+    // Mark session as stopped FIRST to prevent any pending callbacks
+    sessionStoppedRef.current = true;
+    // Abort any in-flight API requests
+    if (abortControllerRef.current) {
+      abortControllerRef.current.abort();
+      abortControllerRef.current = null;
+    }
+    // Stop TTS
     Speech.stop();
+    // Reset all state
     setStatus('idle');
     setIsListening(false);
     setIsSpeaking(false);
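`sessionStoppedRef` is a ref rather than state on purpose: the TTS callbacks and the awaited fetch continuation fire long after render, and a value captured from state in those closures can be stale, while a ref is read at call time. The flag in isolation, as a sketch (not the provider's actual API):

```ts
import { useCallback, useRef } from 'react';

export function useStopFlag() {
  const stoppedRef = useRef(false);
  const begin = useCallback(() => { stoppedRef.current = false; }, []);
  const stop = useCallback(() => { stoppedRef.current = true; }, []);
  // Read at call time inside async callbacks — never stale.
  const isStopped = useCallback(() => stoppedRef.current, []);
  return { begin, stop, isStopped };
}
```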
@@ -104,6 +104,8 @@ export function useSpeechRecognition(
   const isStartingRef = useRef(false);
   // Track if voice has been detected in current session (for onVoiceDetected callback)
   const voiceDetectedRef = useRef(false);
+  // Track last partial transcript for iOS fix (iOS never sends isFinal:true)
+  const lastPartialRef = useRef('');
 
   // Check availability on mount
   useEffect(() => {
@@ -120,7 +122,7 @@ export function useSpeechRecognition(
       setIsAvailable(true);
       console.log('[SpeechRecognition] Available, permission status:', status.status);
     } catch (err) {
-      console.error('[SpeechRecognition] Not available:', err);
+      console.warn('[SpeechRecognition] Not available:', err);
       setIsAvailable(false);
     }
   };
@@ -140,6 +142,16 @@ export function useSpeechRecognition(
   // Event: Recognition ended
   useSpeechRecognitionEvent('end', () => {
     console.log('[SpeechRecognition] Ended');
+
+    // iOS FIX: iOS never sends isFinal:true, so we send last partial as final when STT ends
+    const lastPartial = lastPartialRef.current;
+    if (lastPartial && lastPartial.trim().length > 0) {
+      console.log('[SpeechRecognition] 🍎 iOS FIX - Sending last partial as final:', lastPartial);
+      setRecognizedText(lastPartial);
+      onResult?.(lastPartial, true); // Send as final=true
+      lastPartialRef.current = ''; // Clear after sending
+    }
+
     setIsListening(false);
     setPartialTranscript('');
     isStartingRef.current = false;
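The fix boils down to latching every partial result and flushing the latch exactly once when the recognizer's `end` event fires. The same latch-and-flush shape, isolated (names are illustrative):

```ts
// Latches partial transcripts; flush() hands back the last one once.
class PartialLatch {
  private last = '';

  update(partial: string): void {
    this.last = partial;
  }

  // Call from the 'end' event; returns null if nothing worth sending.
  flush(): string | null {
    const text = this.last.trim();
    this.last = '';
    return text.length > 0 ? text : null;
  }
}
```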
@@ -167,8 +179,10 @@ export function useSpeechRecognition(
     if (isFinal) {
       setRecognizedText(transcript);
       setPartialTranscript('');
+      lastPartialRef.current = ''; // Clear after final
     } else {
       setPartialTranscript(transcript);
+      lastPartialRef.current = transcript; // Save for iOS fix
     }
 
     onResult?.(transcript, isFinal);
@@ -177,15 +191,20 @@ export function useSpeechRecognition(
 
   // Event: Error occurred
   useSpeechRecognitionEvent('error', (event: any) => {
-    const errorMessage = event.message || event.error || 'Speech recognition error';
-    console.error('[SpeechRecognition] Error:', errorMessage);
+    const errorCode = event.error || '';
+    const errorMessage = event.message || errorCode || 'Speech recognition error';
 
-    // Don't set error for "no-speech" - this is normal when user doesn't say anything
-    if (event.error !== 'no-speech') {
-      setError(errorMessage);
-      onError?.(errorMessage);
+    // "no-speech" is normal when user is silent — ignore completely
+    if (errorCode === 'no-speech') {
+      console.log('[SpeechRecognition] No speech detected (silence) - ignoring');
+      setIsListening(false);
+      isStartingRef.current = false;
+      return;
     }
 
+    console.warn('[SpeechRecognition] Error:', errorMessage);
+    setError(errorMessage);
+    onError?.(errorMessage);
     setIsListening(false);
     isStartingRef.current = false;
   });
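Treating `no-speech` as silence rather than a failure keeps the hook's error state meaningful. If more benign codes show up later, an allowlist keeps the triage in one place (a sketch — only `no-speech` is confirmed benign by this diff):

```ts
const BENIGN_STT_ERRORS = new Set(['no-speech']);

function isBenignSttError(code: string): boolean {
  return BENIGN_STT_ERRORS.has(code);
}
```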
@@ -207,7 +226,7 @@ export function useSpeechRecognition(
 
     if (!isAvailable) {
       const msg = 'Speech recognition is not available on this device';
-      console.error('[SpeechRecognition]', msg);
+      console.warn('[SpeechRecognition]', msg);
       setError(msg);
       onError?.(msg);
       return false;
@@ -224,7 +243,7 @@ export function useSpeechRecognition(
 
     if (!permissionResult.granted) {
       const msg = 'Microphone permission denied';
-      console.error('[SpeechRecognition]', msg);
+      console.warn('[SpeechRecognition]', msg);
       setError(msg);
       onError?.(msg);
       isStartingRef.current = false;
@@ -249,7 +268,7 @@ export function useSpeechRecognition(
       return true;
     } catch (err) {
       const msg = err instanceof Error ? err.message : 'Failed to start speech recognition';
-      console.error('[SpeechRecognition] Start error:', msg);
+      console.warn('[SpeechRecognition] Start error:', msg);
       setError(msg);
       onError?.(msg);
       isStartingRef.current = false;
@@ -185,7 +185,7 @@ export function useTextToSpeech(
       },
       onError: (err) => {
         const errorMsg = typeof err === 'string' ? err : 'Speech synthesis error';
-        console.error('[TTS] Error:', errorMsg);
+        console.warn('[TTS] Error:', errorMsg);
         if (isMountedRef.current) {
           setIsSpeaking(false);
           setCurrentText(null);
@@ -227,7 +227,7 @@ export function useTextToSpeech(
       console.log('[TTS] Available voices:', voices.length);
       return voices;
     } catch (err) {
-      console.error('[TTS] Could not get voices:', err);
+      console.warn('[TTS] Could not get voices:', err);
       return [];
     }
   }, []);
package-lock.json — generated, 34 lines changed
@@ -12,6 +12,7 @@
         "@expo/vector-icons": "^15.0.3",
         "@jamsch/expo-speech-recognition": "^0.2.15",
         "@notifee/react-native": "^9.1.8",
+        "@react-native-async-storage/async-storage": "2.2.0",
         "@react-navigation/bottom-tabs": "^7.4.0",
         "@react-navigation/elements": "^2.6.3",
         "@react-navigation/native": "^7.1.8",
@@ -3568,6 +3569,18 @@
         }
       }
     },
+    "node_modules/@react-native-async-storage/async-storage": {
+      "version": "2.2.0",
+      "resolved": "https://registry.npmjs.org/@react-native-async-storage/async-storage/-/async-storage-2.2.0.tgz",
+      "integrity": "sha512-gvRvjR5JAaUZF8tv2Kcq/Gbt3JHwbKFYfmb445rhOj6NUMx3qPLixmDx5pZAyb9at1bYvJ4/eTUipU5aki45xw==",
+      "license": "MIT",
+      "dependencies": {
+        "merge-options": "^3.0.4"
+      },
+      "peerDependencies": {
+        "react-native": "^0.0.0-0 || >=0.65 <1.0"
+      }
+    },
     "node_modules/@react-native/assets-registry": {
       "version": "0.81.5",
       "resolved": "https://registry.npmjs.org/@react-native/assets-registry/-/assets-registry-0.81.5.tgz",
@@ -8754,6 +8767,15 @@
         "url": "https://github.com/sponsors/ljharb"
       }
     },
+    "node_modules/is-plain-obj": {
+      "version": "2.1.0",
+      "resolved": "https://registry.npmjs.org/is-plain-obj/-/is-plain-obj-2.1.0.tgz",
+      "integrity": "sha512-YWnfyRwxL/+SsrWYfOpUtz5b3YD+nyfkHvjbcanzk8zgyO4ASD67uVMRt8k5bM4lLMDnXfriRhOpemw+NfT1eA==",
+      "license": "MIT",
+      "engines": {
+        "node": ">=8"
+      }
+    },
     "node_modules/is-regex": {
       "version": "1.2.1",
       "resolved": "https://registry.npmjs.org/is-regex/-/is-regex-1.2.1.tgz",
@@ -9785,6 +9807,18 @@
       "integrity": "sha512-zYiwtZUcYyXKo/np96AGZAckk+FWWsUdJ3cHGGmld7+AhvcWmQyGCYUh1hc4Q/pkOhb65dQR/pqCyK0cOaHz4Q==",
       "license": "MIT"
     },
+    "node_modules/merge-options": {
+      "version": "3.0.4",
+      "resolved": "https://registry.npmjs.org/merge-options/-/merge-options-3.0.4.tgz",
+      "integrity": "sha512-2Sug1+knBjkaMsMgf1ctR1Ujx+Ayku4EdJN4Z+C2+JzoeF7A3OZ9KM2GY0CpQS51NR61LTurMJrRKPhSs3ZRTQ==",
+      "license": "MIT",
+      "dependencies": {
+        "is-plain-obj": "^2.1.0"
+      },
+      "engines": {
+        "node": ">=10"
+      }
+    },
     "node_modules/merge-stream": {
       "version": "2.0.0",
       "resolved": "https://registry.npmjs.org/merge-stream/-/merge-stream-2.0.0.tgz",

package.json
@@ -15,6 +15,7 @@
     "@expo/vector-icons": "^15.0.3",
     "@jamsch/expo-speech-recognition": "^0.2.15",
     "@notifee/react-native": "^9.1.8",
+    "@react-native-async-storage/async-storage": "2.2.0",
     "@react-navigation/bottom-tabs": "^7.4.0",
     "@react-navigation/elements": "^2.6.3",
     "@react-navigation/native": "^7.1.8",
@@ -113,7 +113,7 @@ class ApiService {
       console.log('[API] refreshToken result:', result.ok ? 'SUCCESS' : result.error?.message);
       return result;
     } catch (error) {
-      console.error('[API] refreshToken error:', error);
+      console.warn('[API] refreshToken error:', error);
       return {
         ok: false,
         error: { message: 'Failed to refresh token', code: 'REFRESH_ERROR' }
@@ -229,6 +229,20 @@ class ApiService {
     }
   }
 
+  // Voice API Type management
+  async setVoiceApiType(type: 'voice_ask' | 'ask_wellnuo_ai'): Promise<void> {
+    await SecureStore.setItemAsync('voiceApiType', type);
+  }
+
+  async getVoiceApiType(): Promise<'voice_ask' | 'ask_wellnuo_ai'> {
+    try {
+      const saved = await SecureStore.getItemAsync('voiceApiType');
+      return (saved as 'voice_ask' | 'ask_wellnuo_ai') || 'ask_wellnuo_ai';
+    } catch {
+      return 'ask_wellnuo_ai';
+    }
+  }
+
   async validateDeploymentId(deploymentId: string): Promise<ApiResponse<{ valid: boolean; name?: string }>> {
     const token = await this.getToken();
     const userName = await this.getUserName();
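Callers would read the saved type per request to pick an endpoint, roughly as below (a sketch — the endpoint mapping is illustrative, only the getter comes from the diff):

```ts
type VoiceApiType = 'voice_ask' | 'ask_wellnuo_ai';

async function resolveVoiceEndpoint(
  api: { getVoiceApiType(): Promise<VoiceApiType> },
): Promise<string> {
  const type = await api.getVoiceApiType(); // falls back to 'ask_wellnuo_ai'
  return type === 'voice_ask' ? '/voice_ask' : '/ask_wellnuo_ai'; // illustrative paths
}
```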
@@ -46,7 +46,7 @@ class CallManager {
         await this.disconnectCallback();
         console.log(`[CallManager] Previous call disconnected`);
       } catch (err) {
-        console.error(`[CallManager] Error disconnecting previous call:`, err);
+        console.warn(`[CallManager] Error disconnecting previous call:`, err);
       }
     }
   }
@@ -98,7 +98,7 @@ class CallManager {
       try {
         await this.disconnectCallback();
       } catch (err) {
-        console.error(`[CallManager] Error force disconnecting:`, err);
+        console.warn(`[CallManager] Error force disconnecting:`, err);
       }
     this.activeCallId = null;
     this.disconnectCallback = null;
@@ -228,7 +228,7 @@ export async function createCall(options: {
 
     if (!response.ok) {
       const errorData = await response.json().catch(() => ({}));
-      console.error('[Ultravox] API error:', response.status, errorData);
+      console.warn('[Ultravox] API error:', response.status, errorData);
       return {
         success: false,
         error: errorData.message || `API error: ${response.status}`,
@@ -239,7 +239,7 @@ export async function createCall(options: {
     console.log('[Ultravox] Call created:', data.callId);
     return { success: true, data };
   } catch (error) {
-    console.error('[Ultravox] Create call error:', error);
+    console.warn('[Ultravox] Create call error:', error);
     return {
       success: false,
       error: error instanceof Error ? error.message : 'Failed to create call',
@@ -265,7 +265,7 @@ export async function getCall(callId: string): Promise<CreateCallResponse | null> {
 
     return await response.json();
   } catch (error) {
-    console.error('[Ultravox] Get call error:', error);
+    console.warn('[Ultravox] Get call error:', error);
     return null;
   }
 }
@@ -284,7 +284,7 @@ export async function endCall(callId: string): Promise<boolean> {
 
     return response.ok;
   } catch (error) {
-    console.error('[Ultravox] End call error:', error);
+    console.warn('[Ultravox] End call error:', error);
     return false;
   }
 }
@@ -23,7 +23,7 @@ async function getNotifee() {
     try {
       notifee = (await import('@notifee/react-native')).default;
     } catch (e) {
-      console.error('[AndroidVoiceService] Failed to load notifee:', e);
+      console.warn('[AndroidVoiceService] Failed to load notifee:', e);
       return null;
     }
   }
@@ -52,7 +52,7 @@ async function createNotificationChannel(): Promise<void> {
     });
     console.log('[AndroidVoiceService] Notification channel created');
   } catch (e) {
-    console.error('[AndroidVoiceService] Failed to create channel:', e);
+    console.warn('[AndroidVoiceService] Failed to create channel:', e);
   }
 }
 
@@ -102,7 +102,7 @@ export async function startVoiceCallService(): Promise<void> {
 
     console.log('[AndroidVoiceService] Foreground service started');
   } catch (e) {
-    console.error('[AndroidVoiceService] Failed to start foreground service:', e);
+    console.warn('[AndroidVoiceService] Failed to start foreground service:', e);
   }
 }
 
@@ -123,7 +123,7 @@ export async function stopVoiceCallService(): Promise<void> {
     await notifeeModule.cancelNotification(NOTIFICATION_ID);
     console.log('[AndroidVoiceService] Foreground service stopped');
   } catch (e) {
-    console.error('[AndroidVoiceService] Failed to stop foreground service:', e);
+    console.warn('[AndroidVoiceService] Failed to stop foreground service:', e);
   }
 }
 
@@ -178,7 +178,7 @@ export async function openBatteryOptimizationSettings(): Promise<void> {
     // Try generic battery settings
     await Linking.openSettings();
   } catch (e) {
-    console.error('[AndroidVoiceService] Failed to open settings:', e);
+    console.warn('[AndroidVoiceService] Failed to open settings:', e);
   }
 }
 
@@ -262,7 +262,7 @@ export async function requestNotificationPermission(): Promise<boolean> {
     console.log('[AndroidVoiceService] Notification permission:', granted ? 'granted' : 'denied');
     return granted;
   } catch (e) {
-    console.error('[AndroidVoiceService] Failed to request notification permission:', e);
+    console.warn('[AndroidVoiceService] Failed to request notification permission:', e);
     return false;
   }
 }