import { Tabs } from 'expo-router';
import React, { useCallback, useEffect, useRef } from 'react';
import { Platform, View, AppState, AppStateStatus, TouchableOpacity, StyleSheet } from 'react-native';
import { Feather } from '@expo/vector-icons';
import { useSafeAreaInsets } from 'react-native-safe-area-context';
import { HapticTab } from '@/components/haptic-tab';
import { VoiceFAB } from '@/components/VoiceFAB';
import { AppColors } from '@/constants/theme';
import { useColorScheme } from '@/hooks/use-color-scheme';
import { useVoiceCall } from '@/contexts/VoiceCallContext';
import { useVoice } from '@/contexts/VoiceContext';
import { useSpeechRecognition } from '@/hooks/useSpeechRecognition';

/**
 * Root tab-bar layout. Besides rendering the tab navigator, this component
 * owns the hands-free voice loop: it wires speech-to-text (STT) from
 * useSpeechRecognition into the voice session state from VoiceContext,
 * handling echo prevention during TTS, user interruption, automatic STT
 * restarts, and app background/foreground transitions.
 *
 * Platform behavior (per the inline comments and Platform.OS branches below):
 * - Android: STT and TTS share audio focus, so STT is not started while TTS
 *   plays; interruption happens via the FAB press instead of voice detection.
 * - iOS: STT can run alongside TTS, so voice detection interrupts Julia; a
 *   2s-silence timer force-stops STT so onEnd can flush the last partial.
 */
export default function TabLayout() {
  const colorScheme = useColorScheme();
  const isDark = colorScheme === 'dark';
  const insets = useSafeAreaInsets();

  // VoiceFAB uses VoiceCallContext internally to hide when call is active
  useVoiceCall(); // Ensure context is available

  // Voice context for listening mode toggle and TTS interruption
  const {
    isListening,
    isSpeaking,
    status,
    startSession,
    stopSession,
    interruptIfSpeaking,
    setTranscript,
    setPartialTranscript,
    partialTranscript, // for iOS auto-stop timer
    sendTranscript,
  } = useVoice();

  // Track whether session is active (listening mode on, even during TTS).
  // Kept in refs so timer/STT callbacks read the latest value without
  // needing to be re-created on every render.
  const sessionActiveRef = useRef(false);
  // Track if we need to restart STT after it ends during active session
  const shouldRestartSTTRef = useRef(false);
  // Track pending transcript from interruption (to send after TTS stops).
  // NOTE(review): under strict TS, useRef(null) infers a null-only ref type,
  // yet a string is assigned below — presumably this compiles with a looser
  // config or should be useRef<string | null>(null); confirm tsconfig.
  const pendingInterruptTranscriptRef = useRef(null);

  // Callback for voice detection - interrupt TTS when user speaks
  // NOTE: On Android, STT doesn't run during TTS (shared audio focus),
  // so interruption on Android happens via FAB press instead.
  // On iOS, STT can run alongside TTS, so voice detection works.
  const handleVoiceDetected = useCallback(() => {
    if (Platform.OS === 'ios' && (status === 'speaking' || isSpeaking)) {
      console.log('[TabLayout] Voice detected during TTS (iOS) - INTERRUPTING Julia');
      interruptIfSpeaking();
    }
  }, [status, isSpeaking, interruptIfSpeaking]);

  // Callback when STT ends - may need to restart if session is still active
  const handleSTTEnd = useCallback(() => {
    console.log('[TabLayout] STT ended, sessionActive:', sessionActiveRef.current);
    // If session is still active (user didn't stop it), we should restart STT
    // This ensures STT continues during and after TTS playback
    if (sessionActiveRef.current) {
      shouldRestartSTTRef.current = true;
    }
  }, []);

  // Callback for STT results: final transcripts are committed and sent;
  // partials only update the live transcript display.
  const handleSpeechResult = useCallback((transcript: string, isFinal: boolean) => {
    // Ignore any STT results during TTS playback or processing (echo prevention)
    if (status === 'speaking' || status === 'processing') {
      if (isFinal) {
        // User interrupted Julia with speech — store to send after TTS stops
        console.log('[TabLayout] Got final result during TTS/processing - storing for after interruption:', transcript);
        pendingInterruptTranscriptRef.current = transcript;
      }
      // Ignore partial transcripts during TTS (they're likely echo)
      return;
    }
    if (isFinal) {
      setTranscript(transcript);
      sendTranscript(transcript);
    } else {
      setPartialTranscript(transcript);
    }
  }, [setTranscript, setPartialTranscript, sendTranscript, status]);

  // Speech recognition with voice detection callback
  const {
    startListening,
    stopListening,
    isListening: sttIsListening,
  } = useSpeechRecognition({
    lang: 'en-US',
    continuous: true,
    interimResults: true,
    onVoiceDetected: handleVoiceDetected,
    onResult: handleSpeechResult,
    onEnd: handleSTTEnd,
  });

  // Ref to prevent concurrent startListening calls
  const sttStartingRef = useRef(false);
  // Ref to track last partial transcript for iOS auto-stop
  const lastPartialTextRef = useRef('');
  // NOTE(review): holds a setTimeout handle but is typed by useRef(null);
  // should probably be useRef<ReturnType<typeof setTimeout> | null>(null).
  const silenceTimerRef = useRef(null);

  // iOS AUTO-STOP: Stop STT after 2 seconds of silence (no new partial transcripts)
  // This triggers onEnd → iOS fix sends lastPartial as final
  useEffect(() => {
    // Clear existing timer
    if (silenceTimerRef.current) {
      clearTimeout(silenceTimerRef.current);
      silenceTimerRef.current = null;
    }
    // Only track silence when STT is listening (not during processing/speaking)
    if (sttIsListening && status !== 'processing' && status !== 'speaking') {
      // Get current partial from VoiceContext (set by handleSpeechResult)
      const currentPartial = partialTranscript;
      // If partial changed, update ref and set new 2s timer
      if (currentPartial !== lastPartialTextRef.current) {
        lastPartialTextRef.current = currentPartial;
        // Start 2-second silence timer. The callback closes over this
        // render's sttIsListening; the cleanup below cancels the timer on
        // any dependency change, so staleness is bounded.
        silenceTimerRef.current = setTimeout(() => {
          if (sttIsListening && sessionActiveRef.current) {
            console.log('[TabLayout] 🍎 iOS AUTO-STOP: 2s silence - stopping STT to trigger onEnd → iOS fix');
            stopListening();
          }
        }, 2000);
      }
    }
    return () => {
      if (silenceTimerRef.current) {
        clearTimeout(silenceTimerRef.current);
        silenceTimerRef.current = null;
      }
    };
  }, [sttIsListening, status, partialTranscript, stopListening]);

  // Safe wrapper to start STT with debounce protection
  const safeStartSTT = useCallback(() => {
    if (sttIsListening || sttStartingRef.current) {
      return; // Already listening or starting
    }
    // Don't start STT during TTS on Android - they share audio focus
    if (Platform.OS === 'android' && (status === 'speaking' || isSpeaking)) {
      console.log('[TabLayout] Skipping STT start - TTS is playing (Android audio focus)');
      return;
    }
    sttStartingRef.current = true;
    console.log('[TabLayout] Starting STT...');
    startListening().finally(() => {
      sttStartingRef.current = false;
    });
  }, [sttIsListening, status, isSpeaking, startListening]);

  // Update session active ref when isListening changes
  useEffect(() => {
    sessionActiveRef.current = isListening;
    if (!isListening) {
      shouldRestartSTTRef.current = false;
    }
  }, [isListening]);

  // Start/stop STT when voice session starts/stops
  useEffect(() => {
    if (isListening) {
      console.log('[TabLayout] Voice session started - starting STT');
      safeStartSTT();
    } else {
      console.log('[TabLayout] Voice session ended - stopping STT');
      stopListening();
    }
  }, [isListening]); // eslint-disable-line react-hooks/exhaustive-deps

  // Track previous status to detect transition from speaking to listening
  const prevStatusRef = useRef('idle');

  // Stop STT when entering processing or speaking state (prevent echo)
  // Restart STT when TTS finishes (speaking → listening)
  useEffect(() => {
    const prevStatus = prevStatusRef.current;
    prevStatusRef.current = status;
    // Stop STT when processing starts or TTS starts (prevent Julia hearing herself)
    if ((status === 'processing' || status === 'speaking') && sttIsListening) {
      console.log('[TabLayout] Stopping STT during', status, '(echo prevention)');
      stopListening();
    }
    // When TTS finishes (speaking → listening), restart STT
    if (prevStatus === 'speaking' && status === 'listening' && sessionActiveRef.current) {
      console.log('[TabLayout] TTS finished - restarting STT');
      // Process pending transcript from interruption if any
      const pendingTranscript = pendingInterruptTranscriptRef.current;
      if (pendingTranscript) {
        console.log('[TabLayout] Processing pending interrupt transcript:', pendingTranscript);
        pendingInterruptTranscriptRef.current = null;
        setTranscript(pendingTranscript);
        sendTranscript(pendingTranscript);
      }
      // Delay to let TTS fully release audio focus, then restart STT
      const timer = setTimeout(() => {
        if (sessionActiveRef.current) {
          safeStartSTT();
        }
      }, 300); // 300ms to ensure TTS audio fully fades
      return () => clearTimeout(timer);
    }
    // When processing finishes and goes to speaking, STT is already stopped (above)
    // When speaking finishes and goes to listening, STT restarts (above)
  }, [status]); // eslint-disable-line react-hooks/exhaustive-deps

  // When STT ends unexpectedly during active session, restart it (but not during TTS)
  useEffect(() => {
    if (
      shouldRestartSTTRef.current &&
      sessionActiveRef.current &&
      !sttIsListening &&
      status !== 'processing' &&
      status !== 'speaking'
    ) {
      shouldRestartSTTRef.current = false;
      console.log('[TabLayout] STT ended unexpectedly - restarting');
      const timer = setTimeout(() => {
        if (sessionActiveRef.current) {
          safeStartSTT();
        }
      }, 300);
      return () => clearTimeout(timer);
    }
  }, [sttIsListening]); // eslint-disable-line react-hooks/exhaustive-deps

  // Handle app state changes (background/foreground): if the user returns to
  // the app mid-session with STT stopped, restart it after a short delay.
  useEffect(() => {
    const handleAppStateChange = (nextAppState: AppStateStatus) => {
      if (nextAppState === 'active' && sessionActiveRef.current) {
        setTimeout(() => {
          if (sessionActiveRef.current && !sttIsListening && status !== 'processing' && status !== 'speaking') {
            console.log('[TabLayout] App foregrounded - restarting STT');
            safeStartSTT();
          }
        }, 500);
      }
    };
    const subscription = AppState.addEventListener('change', handleAppStateChange);
    return () => subscription.remove();
  }, [sttIsListening, status, safeStartSTT]);

  // Handle voice FAB press - toggle listening mode
  // Must check ALL active states (listening, processing, speaking), not just isListening
  const handleVoiceFABPress = useCallback(() => {
    const isSessionActive = isListening || status === 'speaking' || status === 'processing';
    console.log('[TabLayout] FAB pressed, isSessionActive:', isSessionActive, 'status:', status, 'isListening:', isListening);
    if (isSessionActive) {
      // Force-stop everything: STT, TTS, and session state
      console.log('[TabLayout] Force-stopping everything');
      stopListening();
      stopSession();
      sessionActiveRef.current = false;
      shouldRestartSTTRef.current = false;
      pendingInterruptTranscriptRef.current = null;
    } else {
      startSession();
    }
  }, [isListening, status, startSession, stopSession, stopListening]);

  // Calculate tab bar height based on safe area
  // On iOS with home indicator, insets.bottom is ~34px
  // On Android with gesture navigation or software buttons (Samsung/Pixel):
  // - insets.bottom should reflect the navigation bar height
  // - But some devices/modes may return 0, so we add a minimum for Android
  // Android minimum: 16px to ensure content doesn't touch system buttons
  const androidMinPadding = Platform.OS === 'android' ? 16 : 0;
  const bottomPadding = Math.max(insets.bottom, androidMinPadding, 10);
  const tabBarHeight = 60 + bottomPadding; // 60px for content + safe area padding

  // NOTE(review): the JSX below is corrupted — every element tag (e.g.
  // <Tabs ...>, <Tabs.Screen ...>, icon components) has been stripped,
  // leaving only prop/children fragments between the JSX comments. The
  // original markup must be recovered from version control; it is preserved
  // here byte-for-byte rather than guessed at.
  return ( ( ), }} />
  {/* Hide old dashboard - now index shows WebView dashboard */}
  {/* Chat with Julia AI */} ( ), }} />
  {/* Voice FAB - center tab button */} ( ), }} />
  {/* Voice Debug - hidden from tab bar */} ( ), }} />
  {/* Audio Debug - hidden */}
  {/* Beneficiaries - hidden from tab bar but keeps tab bar visible */} );
}

// Styles for the raised center FAB slot in the tab bar: fills the tab slot,
// centers its child, and lifts it 20px so the button overlaps the bar's edge.
const tabFABStyles = StyleSheet.create({
  fabWrapper: {
    flex: 1,
    alignItems: 'center',
    justifyContent: 'center',
    top: -20,
  },
});