wellnua-lite/app/(tabs)/_layout.tsx
Sergei 3ec0f5dae2 Fix 3 critical bugs: WebView navbar, Android STT, Speaker icon
1. WebView Dashboard Navbar
   - Add `localStorage.setItem('is_mobile', '1')` so the web dashboard hides its nav bar (see the sketch below)
   - Fixes the issue where the web dashboard rendered its full navigation inside the app
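
A minimal sketch of how the flag can be injected, assuming the dashboard tab wraps `react-native-webview`; the commit does not show that screen, so the component name and `DASHBOARD_URL` are illustrative:

```tsx
import React from 'react';
import { WebView } from 'react-native-webview';

const DASHBOARD_URL = 'https://app.example.com/dashboard'; // hypothetical URL

export function DashboardWebView() {
  return (
    <WebView
      source={{ uri: DASHBOARD_URL }}
      // Runs before the page's own scripts, so the dashboard sees the
      // flag on first render and can skip its navigation bar.
      injectedJavaScriptBeforeContentLoaded={`
        localStorage.setItem('is_mobile', '1');
        true; // injected scripts should evaluate to a truthy value
      `}
    />
  );
}
```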

2. Android STT Restart After TTS
   - Reduce the STT restart delay from 300ms to 50ms on Android (the one-liner is shown below)
   - Android releases audio focus immediately after TTS ends
   - iOS keeps the 300ms delay for a smooth audio fade
   - File: app/(tabs)/_layout.tsx:208
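
The change itself is the platform check in the speaking → listening restart effect, visible further down in this file:

```ts
// iOS keeps a longer pause so TTS audio can fade out before the mic reopens.
const delay = Platform.OS === 'android' ? 50 : 300;
```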

3. Remove Speaker Icon from Chat Header
   - Remove the TTS stop button (volume-high icon) from chat.tsx (a rough reconstruction follows)
   - Not needed in the Lite version
   - File: app/(tabs)/chat.tsx:500-508
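
chat.tsx is not shown on this page; as a rough reconstruction from the description above, the removed control looked something like the following, where `stopSpeaking` and the icon set are assumptions rather than the actual chat.tsx code:

```tsx
// Hypothetical reconstruction of the removed header button.
import React from 'react';
import { TouchableOpacity } from 'react-native';
import { Ionicons } from '@expo/vector-icons';

function TTSStopButton({ stopSpeaking }: { stopSpeaking: () => void }) {
  return (
    <TouchableOpacity onPress={stopSpeaking} accessibilityLabel="Stop speech">
      <Ionicons name="volume-high" size={24} color="#fff" />
    </TouchableOpacity>
  );
}
```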

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <noreply@anthropic.com>
2026-01-29 09:08:11 -08:00

397 lines
14 KiB
TypeScript

import { Tabs } from 'expo-router';
import React, { useCallback, useEffect, useRef } from 'react';
import { Platform, View, AppState, AppStateStatus, TouchableOpacity, StyleSheet } from 'react-native';
import { Feather } from '@expo/vector-icons';
import { useSafeAreaInsets } from 'react-native-safe-area-context';
import { HapticTab } from '@/components/haptic-tab';
import { VoiceFAB } from '@/components/VoiceFAB';
import { AppColors } from '@/constants/theme';
import { useColorScheme } from '@/hooks/use-color-scheme';
import { useVoiceCall } from '@/contexts/VoiceCallContext';
import { useVoice } from '@/contexts/VoiceContext';
import { useSpeechRecognition } from '@/hooks/useSpeechRecognition';

export default function TabLayout() {
  const colorScheme = useColorScheme();
  const isDark = colorScheme === 'dark';
  const insets = useSafeAreaInsets();

  // VoiceFAB uses VoiceCallContext internally to hide when call is active
  useVoiceCall(); // Ensure context is available

  // Voice context for listening mode toggle and TTS interruption
  const {
    isListening,
    isSpeaking,
    status,
    startSession,
    stopSession,
    interruptIfSpeaking,
    setTranscript,
    setPartialTranscript,
    partialTranscript, // for iOS auto-stop timer
    sendTranscript,
  } = useVoice();

  // Track whether session is active (listening mode on, even during TTS)
  const sessionActiveRef = useRef(false);
  // Track if we need to restart STT after it ends during active session
  const shouldRestartSTTRef = useRef(false);
  // Track pending transcript from interruption (to send after TTS stops)
  const pendingInterruptTranscriptRef = useRef<string | null>(null);

  // Callback for voice detection - interrupt TTS when user speaks
  // NOTE: On Android, STT doesn't run during TTS (shared audio focus),
  // so interruption on Android happens via FAB press instead.
  // On iOS, STT can run alongside TTS, so voice detection works.
  const handleVoiceDetected = useCallback(() => {
    if (Platform.OS === 'ios' && (status === 'speaking' || isSpeaking)) {
      console.log('[TabLayout] Voice detected during TTS (iOS) - INTERRUPTING Julia');
      interruptIfSpeaking();
    }
  }, [status, isSpeaking, interruptIfSpeaking]);

  // Callback when STT ends - may need to restart if session is still active
  const handleSTTEnd = useCallback(() => {
    console.log('[TabLayout] STT ended, sessionActive:', sessionActiveRef.current);
    // If session is still active (user didn't stop it), we should restart STT
    // This ensures STT continues during and after TTS playback
    if (sessionActiveRef.current) {
      shouldRestartSTTRef.current = true;
    }
  }, []);

  // Callback for STT results
  const handleSpeechResult = useCallback((transcript: string, isFinal: boolean) => {
    // Ignore any STT results during TTS playback or processing (echo prevention)
    if (status === 'speaking' || status === 'processing') {
      if (isFinal) {
        // User interrupted Julia with speech — store to send after TTS stops
        console.log('[TabLayout] Got final result during TTS/processing - storing for after interruption:', transcript);
        pendingInterruptTranscriptRef.current = transcript;
      }
      // Ignore partial transcripts during TTS (they're likely echo)
      return;
    }
    if (isFinal) {
      setTranscript(transcript);
      sendTranscript(transcript);
    } else {
      setPartialTranscript(transcript);
    }
  }, [setTranscript, setPartialTranscript, sendTranscript, status]);

  // Speech recognition with voice detection callback
  const {
    startListening,
    stopListening,
    isListening: sttIsListening,
  } = useSpeechRecognition({
    lang: 'en-US',
    continuous: true,
    interimResults: true,
    onVoiceDetected: handleVoiceDetected,
    onResult: handleSpeechResult,
    onEnd: handleSTTEnd,
  });

  // Ref to prevent concurrent startListening calls
  const sttStartingRef = useRef(false);
  // Ref to track last partial transcript for iOS auto-stop
  const lastPartialTextRef = useRef('');
  const silenceTimerRef = useRef<ReturnType<typeof setTimeout> | null>(null);

  // iOS AUTO-STOP: Stop STT after 2 seconds of silence (no new partial transcripts)
  // This triggers onEnd → iOS fix sends lastPartial as final
  useEffect(() => {
    // Clear existing timer
    if (silenceTimerRef.current) {
      clearTimeout(silenceTimerRef.current);
      silenceTimerRef.current = null;
    }
    // Only track silence when STT is listening (not during processing/speaking)
    if (sttIsListening && status !== 'processing' && status !== 'speaking') {
      // Get current partial from VoiceContext (set by handleSpeechResult)
      const currentPartial = partialTranscript;
      // If partial changed, update ref and set new 2s timer
      if (currentPartial !== lastPartialTextRef.current) {
        lastPartialTextRef.current = currentPartial;
        // Start 2-second silence timer
        silenceTimerRef.current = setTimeout(() => {
          if (sttIsListening && sessionActiveRef.current) {
            console.log('[TabLayout] 🍎 iOS AUTO-STOP: 2s silence - stopping STT to trigger onEnd → iOS fix');
            stopListening();
          }
        }, 2000);
      }
    }
    return () => {
      if (silenceTimerRef.current) {
        clearTimeout(silenceTimerRef.current);
        silenceTimerRef.current = null;
      }
    };
  }, [sttIsListening, status, partialTranscript, stopListening]);

  // Safe wrapper to start STT with debounce protection
  const safeStartSTT = useCallback(() => {
    if (sttIsListening || sttStartingRef.current) {
      return; // Already listening or starting
    }
    // Don't start STT during TTS on Android - they share audio focus
    if (Platform.OS === 'android' && (status === 'speaking' || isSpeaking)) {
      console.log('[TabLayout] Skipping STT start - TTS is playing (Android audio focus)');
      return;
    }
    sttStartingRef.current = true;
    console.log('[TabLayout] Starting STT...');
    startListening().finally(() => {
      sttStartingRef.current = false;
    });
  }, [sttIsListening, status, isSpeaking, startListening]);

  // Update session active ref when isListening changes
  useEffect(() => {
    sessionActiveRef.current = isListening;
    if (!isListening) {
      shouldRestartSTTRef.current = false;
    }
  }, [isListening]);

  // Start/stop STT when voice session starts/stops
  useEffect(() => {
    if (isListening) {
      console.log('[TabLayout] Voice session started - starting STT');
      safeStartSTT();
    } else {
      console.log('[TabLayout] Voice session ended - stopping STT');
      stopListening();
    }
  }, [isListening]); // eslint-disable-line react-hooks/exhaustive-deps

  // Track previous status to detect transition from speaking to listening
  const prevStatusRef = useRef<typeof status>('idle');

  // Stop STT when entering processing or speaking state (prevent echo)
  // Restart STT when TTS finishes (speaking → listening)
  useEffect(() => {
    const prevStatus = prevStatusRef.current;
    prevStatusRef.current = status;
    // Stop STT when processing starts or TTS starts (prevent Julia hearing herself)
    if ((status === 'processing' || status === 'speaking') && sttIsListening) {
      console.log('[TabLayout] Stopping STT during', status, '(echo prevention)');
      stopListening();
    }
    // When TTS finishes (speaking → listening), restart STT
    if (prevStatus === 'speaking' && status === 'listening' && sessionActiveRef.current) {
      console.log('[TabLayout] TTS finished - restarting STT');
      // Process pending transcript from interruption if any
      const pendingTranscript = pendingInterruptTranscriptRef.current;
      if (pendingTranscript) {
        console.log('[TabLayout] Processing pending interrupt transcript:', pendingTranscript);
        pendingInterruptTranscriptRef.current = null;
        setTranscript(pendingTranscript);
        sendTranscript(pendingTranscript);
      }
      // Delay to let TTS fully release audio focus, then restart STT
      // iOS: 300ms for smooth audio fade
      // Android: 50ms (Audio Focus releases immediately)
      const delay = Platform.OS === 'android' ? 50 : 300;
      const timer = setTimeout(() => {
        if (sessionActiveRef.current) {
          safeStartSTT();
        }
      }, delay);
      return () => clearTimeout(timer);
    }
    // When processing finishes and goes to speaking, STT is already stopped (above)
    // When speaking finishes and goes to listening, STT restarts (above)
  }, [status]); // eslint-disable-line react-hooks/exhaustive-deps

  // When STT ends unexpectedly during active session, restart it (but not during TTS)
  useEffect(() => {
    if (
      shouldRestartSTTRef.current &&
      sessionActiveRef.current &&
      !sttIsListening &&
      status !== 'processing' &&
      status !== 'speaking'
    ) {
      shouldRestartSTTRef.current = false;
      console.log('[TabLayout] STT ended unexpectedly - restarting');
      const timer = setTimeout(() => {
        if (sessionActiveRef.current) {
          safeStartSTT();
        }
      }, 300);
      return () => clearTimeout(timer);
    }
  }, [sttIsListening]); // eslint-disable-line react-hooks/exhaustive-deps

  // Handle app state changes (background/foreground)
  useEffect(() => {
    const handleAppStateChange = (nextAppState: AppStateStatus) => {
      // When app goes to background/inactive - stop voice session
      // STT/TTS cannot work in background, so it's pointless to keep session active
      if ((nextAppState === 'background' || nextAppState === 'inactive') && sessionActiveRef.current) {
        console.log('[TabLayout] App going to background - stopping voice session');
        stopListening();
        stopSession();
        sessionActiveRef.current = false;
        shouldRestartSTTRef.current = false;
        pendingInterruptTranscriptRef.current = null;
      }
      // When app comes back to foreground - do NOT auto-restart session
      // User must manually press FAB to start new session
      if (nextAppState === 'active') {
        console.log('[TabLayout] App foregrounded - session remains stopped (user must restart via FAB)');
      }
    };
    const subscription = AppState.addEventListener('change', handleAppStateChange);
    return () => subscription.remove();
  }, [stopListening, stopSession]);

  // Handle voice FAB press - toggle listening mode
  // Must check ALL active states (listening, processing, speaking), not just isListening
  const handleVoiceFABPress = useCallback(() => {
    const isSessionActive = isListening || status === 'speaking' || status === 'processing';
    console.log('[TabLayout] FAB pressed, isSessionActive:', isSessionActive, 'status:', status, 'isListening:', isListening);
    if (isSessionActive) {
      // Force-stop everything: STT, TTS, and session state
      console.log('[TabLayout] Force-stopping everything');
      stopListening();
      stopSession();
      sessionActiveRef.current = false;
      shouldRestartSTTRef.current = false;
      pendingInterruptTranscriptRef.current = null;
    } else {
      startSession();
    }
  }, [isListening, status, startSession, stopSession, stopListening]);

  // Calculate tab bar height based on safe area
  // On iOS with home indicator, insets.bottom is ~34px
  // On Android with gesture navigation or software buttons (Samsung/Pixel):
  // - insets.bottom should reflect the navigation bar height
  // - But some devices/modes may return 0, so we add a minimum for Android
  // Android minimum: 16px to ensure content doesn't touch system buttons
  const androidMinPadding = Platform.OS === 'android' ? 16 : 0;
  const bottomPadding = Math.max(insets.bottom, androidMinPadding, 10);
  const tabBarHeight = 60 + bottomPadding; // 60px for content + safe area padding
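
  // Worked example of the math above: an iPhone with a home indicator reports
  // insets.bottom ≈ 34, so bottomPadding = max(34, 0, 10) = 34 and the bar is
  // 94px tall; an Android device that reports insets.bottom = 0 falls back to
  // bottomPadding = max(0, 16, 10) = 16, giving a 76px bar.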
  return (
    <View style={{ flex: 1 }}>
      <Tabs
        screenOptions={{
          tabBarActiveTintColor: AppColors.primary,
          tabBarInactiveTintColor: isDark ? '#9BA1A6' : '#687076',
          tabBarStyle: {
            backgroundColor: isDark ? '#151718' : AppColors.background,
            borderTopColor: isDark ? '#2D3135' : AppColors.border,
            height: tabBarHeight,
            paddingBottom: bottomPadding,
            paddingTop: 10,
          },
          tabBarLabelStyle: {
            fontSize: 11,
            fontWeight: '500',
          },
          headerShown: false,
          tabBarButton: HapticTab,
        }}
      >
        <Tabs.Screen
          name="index"
          options={{
            title: 'Dashboard',
            tabBarIcon: ({ color, size }) => (
              <Feather name="grid" size={22} color={color} />
            ),
          }}
        />
        {/* Hide old dashboard - now index shows WebView dashboard */}
        <Tabs.Screen
          name="dashboard"
          options={{
            href: null,
          }}
        />
        {/* Chat with Julia AI */}
        <Tabs.Screen
          name="chat"
          options={{
            title: 'Julia',
            tabBarIcon: ({ color, size }) => (
              <Feather name="message-circle" size={22} color={color} />
            ),
          }}
        />
        {/* Voice FAB - center tab button */}
        <Tabs.Screen
          name="explore"
          options={{
            title: '',
            tabBarButton: () => (
              <View style={tabFABStyles.fabWrapper}>
                <VoiceFAB
                  onPress={handleVoiceFABPress}
                  isListening={isListening || status === 'speaking' || status === 'processing'}
                />
              </View>
            ),
          }}
        />
        {/* Voice Debug - hidden from tab bar */}
        <Tabs.Screen
          name="voice-debug"
          options={{
            href: null,
          }}
        />
        <Tabs.Screen
          name="profile"
          options={{
            title: 'Profile',
            tabBarIcon: ({ color, size }) => (
              <Feather name="user" size={22} color={color} />
            ),
          }}
        />
        {/* Audio Debug - hidden */}
        <Tabs.Screen
          name="audio-debug"
          options={{
            href: null,
          }}
        />
        {/* Beneficiaries - hidden from tab bar but keeps tab bar visible */}
        <Tabs.Screen
          name="beneficiaries"
          options={{
            href: null,
          }}
        />
      </Tabs>
    </View>
  );
}

const tabFABStyles = StyleSheet.create({
  fabWrapper: {
    flex: 1,
    alignItems: 'center',
    justifyContent: 'center',
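    // Negative offset lifts the FAB above the tab bar's top edge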
    top: -20,
  },
});