// Chat screen: a text + voice conversation with "Julia", an AI assistant that answers
// questions about a selected beneficiary using live activity/dashboard data as context.
//
// NOTE(review): this file was recovered from an extraction that stripped every
// angle-bracket construct. All JSX element tags and all generic type arguments
// (e.g. `useState<Message[]>`, `useRef<FlatList>`, `Promise<string>`) are missing
// below. The garbled spans are marked inline — restore them from version control;
// do NOT hand-reconstruct the JSX from this copy.
import React, { useState, useCallback, useRef, useEffect } from 'react';
import {
  View, Text, StyleSheet, FlatList, TextInput, TouchableOpacity,
  KeyboardAvoidingView, Platform, Modal, ActivityIndicator, Keyboard,
  Animated, Alert, Linking, ScrollView,
} from 'react-native';
import { Ionicons } from '@expo/vector-icons';
import { SafeAreaView } from 'react-native-safe-area-context';
import * as SecureStore from 'expo-secure-store';
import { useRouter } from 'expo-router';
import { api } from '@/services/api';
import { useBeneficiary } from '@/contexts/BeneficiaryContext';
import { AppColors, BorderRadius, FontSizes, Spacing } from '@/constants/theme';
import type { Message, Beneficiary } from '@/types';
import { useSpeechRecognition } from '@/hooks/useSpeechRecognition';
import sherpaTTS from '@/services/sherpaTTS';
import { VoiceIndicator } from '@/components/VoiceIndicator';
import { TTSErrorBoundary } from '@/components/TTSErrorBoundary';

// Backend endpoint for all form-encoded "function" calls (activity, dashboard, voice_ask).
const API_URL = 'https://eluxnetworks.net/function/well-api/api';

// Inner screen component; wrapped in TTSErrorBoundary by the default export below.
function ChatScreenContent() {
  const router = useRouter();
  // NOTE(review): `getBeneficiaryContext` is destructured but not used in the visible
  // logic; it may be used in the (stripped) JSX — confirm before removing.
  const { currentBeneficiary, setCurrentBeneficiary, getBeneficiaryContext } = useBeneficiary();
  // Conversation history, seeded with a greeting from the assistant.
  const [messages, setMessages] = useState([
    {
      id: '1',
      role: 'assistant',
      content: 'Hello! I\'m Julia, your AI assistant. How can I help you today?',
      timestamp: new Date(),
    },
  ]);
  const [input, setInput] = useState('');            // text currently in the input box
  const [isSending, setIsSending] = useState(false); // true while awaiting the AI response
  const flatListRef = useRef(null);                  // ref to the messages list (for scrollToEnd)

  // Voice state
  const [isSpeaking, setIsSpeaking] = useState(false);          // TTS currently playing
  const [ttsInitialized, setTtsInitialized] = useState(false);  // sherpaTTS.initialize() succeeded
  const [voiceFeedback, setVoiceFeedback] = useState(null);     // transient "didn't catch that" banner text
  const [isVoiceConversation, setIsVoiceConversation] = useState(false); // Auto-listen mode
  const pulseAnim = useRef(new Animated.Value(1)).current;      // scale value for the mic pulse

  // Speech recognition hook
  // NOTE(review): `speechRecognitionAvailable` is unused in the visible logic — likely
  // referenced in the stripped JSX (e.g. to disable the mic button); verify.
  const {
    isListening,
    recognizedText,
    startListening,
    stopListening,
    isAvailable: speechRecognitionAvailable,
    requestPermission,
  } = useSpeechRecognition();

  // Beneficiary picker state
  const [showBeneficiaryPicker, setShowBeneficiaryPicker] = useState(false);
  const [beneficiaries, setBeneficiaries] = useState([]);
  const [loadingBeneficiaries, setLoadingBeneficiaries] = useState(false);

  // Initialize TTS on mount
  // NOTE(review): on init failure this logs "will use fallback", but no expo-speech
  // fallback path is visible in this file — confirm the fallback actually exists.
  useEffect(() => {
    const initTTS = async () => {
      try {
        const success = await sherpaTTS.initialize();
        setTtsInitialized(success);
        console.log('[Chat] SherpaTTS initialized:', success);
      } catch (error) {
        console.log('[Chat] SherpaTTS init failed, will use fallback');
      }
    };
    initTTS();
    // Release the TTS engine when the screen unmounts.
    return () => {
      sherpaTTS.deinitialize();
    };
  }, []);

  // Pulse animation for listening state: loop 1 -> 1.3 -> 1 while the mic is live,
  // snap back to 1 when it stops.
  useEffect(() => {
    if (isListening) {
      const pulse = Animated.loop(
        Animated.sequence([
          Animated.timing(pulseAnim, { toValue: 1.3, duration: 500, useNativeDriver: true, }),
          Animated.timing(pulseAnim, { toValue: 1, duration: 500, useNativeDriver: true, }),
        ])
      );
      pulse.start();
      return () => pulse.stop();
    } else {
      pulseAnim.setValue(1);
    }
  }, [isListening, pulseAnim]);

  // Track if we were just listening (to show feedback when stopped)
  const wasListeningRef = useRef(false);

  // Auto-send when speech recognition completes: on the listening -> stopped edge,
  // either send the recognized text (after a short settle delay) or show feedback.
  // NOTE(review): `handleVoiceSend` is referenced here but declared further down and is
  // not in the dependency array — it works at runtime (the call happens inside a
  // setTimeout after render) but the effect can capture a stale callback; verify.
  useEffect(() => {
    if (!isListening && wasListeningRef.current) {
      // We just stopped listening
      wasListeningRef.current = false;
      if (recognizedText.trim()) {
        // We have text - send it
        setInput(recognizedText);
        setTimeout(() => {
          if (recognizedText.trim()) {
            handleVoiceSend(recognizedText.trim());
          }
        }, 300);
      } else {
        // No text recognized (C4 scenario) - show brief feedback
        setInput('');
        setVoiceFeedback("Didn't catch that. Try again.");
        // Auto-hide after 2 seconds
        setTimeout(() => setVoiceFeedback(null), 2000);
      }
    }
    if (isListening) {
      wasListeningRef.current = true;
    }
  }, [isListening, recognizedText]);

  // Auto-start listening after TTS finishes (only while in voice-conversation mode).
  const autoStartListening = useCallback(async () => {
    if (!isVoiceConversation) return;
    // IMPORTANT: Wait longer to ensure TTS audio has fully stopped
    // This prevents the microphone from capturing TTS output
    await new Promise(resolve => setTimeout(resolve, 800));
    // Double-check we're not speaking anymore (TTS may have restarted)
    const stillSpeaking = await sherpaTTS.isSpeaking().catch(() => false);
    if (stillSpeaking) {
      console.log('[Chat] TTS still speaking, not starting listening yet');
      return;
    }
    const hasPermission = await requestPermission();
    if (hasPermission && isVoiceConversation) {
      console.log('[Chat] Auto-starting listening after TTS');
      startListening({ continuous: false });
    }
  }, [isVoiceConversation, requestPermission, startListening]);

  // TTS function - use SherpaTTS or fallback to expo-speech
  // Speaks `text`; when `shouldAutoListen` is true, enters voice-conversation mode and
  // re-opens the microphone when playback completes.
  // NOTE(review): `isListening` and `stopListening` are read here but missing from the
  // dependency array — the listening check may see a stale value; verify intent.
  const speakText = useCallback(async (text: string, shouldAutoListen = false) => {
    if (isSpeaking) return;
    // CRITICAL: Stop any active listening BEFORE TTS starts
    // This prevents the microphone from capturing TTS audio output
    if (isListening) {
      console.log('[Chat] Stopping listening before TTS');
      stopListening();
      wasListeningRef.current = false; // Prevent auto-send of any partial text
    }
    setIsSpeaking(true);
    if (shouldAutoListen) {
      setIsVoiceConversation(true);
    }
    const handleDone = () => {
      setIsSpeaking(false);
      // Auto-start listening if in voice conversation mode
      if (shouldAutoListen || isVoiceConversation) {
        autoStartListening();
      }
    };
    try {
      if (ttsInitialized && sherpaTTS.isAvailable()) {
        await sherpaTTS.speak(text, {
          onDone: handleDone,
          onError: (error) => {
            console.error('[Chat] TTS speak error:', error);
            setIsSpeaking(false);
          },
        });
      } else {
        console.warn('[Chat] TTS not available');
        setIsSpeaking(false);
      }
    } catch (error) {
      console.error('[Chat] TTS error:', error);
      setIsSpeaking(false);
    }
  }, [isSpeaking, ttsInitialized, isVoiceConversation, autoStartListening]);

  // Stop TTS only (without exiting voice mode)
  const stopTTS = useCallback(() => {
    if (ttsInitialized && sherpaTTS.isAvailable()) {
      sherpaTTS.stop();
    }
    setIsSpeaking(false);
  }, [ttsInitialized]);

  // Stop TTS and exit voice conversation mode completely
  const stopSpeaking = useCallback(() => {
    stopTTS();
    setIsVoiceConversation(false); // Exit voice mode when user stops TTS
  }, [stopTTS]);

  // Smart handler for VoiceIndicator tap - behavior depends on current mode
  const handleVoiceIndicatorTap = useCallback(async (currentMode: 'listening' | 'speaking') => {
    console.log('[Chat] VoiceIndicator tapped in mode:', currentMode);
    if (currentMode === 'listening') {
      // User tapped while we're recording their voice
      // Action: Cancel recording and exit voice mode completely
      console.log('[Chat] Cancelling listening, exiting voice mode');
      stopListening();
      setIsVoiceConversation(false);
      wasListeningRef.current = false; // Prevent auto-send of partial text
      setInput(''); // Clear any partial text
    } else if (currentMode === 'speaking') {
      // User tapped while AI is speaking
      // Action: Interrupt AI and immediately start listening to user (like interrupting in conversation)
      console.log('[Chat] Interrupting AI speech, starting to listen');
      stopTTS();
      // Small delay then start listening
      await new Promise(resolve => setTimeout(resolve, 200));
      const hasPermission = await requestPermission();
      if (hasPermission) {
        startListening({ continuous: false });
      }
    }
  }, [stopListening, stopTTS, requestPermission, startListening]);

  // Show permission denied alert
  const showPermissionDeniedAlert = useCallback(() => {
    Alert.alert(
      'Microphone Access Required',
      'To use voice input, please allow microphone access in Settings.',
      [
        { text: 'Cancel', style: 'cancel' },
        { text: 'Open Settings', onPress: () => Linking.openSettings(), },
      ]
    );
  }, []);

  // Handle voice input toggle (the mic button): stop if listening, otherwise
  // silence any TTS, dismiss the keyboard, check permission, and start listening.
  const handleVoiceToggle = useCallback(async () => {
    if (isListening) {
      // User tapped while listening - stop and check if we have text
      stopListening();
      // Note: The useEffect below handles auto-send if recognizedText exists
      // If no text was recognized, it just cancels (B3 scenario)
    } else {
      // Stop any ongoing speech first
      if (isSpeaking) {
        stopSpeaking();
      }
      // Dismiss keyboard (E1 scenario)
      Keyboard.dismiss();
      // Request permission if needed
      const hasPermission = await requestPermission();
      if (!hasPermission) {
        // Show alert with option to open settings (C1 scenario)
        showPermissionDeniedAlert();
        return;
      }
      startListening({ continuous: false });
    }
  }, [isListening, isSpeaking, startListening, stopListening, stopSpeaking, requestPermission, showPermissionDeniedAlert]);

  // Handle sending voice message: same flow as handleSend but keeps voice-conversation
  // mode on and speaks the response (or the error) with auto-listen enabled.
  const handleVoiceSend = useCallback(async (text: string) => {
    if (!text.trim() || isSending) return;
    // Mark that we're in voice conversation mode
    setIsVoiceConversation(true);
    const userMessage: Message = { id: Date.now().toString(), role: 'user', content: text, timestamp: new Date(), };
    setMessages((prev) => [...prev, userMessage]);
    setInput('');
    setIsSending(true);
    try {
      const aiResponse = await sendWithContext(text);
      const assistantMessage: Message = { id: (Date.now() + 1).toString(), role: 'assistant', content: aiResponse, timestamp: new Date(), };
      setMessages((prev) => [...prev, assistantMessage]);
      // Speak the response with auto-listen enabled
      speakText(aiResponse, true);
    } catch (error) {
      const errorText = `Sorry, I encountered an error: ${error instanceof Error ? error.message : 'Unknown error'}`;
      const errorMessage: Message = { id: (Date.now() + 1).toString(), role: 'assistant', content: errorText, timestamp: new Date(), };
      setMessages((prev) => [...prev, errorMessage]);
      // Speak error message with auto-listen enabled
      speakText(errorText, true);
    } finally {
      setIsSending(false);
    }
  }, [isSending, speakText]);

  // Load beneficiaries when picker opens. Returns the loaded list (or []) so callers
  // can auto-select from it without waiting for the state update.
  const loadBeneficiaries = useCallback(async () => {
    setLoadingBeneficiaries(true);
    try {
      const response = await api.getAllBeneficiaries();
      if (response.ok && response.data) {
        setBeneficiaries(response.data);
        return response.data;
      }
      return [];
    } catch (error) {
      console.error('Failed to load beneficiaries:', error);
      return [];
    } finally {
      setLoadingBeneficiaries(false);
    }
  }, []);

  // Auto-select first beneficiary on mount if none selected
  useEffect(() => {
    const autoSelectBeneficiary = async () => {
      if (!currentBeneficiary) {
        const loaded = await loadBeneficiaries();
        if (loaded.length > 0) {
          setCurrentBeneficiary(loaded[0]);
          console.log('Auto-selected first beneficiary:', loaded[0].name);
        }
      }
    };
    autoSelectBeneficiary();
  }, []);

  // Open the picker modal and refresh the list in the background.
  const openBeneficiaryPicker = useCallback(() => {
    setShowBeneficiaryPicker(true);
    loadBeneficiaries();
  }, [loadBeneficiaries]);

  // Commit a picker selection and close the modal.
  const selectBeneficiary = useCallback((beneficiary: Beneficiary) => {
    setCurrentBeneficiary(beneficiary);
    setShowBeneficiaryPicker(false);
  }, [setCurrentBeneficiary]);

  // Fetch activity data for context
  // Summarizes the "Weekly" chart (today's and weekly per-room hours) into one string;
  // returns '' on any failure so callers can fall back to the dashboard context.
  // NOTE(review): return-type generic was stripped by extraction (presumably
  // `Promise<string>`) — restore from VCS.
  const getActivityContext = async (token: string, userName: string, deploymentId: string): Promise => {
    try {
      const response = await fetch(API_URL, {
        method: 'POST',
        headers: { 'Content-Type': 'application/x-www-form-urlencoded' },
        body: new URLSearchParams({ function: 'activities_report_details', user_name: userName, token: token, deployment_id: deploymentId, filter: '0', }).toString(),
      });
      const data = await response.json();
      if (!data.chart_data || data.chart_data.length === 0) return '';
      const weeklyData = data.chart_data.find((d: any) => d.name === 'Weekly');
      if (!weeklyData) return '';
      const lines: string[] = [];
      if (data.alert_text) lines.push(`Alert status: ${data.alert_text}`);
      // Today's activity = last data point of each room's weekly series.
      const todayStats: string[] = [];
      for (const room of weeklyData.rooms) {
        const todayData = room.data[room.data.length - 1];
        if (todayData && todayData.hours > 0) {
          todayStats.push(`${room.name}: ${todayData.hours.toFixed(1)} hours (${todayData.events} events)`);
        }
      }
      if (todayStats.length > 0) lines.push(`Today's activity: ${todayStats.join(', ')}`);
      // Weekly summary = per-room totals over the whole series.
      const weeklyStats: string[] = [];
      for (const room of weeklyData.rooms) {
        const totalHours = room.data.reduce((sum: number, d: any) => sum + d.hours, 0);
        if (totalHours > 0) {
          weeklyStats.push(`${room.name}: ${totalHours.toFixed(1)} hours total this week`);
        }
      }
      if (weeklyStats.length > 0) lines.push(`Weekly summary: ${weeklyStats.join(', ')}`);
      return lines.join('. ');
    } catch (error) {
      console.log('Failed to fetch activity context:', error);
      return '';
    }
  };

  // Fetch dashboard data as fallback context
  // Builds a one-line wellness summary for today; returns '' on any failure.
  // NOTE(review): return-type generic stripped by extraction (presumably `Promise<string>`).
  const getDashboardContext = async (token: string, userName: string, deploymentId: string): Promise => {
    try {
      const today = new Date().toISOString().split('T')[0];
      const response = await fetch(API_URL, {
        method: 'POST',
        headers: { 'Content-Type': 'application/x-www-form-urlencoded' },
        body: new URLSearchParams({ function: 'dashboard_single', user_name: userName, token: token, deployment_id: deploymentId, date: today, }).toString(),
      });
      const data = await response.json();
      if (!data.result_list || data.result_list.length === 0) return '';
      const info = data.result_list[0];
      const lines: string[] = [];
      if (info.wellness_descriptor) lines.push(`Current wellness: ${info.wellness_descriptor}`);
      if (info.wellness_score_percent) lines.push(`Wellness score: ${info.wellness_score_percent}%`);
      if (info.last_location) lines.push(`Last seen in: ${info.last_location}`);
      if (info.last_detected_time) lines.push(`Last activity: ${info.last_detected_time}`);
      if (info.sleep_hours) lines.push(`Sleep hours: ${info.sleep_hours}`);
      if (info.temperature) lines.push(`Temperature: ${info.temperature}${info.units === 'F' ? '°F' : '°C'}`);
      return lines.join('. ');
    } catch (error) {
      console.log('Failed to fetch dashboard context:', error);
      return '';
    }
  };

  // Send message with full context - fetches context in parallel for speed
  // Resolves credentials, ensures a beneficiary is selected (auto-selecting the first
  // one if needed), gathers context, and calls the `voice_ask` backend function.
  // Throws Error with a user-facing message on any failure.
  // NOTE(review): return-type generic stripped by extraction (presumably `Promise<string>`).
  const sendWithContext = async (question: string): Promise => {
    const token = await SecureStore.getItemAsync('accessToken');
    const userName = await SecureStore.getItemAsync('userName');
    if (!token || !userName) throw new Error('Please log in');
    // Auto-select first beneficiary if none selected
    let beneficiary = currentBeneficiary;
    if (!beneficiary?.id) {
      console.log('[Chat] No beneficiary selected, auto-loading first one...');
      const loaded = await loadBeneficiaries();
      if (loaded.length > 0) {
        beneficiary = loaded[0];
        setCurrentBeneficiary(beneficiary);
        console.log('[Chat] Auto-selected beneficiary:', beneficiary.name);
      } else {
        throw new Error('No beneficiaries found. Please add one first.');
      }
    }
    const beneficiaryName = beneficiary.name || 'the patient';
    const deploymentId = beneficiary.id.toString();
    // Fetch both contexts in PARALLEL for speed
    const [activityContext, dashboardContext] = await Promise.all([
      getActivityContext(token, userName, deploymentId),
      getDashboardContext(token, userName, deploymentId),
    ]);
    // Use activity context, fallback to dashboard
    const context = activityContext || dashboardContext;
    // Build the question with embedded context
    let enhancedQuestion: string;
    if (context) {
      enhancedQuestion = `You are a caring assistant helping monitor ${beneficiaryName}'s wellbeing. Here is the current data about ${beneficiaryName}: ${context} Based on this data, please answer the following question: ${question}`;
    } else {
      enhancedQuestion = `You are a caring assistant helping monitor ${beneficiaryName}'s wellbeing. Please answer: ${question}`;
    }
    // Call API
    const requestBody = new URLSearchParams({
      function: 'voice_ask', clientId: '001', user_name: userName, token: token, question: enhancedQuestion, deployment_id: deploymentId, context: context || '',
    }).toString();
    const response = await fetch(API_URL, { method: 'POST', headers: { 'Content-Type': 'application/x-www-form-urlencoded' }, body: requestBody, });
    const data = await response.json();
    if (data.ok && data.response?.body) {
      return data.response.body;
    } else if (data.status === '401 Unauthorized') {
      throw new Error('Session expired. Please log in again.');
    } else {
      throw new Error('Could not get response');
    }
  };

  // Send the typed message: append the user bubble, call the backend, append the
  // assistant (or error) bubble. Text path only — no TTS here.
  const handleSend = useCallback(async () => {
    const trimmedInput = input.trim();
    if (!trimmedInput || isSending) return;
    // If no beneficiary selected, auto-selection should have happened
    // but if still none, just proceed without context
    if (!currentBeneficiary?.id) {
      console.log('No beneficiary selected, proceeding without context');
    }
    const userMessage: Message = { id: Date.now().toString(), role: 'user', content: trimmedInput, timestamp: new Date(), };
    setMessages((prev) => [...prev, userMessage]);
    setInput('');
    setIsSending(true);
    Keyboard.dismiss();
    try {
      const aiResponse = await sendWithContext(trimmedInput);
      const assistantMessage: Message = { id: (Date.now() + 1).toString(), role: 'assistant', content: aiResponse, timestamp: new Date(), };
      setMessages((prev) => [...prev, assistantMessage]);
    } catch (error) {
      const errorMessage: Message = { id: (Date.now() + 1).toString(), role: 'assistant', content: `Sorry, I encountered an error: ${error instanceof Error ? error.message : 'Unknown error'}. Please try again.`, timestamp: new Date(), };
      setMessages((prev) => [...prev, errorMessage]);
    } finally {
      setIsSending(false);
    }
  }, [input, isSending, currentBeneficiary]);

  // ------------------------------------------------------------------------------
  // NOTE(review): EVERYTHING from here to the end of the component is GARBLED —
  // the extraction stripped all JSX tags, leaving only attribute/child fragments.
  // The remaining tokens are preserved verbatim below for reference only; this
  // section is NOT valid code as-is and must be restored from version control.
  // ------------------------------------------------------------------------------
  const renderMessage = ({ item }: { item: Message }) => { const isUser = item.role === 'user'; return ( {!isUser && ( J )} {item.content} {item.timestamp.toLocaleTimeString([], { hour: '2-digit', minute: '2-digit' })} ); };
  return ( {/* Header */} router.push('/(tabs)/dashboard')} hitSlop={{ top: 10, bottom: 10, left: 10, right: 10 }} > J Julia AI {isSending ? 'Typing...' : currentBeneficiary ? `About ${currentBeneficiary.name}` : 'Online'} {/* Beneficiary Picker Modal */} setShowBeneficiaryPicker(false)} > Select Beneficiary setShowBeneficiaryPicker(false)}> {loadingBeneficiaries ? ( Loading beneficiaries... ) : beneficiaries.length === 0 ? ( No beneficiaries found ) : ( item.id.toString()} renderItem={({ item }) => ( selectBeneficiary(item)} > {item.name.split(' ').map(n => n[0]).join('').slice(0, 2)} {item.name} {item.email && ( {item.email} )} {currentBeneficiary?.id === item.id && ( )} )} style={styles.beneficiaryList} /> )} {/* Messages */} item.id} renderItem={renderMessage} contentContainerStyle={styles.messagesList} showsVerticalScrollIndicator={false} onContentSizeChange={() => flatListRef.current?.scrollToEnd({ animated: true })} /> {/* Voice Feedback Text (for errors) */} {voiceFeedback && !isListening && !isSpeaking && ( {voiceFeedback} )} {/* Beautiful Voice Indicator Animation */} {(isListening || isSpeaking) && ( )} {/* Input */} {/* Microphone / Stop Button */} );
}

// Style sheet for the chat screen. Keys are grouped: header, messages, input,
// beneficiary-picker modal, and voice UI.
const styles = StyleSheet.create({
  container: { flex: 1, backgroundColor: AppColors.surface, },
  header: { flexDirection: 'row', alignItems: 'center', justifyContent: 'space-between', paddingHorizontal: Spacing.md, paddingVertical: Spacing.sm, backgroundColor: AppColors.background, borderBottomWidth: 1, borderBottomColor: AppColors.border, },
  backButton: { padding: Spacing.xs, marginRight: Spacing.sm, },
  headerInfo: { flexDirection: 'row', alignItems: 'center', },
  headerAvatar: { width: 40, height: 40, borderRadius: BorderRadius.full, backgroundColor: AppColors.success, justifyContent: 'center', alignItems: 'center', marginRight: Spacing.sm, },
  headerAvatarText: { fontSize: FontSizes.lg, fontWeight: '600', color: AppColors.white, },
  headerTitle: { fontSize: FontSizes.lg, fontWeight: '600', color: AppColors.textPrimary, },
  headerSubtitle: { fontSize: FontSizes.sm, color: AppColors.success, },
  headerButton: { padding: Spacing.xs, },
  chatContainer: { flex: 1, },
  messagesList: { padding: Spacing.md, paddingBottom: Spacing.lg, },
  messageContainer: { flexDirection: 'row', marginBottom: Spacing.md, alignItems: 'flex-end', },
  userMessageContainer: { justifyContent: 'flex-end', },
  assistantMessageContainer: { justifyContent: 'flex-start', },
  avatarContainer: { width: 32, height: 32, borderRadius: BorderRadius.full, backgroundColor: AppColors.success, justifyContent: 'center', alignItems: 'center', marginRight: Spacing.xs, },
  avatarText: { fontSize: FontSizes.sm, fontWeight: '600', color: AppColors.white, },
  messageBubble: { maxWidth: '75%', padding: Spacing.sm + 4, borderRadius: BorderRadius.lg, },
  userBubble: { backgroundColor: AppColors.primary, borderBottomRightRadius: BorderRadius.sm, },
  assistantBubble: { backgroundColor: AppColors.background, borderBottomLeftRadius: BorderRadius.sm, },
  messageText: { fontSize: FontSizes.base, lineHeight: 22, },
  userMessageText: { color: AppColors.white, },
  assistantMessageText: { color: AppColors.textPrimary, },
  timestamp: { fontSize: FontSizes.xs, color: AppColors.textMuted, marginTop: Spacing.xs, alignSelf: 'flex-end', },
  userTimestamp: { color: 'rgba(255,255,255,0.7)', },
  inputContainer: { flexDirection: 'row', alignItems: 'flex-end', padding: Spacing.md, backgroundColor: AppColors.background, borderTopWidth: 1, borderTopColor: AppColors.border, },
  input: { flex: 1, backgroundColor: AppColors.surface, borderRadius: BorderRadius.xl, paddingHorizontal: Spacing.md, paddingVertical: Spacing.sm, fontSize: FontSizes.base, color: AppColors.textPrimary, maxHeight: 100, marginRight: Spacing.sm, },
  sendButton: { width: 44, height: 44, borderRadius: BorderRadius.full, backgroundColor: AppColors.primary, justifyContent: 'center', alignItems: 'center', },
  sendButtonDisabled: { backgroundColor: AppColors.surface, },
  // Modal styles
  modalOverlay: { flex: 1, backgroundColor: 'rgba(0, 0, 0, 0.5)', justifyContent: 'flex-end', },
  modalContent: { backgroundColor: AppColors.background, borderTopLeftRadius: BorderRadius.xl, borderTopRightRadius: BorderRadius.xl, maxHeight: '70%', paddingBottom: Spacing.xl, },
  modalHeader: { flexDirection: 'row', justifyContent: 'space-between', alignItems: 'center', padding: Spacing.md, borderBottomWidth: 1, borderBottomColor: AppColors.border, },
  modalTitle: { fontSize: FontSizes.lg, fontWeight: '600', color: AppColors.textPrimary, },
  modalLoading: { padding: Spacing.xl, alignItems: 'center', },
  loadingText: { marginTop: Spacing.md, fontSize: FontSizes.base, color: AppColors.textSecondary, },
  modalEmpty: { padding: Spacing.xl, alignItems: 'center', },
  emptyText: { fontSize: FontSizes.base, color: AppColors.textSecondary, },
  beneficiaryList: { paddingHorizontal: Spacing.md, },
  beneficiaryItem: { flexDirection: 'row', alignItems: 'center', padding: Spacing.md, backgroundColor: AppColors.surface, borderRadius: BorderRadius.md, marginTop: Spacing.sm, },
  beneficiaryItemSelected: { backgroundColor: AppColors.primaryLight || '#E3F2FD', borderWidth: 1, borderColor: AppColors.primary, },
  beneficiaryAvatar: { width: 44, height: 44, borderRadius: BorderRadius.full, backgroundColor: AppColors.primary, justifyContent: 'center', alignItems: 'center', marginRight: Spacing.md, },
  beneficiaryAvatarText: { fontSize: FontSizes.base, fontWeight: '600', color: AppColors.white, },
  beneficiaryInfo: { flex: 1, },
  beneficiaryName: { fontSize: FontSizes.base, fontWeight: '500', color: AppColors.textPrimary, },
  beneficiaryEmail: { fontSize: FontSizes.sm, color: AppColors.textSecondary, marginTop: 2, },
  // Voice UI styles
  voiceFeedbackContainer: { paddingHorizontal: Spacing.md, paddingVertical: Spacing.sm, backgroundColor: 'rgba(255, 152, 0, 0.1)', borderRadius: BorderRadius.md, marginHorizontal: Spacing.md, marginBottom: Spacing.sm, },
  voiceFeedbackText: { fontSize: FontSizes.sm, color: AppColors.warning || '#FF9800', textAlign: 'center', },
  micButton: { width: 44, height: 44, borderRadius: 22, backgroundColor: AppColors.surface, justifyContent: 'center', alignItems: 'center', marginRight: Spacing.sm, borderWidth: 1, borderColor: AppColors.border, },
  micButtonActive: { backgroundColor: AppColors.primary, borderColor: AppColors.primary, },
  micButtonSpeaking: { backgroundColor: AppColors.error || '#E53935', borderColor: AppColors.error || '#E53935', },
  micButtonDisabled: { opacity: 0.5, },
  // Header buttons (for beneficiary picker)
  headerButtons: { flexDirection: 'row', gap: Spacing.xs, },
});

// Wrap with TTSErrorBoundary to catch TTS crashes
// NOTE(review): the JSX body here was also stripped by extraction (presumably
// ChatScreenContent wrapped in TTSErrorBoundary) — restore from VCS.
export default function ChatScreen() { return ( ); }