import React, { useState, useCallback, useRef, useEffect } from 'react';
import {
  View,
  Text,
  StyleSheet,
  FlatList,
  TextInput,
  TouchableOpacity,
  KeyboardAvoidingView,
  Platform,
  Alert,
  ActivityIndicator,
  Modal,
  ScrollView,
} from 'react-native';
import { Ionicons } from '@expo/vector-icons';
import { SafeAreaView } from 'react-native-safe-area-context';
import { useFocusEffect } from 'expo-router';
import { api } from '@/services/api';
import { useBeneficiary } from '@/contexts/BeneficiaryContext';
import { AppColors, BorderRadius, FontSizes, Spacing } from '@/constants/theme';
import type { Message } from '@/types';
import { useTTS } from '@/hooks/useTTS';
import { AVAILABLE_VOICES, getCurrentVoice, setVoice, type PiperVoice } from '@/services/sherpaTTS';

// Load speech recognition lazily so the app still runs where the native
// module is unavailable (e.g. Expo Go or a build without the plugin).
let ExpoSpeechRecognitionModule: any = null;
try {
  const speechRecognition = require('expo-speech-recognition');
  ExpoSpeechRecognitionModule = speechRecognition.ExpoSpeechRecognitionModule;
} catch (e) {
  console.log('expo-speech-recognition not available');
}

export default function ChatScreen() {
  const { currentBeneficiary, getBeneficiaryContext } = useBeneficiary();
  const [messages, setMessages] = useState<Message[]>([
    {
      id: '1',
      role: 'assistant',
      content: 'Hello! I\'m Julia, your AI assistant. How can I help you today?',
      timestamp: new Date(),
    },
  ]);
  const [input, setInput] = useState('');
  const [isSending, setIsSending] = useState(false);
  const [isListening, setIsListening] = useState(false);
  const [recognizedText, setRecognizedText] = useState('');
  const [showVoicePicker, setShowVoicePicker] = useState(false);
  const [selectedVoice, setSelectedVoice] = useState(getCurrentVoice());
  const [isChangingVoice, setIsChangingVoice] = useState(false);
  const [voiceModeEnabled, setVoiceModeEnabled] = useState(false); // Voice Mode toggle

  const flatListRef = useRef<FlatList>(null);
  const lastSendTimeRef = useRef(0);
  const SEND_COOLDOWN_MS = 1000; // 1 second cooldown between messages

  // TTS hook for speaking responses
  const { speak, stop, isSpeaking } = useTTS();

  // Stop TTS and the mic when navigating away from this screen
  useFocusEffect(
    useCallback(() => {
      // Screen focused
      return () => {
        // Screen unfocused - cleanup
        stop(); // Stop any playing TTS
        if (ExpoSpeechRecognitionModule && isListening) {
          ExpoSpeechRecognitionModule.stop();
          setIsListening(false);
        }
        setVoiceModeEnabled(false); // Disable voice mode on leave
      };
    }, [stop, isListening])
  );

  // Handle voice change
  const handleVoiceChange = useCallback(async (voice: PiperVoice) => {
    if (voice.id === selectedVoice.id) {
      setShowVoicePicker(false);
      return;
    }
    setIsChangingVoice(true);
    try {
      const success = await setVoice(voice.id);
      if (success) {
        setSelectedVoice(voice);
        // Test the new voice
        speak(`Hello, I'm ${voice.name}. How can I help you?`);
      } else {
        Alert.alert('Error', `Failed to switch to ${voice.name} voice.`);
      }
    } catch (error) {
      Alert.alert('Error', 'Failed to change voice.');
    } finally {
      setIsChangingVoice(false);
      setShowVoicePicker(false);
    }
  }, [selectedVoice, speak]);

  // Speech recognition events (if available).
  // Note: useSpeechRecognitionEvent is a React hook and cannot be called
  // inside an effect (or conditionally), so subscribe through the module's
  // event-emitter API instead.
  useEffect(() => {
    if (!ExpoSpeechRecognitionModule) return;

    // Handle recognized speech results (interim and final)
    const resultSubscription = ExpoSpeechRecognitionModule.addListener('result', (event: any) => {
      const transcript = event.results?.[0]?.transcript || '';
      setRecognizedText(transcript);
      if (event.isFinal) {
        setInput(transcript);
        setIsListening(false);
      }
    });

    // Handle errors
    const errorSubscription = ExpoSpeechRecognitionModule.addListener('error', (event: any) => {
      console.log('Speech recognition error:', event.error);
      setIsListening(false);
    });

    // Handle end of recognition
    const endSubscription = ExpoSpeechRecognitionModule.addListener('end', () => {
      setIsListening(false);
    });

    return () => {
      resultSubscription?.remove?.();
      errorSubscription?.remove?.();
      endSubscription?.remove?.();
    };
  }, []);

  // Start voice input
  const startListening = useCallback(async () => {
    if (!ExpoSpeechRecognitionModule) {
      Alert.alert('Not Available', 'Voice input is not available on this device.');
      return;
    }

    // PREVENT SELF-RECORDING: don't start the mic while TTS is speaking
    if (isSpeaking) {
      console.log('[Voice] Blocked: TTS is still speaking');
      return;
    }

    try {
      const result = await ExpoSpeechRecognitionModule.requestPermissionsAsync();
      if (!result.granted) {
        Alert.alert('Permission Denied', 'Please enable microphone access to use voice input.');
        return;
      }

      // Enable voice mode when the user starts listening
      setVoiceModeEnabled(true);
      setIsListening(true);
      setRecognizedText('');
      ExpoSpeechRecognitionModule.start({
        lang: 'en-US',
        interimResults: true,
        maxAlternatives: 1,
      });
    } catch (error) {
      console.error('Failed to start speech recognition:', error);
      setIsListening(false);
      Alert.alert('Error', 'Failed to start voice input.');
    }
  }, [isSpeaking]);

  // Stop voice input
  const stopListening = useCallback(() => {
    if (ExpoSpeechRecognitionModule) {
      ExpoSpeechRecognitionModule.stop();
    }
    setIsListening(false);
  }, []);

  const handleSend = useCallback(async () => {
    const trimmedInput = input.trim();
    if (!trimmedInput || isSending) return;

    // Debounce: prevent rapid-fire messages
    const now = Date.now();
    if (now - lastSendTimeRef.current < SEND_COOLDOWN_MS) {
      return;
    }
    lastSendTimeRef.current = now;

    // Security: require a beneficiary to be selected
    if (!currentBeneficiary?.id) {
      Alert.alert(
        'Select Beneficiary',
        'Please select a beneficiary from the Dashboard tab before starting a conversation.',
        [{ text: 'OK' }]
      );
      return;
    }

    const userMessage: Message = {
      id: Date.now().toString(),
      role: 'user',
      content: trimmedInput,
      timestamp: new Date(),
    };
    setMessages((prev) => [...prev, userMessage]);
    setInput('');
    setIsSending(true);

    try {
      // Prepend beneficiary context to the question if available
      const beneficiaryContext = getBeneficiaryContext();
      const questionWithContext = beneficiaryContext
        ? `${beneficiaryContext} ${trimmedInput}`
        : trimmedInput;
      // Pass deployment_id from the selected beneficiary (required, no fallback)
      const deploymentId = currentBeneficiary.id.toString();
      const response = await api.sendMessage(questionWithContext, deploymentId);

      if (response.ok && response.data?.response) {
        const responseText = response.data.response.body;
        const assistantMessage: Message = {
          id: (Date.now() + 1).toString(),
          role: 'assistant',
          content: responseText,
          timestamp: new Date(),
        };
        setMessages((prev) => [...prev, assistantMessage]);
        // Speak the response using neural TTS
        speak(responseText);
      } else {
        const errorMessage: Message = {
          id: (Date.now() + 1).toString(),
          role: 'assistant',
          content: 'Sorry, I encountered an error. Please try again.',
          timestamp: new Date(),
        };
        setMessages((prev) => [...prev, errorMessage]);
      }
    } catch (error) {
      const errorMessage: Message = {
        id: (Date.now() + 1).toString(),
        role: 'assistant',
        content: 'Sorry, I couldn\'t connect to the server. Please check your internet connection.',
        timestamp: new Date(),
      };
      setMessages((prev) => [...prev, errorMessage]);
    } finally {
      setIsSending(false);
    }
  }, [input, isSending, currentBeneficiary, getBeneficiaryContext, speak]);

  // NOTE: The JSX below was reconstructed from the surviving text content and
  // stylesheet keys after the original tags were lost; Ionicons glyph names
  // and numeric props are best-guess assumptions.
  const renderMessage = ({ item }: { item: Message }) => {
    const isUser = item.role === 'user';
    return (
      <View
        style={[
          styles.messageContainer,
          isUser ? styles.userMessageContainer : styles.assistantMessageContainer,
        ]}
      >
        {!isUser && (
          <View style={styles.avatarContainer}>
            <Text style={styles.avatarText}>J</Text>
          </View>
        )}
        <View style={[styles.messageBubble, isUser ? styles.userBubble : styles.assistantBubble]}>
          <Text
            style={[styles.messageText, isUser ? styles.userMessageText : styles.assistantMessageText]}
          >
            {item.content}
          </Text>
          <Text style={[styles.timestamp, isUser && styles.userTimestamp]}>
            {item.timestamp.toLocaleTimeString([], { hour: '2-digit', minute: '2-digit' })}
          </Text>
        </View>
      </View>
    );
  };

  return (
    <SafeAreaView style={styles.container}>
      {/* Header */}
      <View style={styles.header}>
        <View style={styles.headerInfo}>
          <View style={styles.headerAvatar}>
            <Text style={styles.headerAvatarText}>J</Text>
          </View>
          <View>
            <Text style={styles.headerTitle}>Julia AI</Text>
            <Text style={styles.headerSubtitle}>
              {isSending
                ? 'Typing...'
                : currentBeneficiary
                ? `About ${currentBeneficiary.name}`
                : 'Online'}
            </Text>
          </View>
        </View>

        <View style={styles.headerButtons}>
          {/* Voice Mode Toggle */}
          {voiceModeEnabled && (
            <TouchableOpacity
              style={styles.voiceModeActive}
              onPress={() => {
                stop();
                stopListening();
                setVoiceModeEnabled(false);
              }}
            >
              <Ionicons name="mic" size={14} color={AppColors.white} />
              <Text style={styles.voiceModeText}>ON</Text>
            </TouchableOpacity>
          )}

          {/* TTS Speaking indicator */}
          {isSpeaking && (
            <View style={styles.speakingIndicator}>
              <Ionicons name="volume-high" size={16} color={AppColors.white} />
            </View>
          )}

          {/* Voice Picker */}
          <TouchableOpacity style={styles.headerButton} onPress={() => setShowVoicePicker(true)}>
            <Ionicons name="settings-outline" size={22} color={AppColors.textPrimary} />
          </TouchableOpacity>
        </View>
      </View>

      <KeyboardAvoidingView
        style={styles.chatContainer}
        behavior={Platform.OS === 'ios' ? 'padding' : undefined}
      >
        {/* Messages */}
        <FlatList
          ref={flatListRef}
          data={messages}
          keyExtractor={(item) => item.id}
          renderItem={renderMessage}
          contentContainerStyle={styles.messagesList}
          showsVerticalScrollIndicator={false}
          onContentSizeChange={() => flatListRef.current?.scrollToEnd({ animated: true })}
        />

        {/* Listening indicator */}
        {isListening && (
          <View style={styles.listeningIndicator}>
            <ActivityIndicator size="small" color={AppColors.primary} />
            <Text style={styles.listeningText}>{recognizedText || 'Listening...'}</Text>
            <TouchableOpacity style={styles.stopButton} onPress={stopListening}>
              <Ionicons name="stop-circle" size={24} color={AppColors.primary} />
            </TouchableOpacity>
          </View>
        )}

        {/* Input */}
        <View style={styles.inputContainer}>
          {/* Microphone button */}
          <TouchableOpacity
            style={[styles.micButton, isListening && styles.micButtonActive]}
            onPress={isListening ? stopListening : startListening}
          >
            <Ionicons name="mic" size={22} color={isListening ? AppColors.white : AppColors.primary} />
          </TouchableOpacity>
          <TextInput
            style={styles.input}
            value={input}
            onChangeText={setInput}
            placeholder="Type a message..."
            placeholderTextColor={AppColors.textMuted}
            multiline
          />
          <TouchableOpacity
            style={[styles.sendButton, (!input.trim() || isSending) && styles.sendButtonDisabled]}
            onPress={handleSend}
            disabled={!input.trim() || isSending}
          >
            {isSending ? (
              <ActivityIndicator size="small" color={AppColors.white} />
            ) : (
              <Ionicons name="send" size={20} color={AppColors.white} />
            )}
          </TouchableOpacity>
        </View>
      </KeyboardAvoidingView>

      {/* Voice Picker Modal */}
      <Modal
        visible={showVoicePicker}
        transparent
        animationType="slide"
        onRequestClose={() => setShowVoicePicker(false)}
      >
        <View style={styles.modalOverlay}>
          <View style={styles.modalContent}>
            <View style={styles.modalHeader}>
              <Text style={styles.modalTitle}>Select Voice</Text>
              <TouchableOpacity
                style={styles.modalCloseButton}
                onPress={() => setShowVoicePicker(false)}
              >
                <Ionicons name="close" size={24} color={AppColors.textPrimary} />
              </TouchableOpacity>
            </View>
            <Text style={styles.modalSubtitle}>Neural TTS voices for Julia AI</Text>
            <ScrollView style={styles.voiceList}>
              {AVAILABLE_VOICES.map((voice) => (
                <TouchableOpacity
                  key={voice.id}
                  style={[styles.voiceItem, selectedVoice.id === voice.id && styles.voiceItemSelected]}
                  onPress={() => handleVoiceChange(voice)}
                  disabled={isChangingVoice}
                >
                  <View style={styles.voiceIcon}>
                    <Ionicons name="person" size={24} color={AppColors.primary} />
                  </View>
                  <View style={styles.voiceInfo}>
                    <Text
                      style={[styles.voiceName, selectedVoice.id === voice.id && styles.voiceNameSelected]}
                    >
                      {voice.name}
                    </Text>
                    <Text
                      style={[
                        styles.voiceDescription,
                        selectedVoice.id === voice.id && styles.voiceDescriptionSelected,
                      ]}
                    >
                      {voice.description}
                    </Text>
                  </View>
                  {selectedVoice.id === voice.id && (
                    <Ionicons name="checkmark-circle" size={24} color={AppColors.primary} />
                  )}
                  {isChangingVoice && selectedVoice.id !== voice.id && (
                    <ActivityIndicator size="small" color={AppColors.primary} />
                  )}
                </TouchableOpacity>
              ))}
              <Text style={styles.voiceHint}>Tap a voice to hear a preview</Text>
            </ScrollView>
          </View>
        </View>
      </Modal>
    </SafeAreaView>
  );
}

const styles = StyleSheet.create({
  container: {
    flex: 1,
    backgroundColor: AppColors.surface,
  },
  header: {
    flexDirection: 'row',
    alignItems: 'center',
    justifyContent: 'space-between',
    paddingHorizontal: Spacing.md,
    paddingVertical: Spacing.sm,
    backgroundColor: AppColors.background,
    borderBottomWidth: 1,
    borderBottomColor: AppColors.border,
  },
  headerInfo: {
    flexDirection: 'row',
    alignItems: 'center',
  },
  headerAvatar: {
    width: 40,
    height: 40,
    borderRadius: BorderRadius.full,
    backgroundColor: AppColors.success,
    justifyContent: 'center',
    alignItems: 'center',
    marginRight: Spacing.sm,
  },
  headerAvatarText: {
    fontSize: FontSizes.lg,
    fontWeight: '600',
    color: AppColors.white,
  },
  headerTitle: {
    fontSize: FontSizes.lg,
    fontWeight: '600',
    color: AppColors.textPrimary,
  },
  headerSubtitle: {
    fontSize: FontSizes.sm,
    color: AppColors.success,
  },
  headerButtons: {
    flexDirection: 'row',
    alignItems: 'center',
    gap: 8,
  },
  headerButton: {
    padding: Spacing.xs,
  },
  voiceModeActive: {
    flexDirection: 'row',
    alignItems: 'center',
    backgroundColor: AppColors.primary,
    borderRadius: BorderRadius.md,
    paddingHorizontal: 10,
    paddingVertical: 6,
    gap: 4,
  },
  voiceModeText: {
    color: AppColors.white,
    fontSize: FontSizes.xs,
    fontWeight: '600',
  },
  speakingIndicator: {
    backgroundColor: AppColors.success,
    borderRadius: BorderRadius.md,
    padding: 6,
  },
  chatContainer: {
    flex: 1,
  },
  messagesList: {
    padding: Spacing.md,
    paddingBottom: Spacing.lg,
  },
  messageContainer: {
    flexDirection: 'row',
    marginBottom: Spacing.md,
    alignItems: 'flex-end',
  },
  userMessageContainer: {
    justifyContent: 'flex-end',
  },
  assistantMessageContainer: {
    justifyContent: 'flex-start',
  },
  avatarContainer: {
    width: 32,
    height: 32,
    borderRadius: BorderRadius.full,
    backgroundColor: AppColors.success,
    justifyContent: 'center',
    alignItems: 'center',
    marginRight: Spacing.xs,
  },
  avatarText: {
    fontSize: FontSizes.sm,
    fontWeight: '600',
    color: AppColors.white,
  },
  messageBubble: {
    maxWidth: '75%',
    padding: Spacing.sm + 4,
    borderRadius: BorderRadius.lg,
  },
  userBubble: {
    backgroundColor: AppColors.primary,
    borderBottomRightRadius: BorderRadius.sm,
  },
  assistantBubble: {
    backgroundColor: AppColors.background,
    borderBottomLeftRadius: BorderRadius.sm,
  },
  messageText: {
    fontSize: FontSizes.base,
    lineHeight: 22,
  },
  userMessageText: {
    color: AppColors.white,
  },
  assistantMessageText: {
    color: AppColors.textPrimary,
  },
  timestamp: {
    fontSize: FontSizes.xs,
    color: AppColors.textMuted,
    marginTop: Spacing.xs,
    alignSelf: 'flex-end',
  },
  userTimestamp: {
    color: 'rgba(255,255,255,0.7)',
  },
  inputContainer: {
    flexDirection: 'row',
    alignItems: 'flex-end',
    padding: Spacing.md,
    backgroundColor: AppColors.background,
    borderTopWidth: 1,
    borderTopColor: AppColors.border,
  },
  input: {
    flex: 1,
    backgroundColor: AppColors.surface,
    borderRadius: BorderRadius.xl,
    paddingHorizontal: Spacing.md,
    paddingVertical: Spacing.sm,
    fontSize: FontSizes.base,
    color: AppColors.textPrimary,
    maxHeight: 100,
    marginRight: Spacing.sm,
  },
  sendButton: {
    width: 44,
    height: 44,
    borderRadius: BorderRadius.full,
    backgroundColor: AppColors.primary,
    justifyContent: 'center',
    alignItems: 'center',
  },
  sendButtonDisabled: {
    backgroundColor: AppColors.surface,
  },
  micButton: {
    width: 44,
    height: 44,
    borderRadius: BorderRadius.full,
    backgroundColor: AppColors.surface,
    justifyContent: 'center',
    alignItems: 'center',
    marginRight: Spacing.sm,
    borderWidth: 1,
    borderColor: AppColors.primary,
  },
  micButtonActive: {
    backgroundColor: AppColors.primary,
    borderColor: AppColors.primary,
  },
  listeningIndicator: {
    flexDirection: 'row',
    alignItems: 'center',
    paddingHorizontal: Spacing.md,
    paddingVertical: Spacing.sm,
    backgroundColor: AppColors.surface,
    borderTopWidth: 1,
    borderTopColor: AppColors.border,
  },
  listeningText: {
    flex: 1,
    marginLeft: Spacing.sm,
    fontSize: FontSizes.sm,
    color: AppColors.textSecondary,
    fontStyle: 'italic',
  },
  stopButton: {
    padding: Spacing.xs,
  },
  // Voice Picker Modal styles
  modalOverlay: {
    flex: 1,
    backgroundColor: 'rgba(0, 0, 0, 0.5)',
    justifyContent: 'flex-end',
  },
  modalContent: {
    backgroundColor: AppColors.background,
    borderTopLeftRadius: BorderRadius.xl,
    borderTopRightRadius: BorderRadius.xl,
    paddingTop: Spacing.lg,
    paddingBottom: Spacing.xl + 20,
    maxHeight: '60%',
  },
  modalHeader: {
    flexDirection: 'row',
    justifyContent: 'space-between',
    alignItems: 'center',
    paddingHorizontal: Spacing.lg,
    marginBottom: Spacing.sm,
  },
  modalTitle: {
    fontSize: FontSizes.xl,
    fontWeight: '600',
    color: AppColors.textPrimary,
  },
  modalCloseButton: {
    padding: Spacing.xs,
  },
  modalSubtitle: {
    fontSize: FontSizes.sm,
    color: AppColors.textSecondary,
    paddingHorizontal: Spacing.lg,
    marginBottom: Spacing.md,
  },
  voiceList: {
    paddingHorizontal: Spacing.lg,
  },
  voiceItem: {
    flexDirection: 'row',
    alignItems: 'center',
    padding: Spacing.md,
    backgroundColor: AppColors.surface,
    borderRadius: BorderRadius.lg,
    marginBottom: Spacing.sm,
    borderWidth: 2,
    borderColor: 'transparent',
  },
  voiceItemSelected: {
    borderColor: AppColors.primary,
    backgroundColor: `${AppColors.primary}15`,
  },
  voiceIcon: {
    width: 48,
    height: 48,
    borderRadius: BorderRadius.full,
    backgroundColor: AppColors.primary + '20',
    justifyContent: 'center',
    alignItems: 'center',
    marginRight: Spacing.md,
  },
  voiceInfo: {
    flex: 1,
  },
  voiceName: {
    fontSize: FontSizes.base,
    fontWeight: '600',
    color: AppColors.textPrimary,
    marginBottom: 2,
  },
  voiceNameSelected: {
    color: AppColors.primary,
  },
  voiceDescription: {
    fontSize: FontSizes.sm,
    color: AppColors.textSecondary,
  },
  voiceDescriptionSelected: {
    color: AppColors.primary,
  },
  voiceHint: {
    fontSize: FontSizes.xs,
    color: AppColors.textSecondary,
    marginTop: 4,
  },
});