Sergei da2c4bebc9 Integrate voice chat with TTS and speech recognition
App screens:
- chat.tsx: Voice-enabled chat with TTS responses
- debug.tsx: TTS debugging and testing screen
- index.tsx: Updated home with voice indicators
- _layout.tsx: Added TTS and error boundaries

Config:
- app.json: Microphone permissions for voice input
- package.json: Added Sherpa ONNX dependencies
- constants/theme.ts: Voice UI colors

Features:
- Voice input via speech recognition
- TTS voice output for chat responses
- Real-time voice activity indication
- Debug screen for TTS testing
- Error boundaries for stability

User experience:
- Hands-free chat interaction
- Visual feedback during voice processing
- Graceful error handling
2026-01-14 19:09:50 -08:00

import React, { useState, useCallback, useRef, useEffect } from 'react';
import {
View,
Text,
StyleSheet,
FlatList,
TextInput,
TouchableOpacity,
KeyboardAvoidingView,
Platform,
Modal,
ActivityIndicator,
Keyboard,
Animated,
Alert,
Linking,
} from 'react-native';
import { Ionicons } from '@expo/vector-icons';
import { SafeAreaView } from 'react-native-safe-area-context';
import * as SecureStore from 'expo-secure-store';
import { useRouter } from 'expo-router';
import { api } from '@/services/api';
import { useBeneficiary } from '@/contexts/BeneficiaryContext';
import { AppColors, BorderRadius, FontSizes, Spacing } from '@/constants/theme';
import type { Message, Beneficiary } from '@/types';
import { useSpeechRecognition } from '@/hooks/useSpeechRecognition';
import sherpaTTS from '@/services/sherpaTTS';
import { VoiceIndicator } from '@/components/VoiceIndicator';
import { TTSErrorBoundary } from '@/components/TTSErrorBoundary';
const API_URL = 'https://eluxnetworks.net/function/well-api/api';
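// Chat screen body. Renders text + voice chat with Julia: speech recognition
// for input, SherpaTTS for spoken replies, and beneficiary data as context.
// The default export at the bottom wraps this in TTSErrorBoundary.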
function ChatScreenContent() {
const router = useRouter();
const { currentBeneficiary, setCurrentBeneficiary } = useBeneficiary();
const [messages, setMessages] = useState<Message[]>([
{
id: '1',
role: 'assistant',
content: 'Hello! I\'m Julia, your AI assistant. How can I help you today?',
timestamp: new Date(),
},
]);
const [input, setInput] = useState('');
const [isSending, setIsSending] = useState(false);
const flatListRef = useRef<FlatList>(null);
// Voice state
const [isSpeaking, setIsSpeaking] = useState(false);
const [ttsInitialized, setTtsInitialized] = useState(false);
const [voiceFeedback, setVoiceFeedback] = useState<string | null>(null);
const [isVoiceConversation, setIsVoiceConversation] = useState(false); // Auto-listen mode
const pulseAnim = useRef(new Animated.Value(1)).current;
// Speech recognition hook
const {
isListening,
recognizedText,
startListening,
stopListening,
isAvailable: speechRecognitionAvailable,
requestPermission,
} = useSpeechRecognition();
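// Assumed shape of useSpeechRecognition, inferred from the call sites below
// (not the hook's own declarations): isListening/recognizedText are reactive
// state, startListening takes { continuous: boolean }, requestPermission
// resolves to a boolean, and isAvailable reports platform support.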
// Beneficiary picker state
const [showBeneficiaryPicker, setShowBeneficiaryPicker] = useState(false);
const [beneficiaries, setBeneficiaries] = useState<Beneficiary[]>([]);
const [loadingBeneficiaries, setLoadingBeneficiaries] = useState(false);
// Initialize TTS on mount
useEffect(() => {
const initTTS = async () => {
try {
const success = await sherpaTTS.initialize();
setTtsInitialized(success);
console.log('[Chat] SherpaTTS initialized:', success);
} catch (error) {
console.log('[Chat] SherpaTTS init failed; voice output disabled:', error);
}
};
initTTS();
return () => {
sherpaTTS.deinitialize();
};
}, []);
// Pulse animation for listening state
useEffect(() => {
if (isListening) {
const pulse = Animated.loop(
Animated.sequence([
Animated.timing(pulseAnim, {
toValue: 1.3,
duration: 500,
useNativeDriver: true,
}),
Animated.timing(pulseAnim, {
toValue: 1,
duration: 500,
useNativeDriver: true,
}),
])
);
pulse.start();
return () => pulse.stop();
} else {
pulseAnim.setValue(1);
}
}, [isListening, pulseAnim]);
// Track if we were just listening (to show feedback when stopped)
const wasListeningRef = useRef(false);
// Auto-send when speech recognition completes
useEffect(() => {
if (!isListening && wasListeningRef.current) {
// We just stopped listening
wasListeningRef.current = false;
if (recognizedText.trim()) {
// We have text - send it
setInput(recognizedText);
setTimeout(() => {
if (recognizedText.trim()) {
handleVoiceSend(recognizedText.trim());
}
}, 300);
} else {
// No text recognized (C4 scenario) - show brief feedback
setInput('');
setVoiceFeedback("Didn't catch that. Try again.");
// Auto-hide after 2 seconds
setTimeout(() => setVoiceFeedback(null), 2000);
}
}
if (isListening) {
wasListeningRef.current = true;
}
}, [isListening, recognizedText]);
// Auto-start listening after TTS finishes
const autoStartListening = useCallback(async () => {
if (!isVoiceConversation) return;
// IMPORTANT: Wait longer to ensure TTS audio has fully stopped
// This prevents the microphone from capturing TTS output
await new Promise(resolve => setTimeout(resolve, 800));
// Double-check we're not speaking anymore (TTS may have restarted)
const stillSpeaking = await sherpaTTS.isSpeaking().catch(() => false);
if (stillSpeaking) {
console.log('[Chat] TTS still speaking, not starting listening yet');
return;
}
const hasPermission = await requestPermission();
if (hasPermission && isVoiceConversation) {
console.log('[Chat] Auto-starting listening after TTS');
startListening({ continuous: false });
}
}, [isVoiceConversation, requestPermission, startListening]);
// TTS function - speaks via SherpaTTS when available (no-op with a warning otherwise)
const speakText = useCallback(async (text: string, shouldAutoListen = false) => {
if (isSpeaking) return;
// CRITICAL: Stop any active listening BEFORE TTS starts
// This prevents the microphone from capturing TTS audio output
if (isListening) {
console.log('[Chat] Stopping listening before TTS');
stopListening();
wasListeningRef.current = false; // Prevent auto-send of any partial text
}
setIsSpeaking(true);
if (shouldAutoListen) {
setIsVoiceConversation(true);
}
const handleDone = () => {
setIsSpeaking(false);
// Auto-start listening if in voice conversation mode
if (shouldAutoListen || isVoiceConversation) {
autoStartListening();
}
};
try {
if (ttsInitialized && sherpaTTS.isAvailable()) {
await sherpaTTS.speak(text, {
onDone: handleDone,
onError: (error) => {
console.error('[Chat] TTS speak error:', error);
setIsSpeaking(false);
},
});
} else {
console.warn('[Chat] TTS not available');
setIsSpeaking(false);
}
} catch (error) {
console.error('[Chat] TTS error:', error);
setIsSpeaking(false);
}
}, [isSpeaking, isListening, stopListening, ttsInitialized, isVoiceConversation, autoStartListening]);
// Stop TTS only (without exiting voice mode)
const stopTTS = useCallback(() => {
if (ttsInitialized && sherpaTTS.isAvailable()) {
sherpaTTS.stop();
}
setIsSpeaking(false);
}, [ttsInitialized]);
// Stop TTS and exit voice conversation mode completely
const stopSpeaking = useCallback(() => {
stopTTS();
setIsVoiceConversation(false); // Exit voice mode when user stops TTS
}, [stopTTS]);
// Smart handler for VoiceIndicator tap - behavior depends on current mode
const handleVoiceIndicatorTap = useCallback(async (currentMode: 'listening' | 'speaking') => {
console.log('[Chat] VoiceIndicator tapped in mode:', currentMode);
if (currentMode === 'listening') {
// User tapped while we're recording their voice
// Action: Cancel recording and exit voice mode completely
console.log('[Chat] Cancelling listening, exiting voice mode');
stopListening();
setIsVoiceConversation(false);
wasListeningRef.current = false; // Prevent auto-send of partial text
setInput(''); // Clear any partial text
} else if (currentMode === 'speaking') {
// User tapped while AI is speaking
// Action: Interrupt AI and immediately start listening to user (like interrupting in conversation)
console.log('[Chat] Interrupting AI speech, starting to listen');
stopTTS();
// Small delay then start listening
await new Promise(resolve => setTimeout(resolve, 200));
const hasPermission = await requestPermission();
if (hasPermission) {
startListening({ continuous: false });
}
}
}, [stopListening, stopTTS, requestPermission, startListening]);
// Show permission denied alert
const showPermissionDeniedAlert = useCallback(() => {
Alert.alert(
'Microphone Access Required',
'To use voice input, please allow microphone access in Settings.',
[
{ text: 'Cancel', style: 'cancel' },
{
text: 'Open Settings',
onPress: () => Linking.openSettings(),
},
]
);
}, []);
// Handle voice input toggle
const handleVoiceToggle = useCallback(async () => {
if (isListening) {
// User tapped while listening - stop and check if we have text
stopListening();
// Note: The useEffect below handles auto-send if recognizedText exists
// If no text was recognized, it just cancels (B3 scenario)
} else {
// Stop any ongoing speech first
if (isSpeaking) {
stopSpeaking();
}
// Dismiss keyboard (E1 scenario)
Keyboard.dismiss();
// Request permission if needed
const hasPermission = await requestPermission();
if (!hasPermission) {
// Show alert with option to open settings (C1 scenario)
showPermissionDeniedAlert();
return;
}
startListening({ continuous: false });
}
}, [isListening, isSpeaking, startListening, stopListening, stopSpeaking, requestPermission, showPermissionDeniedAlert]);
// Handle sending voice message
const handleVoiceSend = useCallback(async (text: string) => {
if (!text.trim() || isSending) return;
// Mark that we're in voice conversation mode
setIsVoiceConversation(true);
const userMessage: Message = {
id: Date.now().toString(),
role: 'user',
content: text,
timestamp: new Date(),
};
setMessages((prev) => [...prev, userMessage]);
setInput('');
setIsSending(true);
try {
const aiResponse = await sendWithContext(text);
const assistantMessage: Message = {
id: (Date.now() + 1).toString(),
role: 'assistant',
content: aiResponse,
timestamp: new Date(),
};
setMessages((prev) => [...prev, assistantMessage]);
// Speak the response with auto-listen enabled
speakText(aiResponse, true);
} catch (error) {
const errorText = `Sorry, I encountered an error: ${error instanceof Error ? error.message : 'Unknown error'}`;
const errorMessage: Message = {
id: (Date.now() + 1).toString(),
role: 'assistant',
content: errorText,
timestamp: new Date(),
};
setMessages((prev) => [...prev, errorMessage]);
// Speak error message with auto-listen enabled
speakText(errorText, true);
} finally {
setIsSending(false);
}
}, [isSending, speakText]);
// Load beneficiaries when picker opens
const loadBeneficiaries = useCallback(async () => {
setLoadingBeneficiaries(true);
try {
const response = await api.getAllBeneficiaries();
if (response.ok && response.data) {
setBeneficiaries(response.data);
return response.data;
}
return [];
} catch (error) {
console.error('Failed to load beneficiaries:', error);
return [];
} finally {
setLoadingBeneficiaries(false);
}
}, []);
// Auto-select first beneficiary on mount if none selected
useEffect(() => {
const autoSelectBeneficiary = async () => {
if (!currentBeneficiary) {
const loaded = await loadBeneficiaries();
if (loaded.length > 0) {
setCurrentBeneficiary(loaded[0]);
console.log('Auto-selected first beneficiary:', loaded[0].name);
}
}
};
autoSelectBeneficiary();
}, []);
const openBeneficiaryPicker = useCallback(() => {
setShowBeneficiaryPicker(true);
loadBeneficiaries();
}, [loadBeneficiaries]);
const selectBeneficiary = useCallback((beneficiary: Beneficiary) => {
setCurrentBeneficiary(beneficiary);
setShowBeneficiaryPicker(false);
}, [setCurrentBeneficiary]);
// Fetch activity data for context
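// Assumed activities_report_details response shape, reconstructed from the
// field accesses below (the real well-api schema may differ):
// {
//   alert_text?: string,
//   chart_data?: Array<{
//     name: string;                                      // e.g. 'Weekly'
//     rooms: Array<{
//       name: string;
//       data: Array<{ hours: number; events: number }>;  // last entry = today
//     }>;
//   }>;
// }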
const getActivityContext = async (token: string, userName: string, deploymentId: string): Promise<string> => {
try {
const response = await fetch(API_URL, {
method: 'POST',
headers: { 'Content-Type': 'application/x-www-form-urlencoded' },
body: new URLSearchParams({
function: 'activities_report_details',
user_name: userName,
token: token,
deployment_id: deploymentId,
filter: '0',
}).toString(),
});
const data = await response.json();
if (!data.chart_data || data.chart_data.length === 0) return '';
const weeklyData = data.chart_data.find((d: any) => d.name === 'Weekly');
if (!weeklyData) return '';
const lines: string[] = [];
if (data.alert_text) lines.push(`Alert status: ${data.alert_text}`);
const todayStats: string[] = [];
for (const room of weeklyData.rooms) {
const todayData = room.data[room.data.length - 1];
if (todayData && todayData.hours > 0) {
todayStats.push(`${room.name}: ${todayData.hours.toFixed(1)} hours (${todayData.events} events)`);
}
}
if (todayStats.length > 0) lines.push(`Today's activity: ${todayStats.join(', ')}`);
const weeklyStats: string[] = [];
for (const room of weeklyData.rooms) {
const totalHours = room.data.reduce((sum: number, d: any) => sum + d.hours, 0);
if (totalHours > 0) {
weeklyStats.push(`${room.name}: ${totalHours.toFixed(1)} hours total this week`);
}
}
if (weeklyStats.length > 0) lines.push(`Weekly summary: ${weeklyStats.join(', ')}`);
return lines.join('. ');
} catch (error) {
console.log('Failed to fetch activity context:', error);
return '';
}
};
// Fetch dashboard data as fallback context
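// Assumed dashboard_single response shape, reconstructed from the field
// accesses below:
// {
//   result_list?: Array<{
//     wellness_descriptor?: string; wellness_score_percent?: number;
//     last_location?: string; last_detected_time?: string;
//     sleep_hours?: number; temperature?: number; units?: 'F' | 'C';
//   }>;
// }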
const getDashboardContext = async (token: string, userName: string, deploymentId: string): Promise<string> => {
try {
const today = new Date().toISOString().split('T')[0];
const response = await fetch(API_URL, {
method: 'POST',
headers: { 'Content-Type': 'application/x-www-form-urlencoded' },
body: new URLSearchParams({
function: 'dashboard_single',
user_name: userName,
token: token,
deployment_id: deploymentId,
date: today,
}).toString(),
});
const data = await response.json();
if (!data.result_list || data.result_list.length === 0) return '';
const info = data.result_list[0];
const lines: string[] = [];
if (info.wellness_descriptor) lines.push(`Current wellness: ${info.wellness_descriptor}`);
if (info.wellness_score_percent) lines.push(`Wellness score: ${info.wellness_score_percent}%`);
if (info.last_location) lines.push(`Last seen in: ${info.last_location}`);
if (info.last_detected_time) lines.push(`Last activity: ${info.last_detected_time}`);
if (info.sleep_hours) lines.push(`Sleep hours: ${info.sleep_hours}`);
if (info.temperature) lines.push(`Temperature: ${info.temperature}${info.units === 'F' ? '°F' : '°C'}`);
return lines.join('. ');
} catch (error) {
console.log('Failed to fetch dashboard context:', error);
return '';
}
};
// Send message with full context - fetches context in parallel for speed
const sendWithContext = async (question: string): Promise<string> => {
const token = await SecureStore.getItemAsync('accessToken');
const userName = await SecureStore.getItemAsync('userName');
if (!token || !userName) throw new Error('Please log in');
// Auto-select first beneficiary if none selected
let beneficiary = currentBeneficiary;
if (!beneficiary?.id) {
console.log('[Chat] No beneficiary selected, auto-loading first one...');
const loaded = await loadBeneficiaries();
if (loaded.length > 0) {
beneficiary = loaded[0];
setCurrentBeneficiary(beneficiary);
console.log('[Chat] Auto-selected beneficiary:', beneficiary.name);
} else {
throw new Error('No beneficiaries found. Please add one first.');
}
}
const beneficiaryName = beneficiary.name || 'the patient';
const deploymentId = beneficiary.id.toString();
// Fetch both contexts in PARALLEL for speed
const [activityContext, dashboardContext] = await Promise.all([
getActivityContext(token, userName, deploymentId),
getDashboardContext(token, userName, deploymentId),
]);
// Use activity context, fallback to dashboard
const context = activityContext || dashboardContext;
// Build the question with embedded context
let enhancedQuestion: string;
if (context) {
enhancedQuestion = `You are a caring assistant helping monitor ${beneficiaryName}'s wellbeing.
Here is the current data about ${beneficiaryName}:
${context}
Based on this data, please answer the following question: ${question}`;
} else {
enhancedQuestion = `You are a caring assistant helping monitor ${beneficiaryName}'s wellbeing. Please answer: ${question}`;
}
// Call API
const requestBody = new URLSearchParams({
function: 'voice_ask',
clientId: '001',
user_name: userName,
token: token,
question: enhancedQuestion,
deployment_id: deploymentId,
context: context || '',
}).toString();
const response = await fetch(API_URL, {
method: 'POST',
headers: { 'Content-Type': 'application/x-www-form-urlencoded' },
body: requestBody,
});
const data = await response.json();
if (data.ok && data.response?.body) {
return data.response.body;
} else if (data.status === '401 Unauthorized') {
throw new Error('Session expired. Please log in again.');
} else {
throw new Error('Could not get response');
}
};
const handleSend = useCallback(async () => {
const trimmedInput = input.trim();
if (!trimmedInput || isSending) return;
// If no beneficiary selected, auto-selection should have happened
// but if still none, just proceed without context
if (!currentBeneficiary?.id) {
console.log('No beneficiary selected, proceeding without context');
}
const userMessage: Message = {
id: Date.now().toString(),
role: 'user',
content: trimmedInput,
timestamp: new Date(),
};
setMessages((prev) => [...prev, userMessage]);
setInput('');
setIsSending(true);
Keyboard.dismiss();
try {
const aiResponse = await sendWithContext(trimmedInput);
const assistantMessage: Message = {
id: (Date.now() + 1).toString(),
role: 'assistant',
content: aiResponse,
timestamp: new Date(),
};
setMessages((prev) => [...prev, assistantMessage]);
} catch (error) {
const errorMessage: Message = {
id: (Date.now() + 1).toString(),
role: 'assistant',
content: `Sorry, I encountered an error: ${error instanceof Error ? error.message : 'Unknown error'}. Please try again.`,
timestamp: new Date(),
};
setMessages((prev) => [...prev, errorMessage]);
} finally {
setIsSending(false);
}
}, [input, isSending, currentBeneficiary]);
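// Render a single chat bubble: assistant messages get the 'J' avatar on the
// left; user messages are right-aligned in the primary-colored bubble.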
const renderMessage = ({ item }: { item: Message }) => {
const isUser = item.role === 'user';
return (
<View
style={[
styles.messageContainer,
isUser ? styles.userMessageContainer : styles.assistantMessageContainer,
]}
>
{!isUser && (
<View style={styles.avatarContainer}>
<Text style={styles.avatarText}>J</Text>
</View>
)}
<View
style={[
styles.messageBubble,
isUser ? styles.userBubble : styles.assistantBubble,
]}
>
<Text
style={[
styles.messageText,
isUser ? styles.userMessageText : styles.assistantMessageText,
]}
>
{item.content}
</Text>
<Text style={[styles.timestamp, isUser && styles.userTimestamp]}>
{item.timestamp.toLocaleTimeString([], { hour: '2-digit', minute: '2-digit' })}
</Text>
</View>
</View>
);
};
return (
<SafeAreaView style={styles.container} edges={['top']}>
{/* Header */}
<View style={styles.header}>
<TouchableOpacity
style={styles.backButton}
onPress={() => router.push('/(tabs)/dashboard')}
hitSlop={{ top: 10, bottom: 10, left: 10, right: 10 }}
>
<Ionicons name="arrow-back" size={24} color={AppColors.textPrimary} />
</TouchableOpacity>
<View style={styles.headerInfo}>
<View style={styles.headerAvatar}>
<Text style={styles.headerAvatarText}>J</Text>
</View>
<View>
<Text style={styles.headerTitle}>Julia AI</Text>
<Text style={styles.headerSubtitle}>
{isSending
? 'Typing...'
: currentBeneficiary
? `About ${currentBeneficiary.name}`
: 'Online'}
</Text>
</View>
</View>
<View style={styles.headerButtons}>
<TouchableOpacity style={styles.headerButton} onPress={openBeneficiaryPicker}>
<Ionicons name="people-outline" size={24} color={AppColors.primary} />
</TouchableOpacity>
</View>
</View>
{/* Beneficiary Picker Modal */}
<Modal
visible={showBeneficiaryPicker}
transparent
animationType="slide"
onRequestClose={() => setShowBeneficiaryPicker(false)}
>
<View style={styles.modalOverlay}>
<View style={styles.modalContent}>
<View style={styles.modalHeader}>
<Text style={styles.modalTitle}>Select Beneficiary</Text>
<TouchableOpacity onPress={() => setShowBeneficiaryPicker(false)}>
<Ionicons name="close" size={24} color={AppColors.textPrimary} />
</TouchableOpacity>
</View>
{loadingBeneficiaries ? (
<View style={styles.modalLoading}>
<ActivityIndicator size="large" color={AppColors.primary} />
<Text style={styles.loadingText}>Loading beneficiaries...</Text>
</View>
) : beneficiaries.length === 0 ? (
<View style={styles.modalEmpty}>
<Text style={styles.emptyText}>No beneficiaries found</Text>
</View>
) : (
<FlatList
data={beneficiaries}
keyExtractor={(item) => item.id.toString()}
renderItem={({ item }) => (
<TouchableOpacity
style={[
styles.beneficiaryItem,
currentBeneficiary?.id === item.id && styles.beneficiaryItemSelected,
]}
onPress={() => selectBeneficiary(item)}
>
<View style={styles.beneficiaryAvatar}>
<Text style={styles.beneficiaryAvatarText}>
{item.name.split(' ').map(n => n[0]).join('').slice(0, 2)}
</Text>
</View>
<View style={styles.beneficiaryInfo}>
<Text style={styles.beneficiaryName}>{item.name}</Text>
{item.email && (
<Text style={styles.beneficiaryEmail}>{item.email}</Text>
)}
</View>
{currentBeneficiary?.id === item.id && (
<Ionicons name="checkmark-circle" size={24} color={AppColors.success} />
)}
</TouchableOpacity>
)}
style={styles.beneficiaryList}
/>
)}
</View>
</View>
</Modal>
{/* Messages */}
<KeyboardAvoidingView
style={styles.chatContainer}
behavior={Platform.OS === 'ios' ? 'padding' : undefined}
keyboardVerticalOffset={Platform.OS === 'ios' ? 90 : 0}
>
<FlatList
ref={flatListRef}
data={messages}
keyExtractor={(item) => item.id}
renderItem={renderMessage}
contentContainerStyle={styles.messagesList}
showsVerticalScrollIndicator={false}
onContentSizeChange={() => flatListRef.current?.scrollToEnd({ animated: true })}
/>
{/* Voice Feedback Text (for errors) */}
{voiceFeedback && !isListening && !isSpeaking && (
<View style={styles.voiceFeedbackContainer}>
<Text style={styles.voiceFeedbackText}>{voiceFeedback}</Text>
</View>
)}
{/* Beautiful Voice Indicator Animation */}
{(isListening || isSpeaking) && (
<VoiceIndicator
mode={isListening ? 'listening' : 'speaking'}
onTap={handleVoiceIndicatorTap}
/>
)}
{/* Input */}
<View style={styles.inputContainer}>
{/* Microphone / Stop Button */}
<Animated.View style={{ transform: [{ scale: isListening ? pulseAnim : 1 }] }}>
<TouchableOpacity
style={[
styles.micButton,
isListening && styles.micButtonActive,
isSpeaking && styles.micButtonSpeaking,
!speechRecognitionAvailable && !isSpeaking && styles.micButtonDisabled,
]}
onPress={isSpeaking ? stopSpeaking : handleVoiceToggle}
disabled={(!speechRecognitionAvailable && !isSpeaking) || isSending}
>
<Ionicons
name={isSpeaking ? 'stop' : isListening ? 'mic' : 'mic-outline'}
size={22}
color={
isSpeaking
? AppColors.white
: isListening
? AppColors.white
: speechRecognitionAvailable
? AppColors.primary
: AppColors.textMuted
}
/>
</TouchableOpacity>
</Animated.View>
<TextInput
style={styles.input}
placeholder={
isSpeaking
? 'AI is speaking... tap stop to interrupt'
: isListening
? 'Listening...'
: 'Type or speak...'
}
placeholderTextColor={AppColors.textMuted}
value={input}
onChangeText={setInput}
multiline
maxLength={1000}
editable={!isListening && !isSpeaking}
onSubmitEditing={handleSend}
/>
<TouchableOpacity
style={[styles.sendButton, (!input.trim() || isSending) && styles.sendButtonDisabled]}
onPress={handleSend}
disabled={!input.trim() || isSending}
>
<Ionicons
name={isSending ? 'hourglass' : 'send'}
size={20}
color={input.trim() && !isSending ? AppColors.white : AppColors.textMuted}
/>
</TouchableOpacity>
</View>
</KeyboardAvoidingView>
</SafeAreaView>
);
}
const styles = StyleSheet.create({
container: {
flex: 1,
backgroundColor: AppColors.surface,
},
header: {
flexDirection: 'row',
alignItems: 'center',
justifyContent: 'space-between',
paddingHorizontal: Spacing.md,
paddingVertical: Spacing.sm,
backgroundColor: AppColors.background,
borderBottomWidth: 1,
borderBottomColor: AppColors.border,
},
backButton: {
padding: Spacing.xs,
marginRight: Spacing.sm,
},
headerInfo: {
flexDirection: 'row',
alignItems: 'center',
},
headerAvatar: {
width: 40,
height: 40,
borderRadius: BorderRadius.full,
backgroundColor: AppColors.success,
justifyContent: 'center',
alignItems: 'center',
marginRight: Spacing.sm,
},
headerAvatarText: {
fontSize: FontSizes.lg,
fontWeight: '600',
color: AppColors.white,
},
headerTitle: {
fontSize: FontSizes.lg,
fontWeight: '600',
color: AppColors.textPrimary,
},
headerSubtitle: {
fontSize: FontSizes.sm,
color: AppColors.success,
},
headerButton: {
padding: Spacing.xs,
},
chatContainer: {
flex: 1,
},
messagesList: {
padding: Spacing.md,
paddingBottom: Spacing.lg,
},
messageContainer: {
flexDirection: 'row',
marginBottom: Spacing.md,
alignItems: 'flex-end',
},
userMessageContainer: {
justifyContent: 'flex-end',
},
assistantMessageContainer: {
justifyContent: 'flex-start',
},
avatarContainer: {
width: 32,
height: 32,
borderRadius: BorderRadius.full,
backgroundColor: AppColors.success,
justifyContent: 'center',
alignItems: 'center',
marginRight: Spacing.xs,
},
avatarText: {
fontSize: FontSizes.sm,
fontWeight: '600',
color: AppColors.white,
},
messageBubble: {
maxWidth: '75%',
padding: Spacing.sm + 4,
borderRadius: BorderRadius.lg,
},
userBubble: {
backgroundColor: AppColors.primary,
borderBottomRightRadius: BorderRadius.sm,
},
assistantBubble: {
backgroundColor: AppColors.background,
borderBottomLeftRadius: BorderRadius.sm,
},
messageText: {
fontSize: FontSizes.base,
lineHeight: 22,
},
userMessageText: {
color: AppColors.white,
},
assistantMessageText: {
color: AppColors.textPrimary,
},
timestamp: {
fontSize: FontSizes.xs,
color: AppColors.textMuted,
marginTop: Spacing.xs,
alignSelf: 'flex-end',
},
userTimestamp: {
color: 'rgba(255,255,255,0.7)',
},
inputContainer: {
flexDirection: 'row',
alignItems: 'flex-end',
padding: Spacing.md,
backgroundColor: AppColors.background,
borderTopWidth: 1,
borderTopColor: AppColors.border,
},
input: {
flex: 1,
backgroundColor: AppColors.surface,
borderRadius: BorderRadius.xl,
paddingHorizontal: Spacing.md,
paddingVertical: Spacing.sm,
fontSize: FontSizes.base,
color: AppColors.textPrimary,
maxHeight: 100,
marginRight: Spacing.sm,
},
sendButton: {
width: 44,
height: 44,
borderRadius: BorderRadius.full,
backgroundColor: AppColors.primary,
justifyContent: 'center',
alignItems: 'center',
},
sendButtonDisabled: {
backgroundColor: AppColors.surface,
},
// Modal styles
modalOverlay: {
flex: 1,
backgroundColor: 'rgba(0, 0, 0, 0.5)',
justifyContent: 'flex-end',
},
modalContent: {
backgroundColor: AppColors.background,
borderTopLeftRadius: BorderRadius.xl,
borderTopRightRadius: BorderRadius.xl,
maxHeight: '70%',
paddingBottom: Spacing.xl,
},
modalHeader: {
flexDirection: 'row',
justifyContent: 'space-between',
alignItems: 'center',
padding: Spacing.md,
borderBottomWidth: 1,
borderBottomColor: AppColors.border,
},
modalTitle: {
fontSize: FontSizes.lg,
fontWeight: '600',
color: AppColors.textPrimary,
},
modalLoading: {
padding: Spacing.xl,
alignItems: 'center',
},
loadingText: {
marginTop: Spacing.md,
fontSize: FontSizes.base,
color: AppColors.textSecondary,
},
modalEmpty: {
padding: Spacing.xl,
alignItems: 'center',
},
emptyText: {
fontSize: FontSizes.base,
color: AppColors.textSecondary,
},
beneficiaryList: {
paddingHorizontal: Spacing.md,
},
beneficiaryItem: {
flexDirection: 'row',
alignItems: 'center',
padding: Spacing.md,
backgroundColor: AppColors.surface,
borderRadius: BorderRadius.md,
marginTop: Spacing.sm,
},
beneficiaryItemSelected: {
backgroundColor: AppColors.primaryLight || '#E3F2FD',
borderWidth: 1,
borderColor: AppColors.primary,
},
beneficiaryAvatar: {
width: 44,
height: 44,
borderRadius: BorderRadius.full,
backgroundColor: AppColors.primary,
justifyContent: 'center',
alignItems: 'center',
marginRight: Spacing.md,
},
beneficiaryAvatarText: {
fontSize: FontSizes.base,
fontWeight: '600',
color: AppColors.white,
},
beneficiaryInfo: {
flex: 1,
},
beneficiaryName: {
fontSize: FontSizes.base,
fontWeight: '500',
color: AppColors.textPrimary,
},
beneficiaryEmail: {
fontSize: FontSizes.sm,
color: AppColors.textSecondary,
marginTop: 2,
},
// Voice UI styles
voiceFeedbackContainer: {
paddingHorizontal: Spacing.md,
paddingVertical: Spacing.sm,
backgroundColor: 'rgba(255, 152, 0, 0.1)',
borderRadius: BorderRadius.md,
marginHorizontal: Spacing.md,
marginBottom: Spacing.sm,
},
voiceFeedbackText: {
fontSize: FontSizes.sm,
color: AppColors.warning || '#FF9800',
textAlign: 'center',
},
micButton: {
width: 44,
height: 44,
borderRadius: 22,
backgroundColor: AppColors.surface,
justifyContent: 'center',
alignItems: 'center',
marginRight: Spacing.sm,
borderWidth: 1,
borderColor: AppColors.border,
},
micButtonActive: {
backgroundColor: AppColors.primary,
borderColor: AppColors.primary,
},
micButtonSpeaking: {
backgroundColor: AppColors.error || '#E53935',
borderColor: AppColors.error || '#E53935',
},
micButtonDisabled: {
opacity: 0.5,
},
// Header buttons (for beneficiary picker)
headerButtons: {
flexDirection: 'row',
gap: Spacing.xs,
},
});
// Wrap with TTSErrorBoundary to catch TTS crashes
export default function ChatScreen() {
return (
<TTSErrorBoundary>
<ChatScreenContent />
</TTSErrorBoundary>
);
}
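// For reference: the sherpaTTS methods used in this screen imply roughly the
// interface sketched below. This is inferred from the call sites above
// (initialize/deinitialize/isAvailable/isSpeaking/speak/stop), not copied
// from the service's actual declarations:
//
// interface SherpaTTSService {
//   initialize(): Promise<boolean>;
//   deinitialize(): void;
//   isAvailable(): boolean;
//   isSpeaking(): Promise<boolean>;
//   speak(
//     text: string,
//     callbacks?: { onDone?: () => void; onError?: (error: unknown) => void }
//   ): Promise<void>;
//   stop(): void;
// }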