Sergei aec300bd98 feat: Add floating bubble during voice calls
- Create VoiceCallContext for global voice call state management
- Add FloatingCallBubble component with drag support
- Add minimize button to voice call screen
- Show bubble when call is minimized, tap to return to call
- Button shows active call state with green color

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <noreply@anthropic.com>
2026-01-24 20:39:27 -08:00


/**
 * Chat Screen - Text Chat with Julia AI
 *
 * Clean text chat interface with integrated voice calls.
 */
import React, { useState, useCallback, useRef, useEffect } from 'react';
import {
  View,
  Text,
  StyleSheet,
  FlatList,
  TextInput,
  TouchableOpacity,
  Modal,
  ActivityIndicator,
  Keyboard,
  Platform,
  Alert,
} from 'react-native';
import { KeyboardAvoidingView } from 'react-native-keyboard-controller';
import { Ionicons } from '@expo/vector-icons';
import { SafeAreaView } from 'react-native-safe-area-context';
import { useRouter } from 'expo-router';
import { activateKeepAwakeAsync, deactivateKeepAwake } from 'expo-keep-awake';
import { api } from '@/services/api';
import { useBeneficiary } from '@/contexts/BeneficiaryContext';
import { useVoiceTranscript } from '@/contexts/VoiceTranscriptContext';
import { useVoiceCall } from '@/contexts/VoiceCallContext';
import { AppColors, BorderRadius, FontSizes, Spacing } from '@/constants/theme';
import type { Message, Beneficiary } from '@/types';
// LiveKit imports
import {
  registerGlobals,
  LiveKitRoom,
  useVoiceAssistant,
  useConnectionState,
  useRoomContext,
  BarVisualizer,
  useTrackTranscription,
  useTracks,
} from '@livekit/react-native';
import { ConnectionState, RoomEvent, Track, TranscriptionSegment } from 'livekit-client';
import { getToken, type BeneficiaryData } from '@/services/livekitService';
import { useAuth } from '@/contexts/AuthContext';
// Register LiveKit globals (must be called before using LiveKit)
registerGlobals();
const API_URL = 'https://eluxnetworks.net/function/well-api/api';
// WellNuo API credentials (same as julia-agent)
const WELLNUO_USER = 'anandk';
const WELLNUO_PASSWORD = 'anandk_8';
// ============================================================================
// SINGLE_DEPLOYMENT_MODE
// When true: sends only deployment_id (no beneficiary_names_dict)
// When false: sends both deployment_id AND beneficiary_names_dict
//
// Use true for WellNuo Lite (single beneficiary per user)
// Use false for full WellNuo app (multiple beneficiaries)
// ============================================================================
const SINGLE_DEPLOYMENT_MODE = true;
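
// Illustrative request shapes under each mode (a sketch; the example IDs and names
// come from the defaults used later in this file):
//   true  -> params include deployment_id: '21'
//   false -> params include deployment_id: '21' and
//            beneficiary_names_dict: '{"21":"papa","69":"David"}'
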
// Keywords for question normalization (same as julia-agent/julia-ai/src/agent.py)
const STATUS_KEYWORDS = [
  /\bhow\s+is\b/i,
  /\bhow'?s\b/i,
  /\bhow\s+are\b/i,
  /\btell\s+me\s+about\b/i,
  /\bwhat'?s\s+up\s+with\b/i,
  /\bupdate\s+on\b/i,
  /\bstatus\b/i,
  /\bdoing\b/i,
  /\bfeeling\b/i,
  /\bcheck\s+on\b/i,
  /\bis\s+\w+\s+okay\b/i,
  /\bis\s+\w+\s+alright\b/i,
  /\bis\s+\w+\s+fine\b/i,
  /\bokay\?\b/i,
  /\balright\?\b/i,
];
const SUBJECT_KEYWORDS = [
  /\bdad\b/i,
  /\bfather\b/i,
  /\bferdinand\b/i,
  /\bhim\b/i,
  /\bhe\b/i,
  /\bmy\s+dad\b/i,
  /\bmy\s+father\b/i,
  /\bthe\s+patient\b/i,
  /\bloved\s+one\b/i,
  /\bparent\b/i,
  /\bgrandpa\b/i,
  /\bgrandfather\b/i,
];
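
// Illustrative matches (assumed, based on the patterns above): "How's dad doing?"
// hits both lists; "any status update?" hits only STATUS_KEYWORDS; "tell me a joke"
// hits neither.
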
/**
 * Transform user questions into a format the WellNuo API understands.
 * The WellNuo API only responds with real sensor data for very specific phrases,
 * so this function maps common user questions to those phrases.
 * (Same logic as julia-agent/julia-ai/src/agent.py normalize_question)
 */
function normalizeQuestion(userMessage: string): string {
  const msgLower = userMessage.toLowerCase().trim();
  const isStatusQuery = STATUS_KEYWORDS.some(pattern => pattern.test(msgLower));
  const isAboutRecipient = SUBJECT_KEYWORDS.some(pattern => pattern.test(msgLower));
  // Asking about the care recipient's general status
  if (isStatusQuery && isAboutRecipient) {
    console.log(`[Chat] Normalized '${userMessage}' -> 'how is dad doing'`);
    return 'how is dad doing';
  }
  // Generic status question without a clear subject - assume it refers to the care recipient
  if (isStatusQuery && !isAboutRecipient) {
    console.log(`[Chat] Normalized '${userMessage}' -> 'how is dad doing' (assumed recipient)`);
    return 'how is dad doing';
  }
  // No transformation needed - return the original message
  console.log(`[Chat] No normalization applied to: '${userMessage}'`);
  return userMessage;
}
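
// Example behavior (illustrative):
//   normalizeQuestion("How's dad doing today?")   -> 'how is dad doing'
//   normalizeQuestion("Any update on my mother?") -> 'how is dad doing' (assumed recipient)
//   normalizeQuestion("What's the weather?")      -> unchanged
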
// ============================================================================
// Voice Call Overlay Component
// ============================================================================
interface VoiceCallOverlayProps {
  onHangUp: () => void;
  onMinimize: () => void;
  onTranscript: (role: 'user' | 'assistant', text: string) => void;
  onDurationUpdate: (seconds: number) => void;
  beneficiaryName?: string;
}
function VoiceCallContent({ onHangUp, onMinimize, onTranscript, onDurationUpdate, beneficiaryName }: VoiceCallOverlayProps) {
  const room = useRoomContext();
  const connectionState = useConnectionState();
  const { state: agentState, audioTrack } = useVoiceAssistant();
  const [callDuration, setCallDuration] = useState(0);
  const [lastProcessedId, setLastProcessedId] = useState<string | null>(null);

  // Track all audio tracks for transcription
  const tracks = useTracks([Track.Source.Microphone], { onlySubscribed: true });
  // Get transcription from agent's audio track
  const { segments: agentSegments } = useTrackTranscription(audioTrack);
  // Get transcription from user's microphone
  const localTrack = tracks.find(t => t.participant?.isLocal);
  const { segments: userSegments } = useTrackTranscription(localTrack);

  // Process agent transcription
  useEffect(() => {
    if (agentSegments && agentSegments.length > 0) {
      const lastSegment = agentSegments[agentSegments.length - 1];
      if (lastSegment && lastSegment.final && lastSegment.id !== lastProcessedId) {
        setLastProcessedId(lastSegment.id);
        onTranscript('assistant', lastSegment.text);
        console.log('[VoiceCall] Agent said:', lastSegment.text);
      }
    }
  }, [agentSegments, lastProcessedId, onTranscript]);

  // Process user transcription
  const [lastUserSegmentId, setLastUserSegmentId] = useState<string | null>(null);
  useEffect(() => {
    if (userSegments && userSegments.length > 0) {
      const lastSegment = userSegments[userSegments.length - 1];
      if (lastSegment && lastSegment.final && lastSegment.id !== lastUserSegmentId) {
        setLastUserSegmentId(lastSegment.id);
        onTranscript('user', lastSegment.text);
        console.log('[VoiceCall] User said:', lastSegment.text);
      }
    }
  }, [userSegments, lastUserSegmentId, onTranscript]);

  // Call duration timer
  useEffect(() => {
    if (connectionState === ConnectionState.Connected) {
      const interval = setInterval(() => {
        setCallDuration(prev => {
          const newDuration = prev + 1;
          onDurationUpdate(newDuration);
          return newDuration;
        });
      }, 1000);
      return () => clearInterval(interval);
    }
  }, [connectionState, onDurationUpdate]);

  // Keep screen awake during call
  useEffect(() => {
    activateKeepAwakeAsync('voice-call');
    return () => {
      deactivateKeepAwake('voice-call');
    };
  }, []);

  // Format duration as mm:ss
  const formatDuration = (seconds: number) => {
    const mins = Math.floor(seconds / 60);
    const secs = seconds % 60;
    return `${mins.toString().padStart(2, '0')}:${secs.toString().padStart(2, '0')}`;
  };
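  // e.g. formatDuration(125) -> '02:05'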
  // Get status text based on agent state
  const getStatusText = () => {
    if (connectionState === ConnectionState.Connecting) return 'Connecting...';
    if (connectionState === ConnectionState.Reconnecting) return 'Reconnecting...';
    if (connectionState !== ConnectionState.Connected) return 'Disconnected';
    switch (agentState) {
      case 'listening': return 'Listening...';
      case 'thinking': return 'Thinking...';
      case 'speaking': return 'Speaking...';
      case 'connecting': return 'Connecting to Julia...';
      case 'initializing': return 'Starting...';
      default: return 'Connected';
    }
  };

  return (
    <View style={voiceStyles.container}>
      <View style={voiceStyles.content}>
        {/* Avatar */}
        <View style={voiceStyles.avatarContainer}>
          <View style={[
            voiceStyles.avatar,
            agentState === 'speaking' && voiceStyles.avatarSpeaking,
          ]}>
            <Text style={voiceStyles.avatarText}>J</Text>
          </View>
          {agentState === 'speaking' && (
            <View style={voiceStyles.speakingRing} />
          )}
        </View>
        {/* Name and status */}
        <Text style={voiceStyles.name}>Julia AI</Text>
        {beneficiaryName && (
          <Text style={voiceStyles.beneficiary}>About {beneficiaryName}</Text>
        )}
        <Text style={voiceStyles.status}>{getStatusText()}</Text>
        {/* Duration */}
        {connectionState === ConnectionState.Connected && (
          <Text style={voiceStyles.duration}>{formatDuration(callDuration)}</Text>
        )}
        {/* Audio Visualizer */}
        {audioTrack && agentState === 'speaking' && (
          <View style={voiceStyles.visualizerContainer}>
            <BarVisualizer
              trackRef={{ participant: audioTrack.participant, source: Track.Source.Microphone, publication: audioTrack.publication }}
              barCount={5}
              options={{ minHeight: 10 }}
            />
          </View>
        )}
      </View>
      {/* Call controls */}
      <View style={voiceStyles.callControls}>
        {/* Minimize button */}
        <TouchableOpacity style={voiceStyles.minimizeButton} onPress={onMinimize}>
          <Ionicons name="chevron-down" size={28} color={AppColors.white} />
        </TouchableOpacity>
        {/* Hang up button */}
        <TouchableOpacity style={voiceStyles.hangUpButton} onPress={onHangUp}>
          <Ionicons name="call" size={32} color={AppColors.white} style={{ transform: [{ rotate: '135deg' }] }} />
        </TouchableOpacity>
        {/* Placeholder for symmetry */}
        <View style={voiceStyles.controlPlaceholder} />
      </View>
    </View>
  );
}
const voiceStyles = StyleSheet.create({
  container: {
    flex: 1,
    backgroundColor: 'rgba(0, 0, 0, 0.95)',
    justifyContent: 'space-between',
    alignItems: 'center',
    paddingVertical: 60,
  },
  content: {
    flex: 1,
    justifyContent: 'center',
    alignItems: 'center',
  },
  avatarContainer: {
    position: 'relative',
    marginBottom: Spacing.lg,
  },
  avatar: {
    width: 120,
    height: 120,
    borderRadius: 60,
    backgroundColor: AppColors.success,
    justifyContent: 'center',
    alignItems: 'center',
  },
  avatarSpeaking: {
    backgroundColor: AppColors.primary,
  },
  avatarText: {
    fontSize: 48,
    fontWeight: '600',
    color: AppColors.white,
  },
  speakingRing: {
    position: 'absolute',
    top: -10,
    left: -10,
    right: -10,
    bottom: -10,
    borderRadius: 70,
    borderWidth: 3,
    borderColor: AppColors.primary,
    opacity: 0.5,
  },
  name: {
    fontSize: FontSizes['2xl'],
    fontWeight: '600',
    color: AppColors.white,
    marginBottom: Spacing.xs,
  },
  beneficiary: {
    fontSize: FontSizes.base,
    color: 'rgba(255, 255, 255, 0.7)',
    marginBottom: Spacing.sm,
  },
  status: {
    fontSize: FontSizes.base,
    color: AppColors.success,
    marginBottom: Spacing.md,
  },
  duration: {
    fontSize: FontSizes.lg,
    color: 'rgba(255, 255, 255, 0.8)',
    fontVariant: ['tabular-nums'],
  },
  visualizerContainer: {
    marginTop: Spacing.xl,
    height: 60,
    width: 200,
  },
  callControls: {
    flexDirection: 'row',
    alignItems: 'center',
    justifyContent: 'center',
    gap: Spacing.xl,
    marginBottom: Spacing.xl,
  },
  minimizeButton: {
    width: 56,
    height: 56,
    borderRadius: 28,
    backgroundColor: 'rgba(255, 255, 255, 0.2)',
    justifyContent: 'center',
    alignItems: 'center',
  },
  hangUpButton: {
    width: 72,
    height: 72,
    borderRadius: 36,
    backgroundColor: AppColors.error,
    justifyContent: 'center',
    alignItems: 'center',
  },
  controlPlaceholder: {
    width: 56,
    height: 56,
  },
});
export default function ChatScreen() {
  const router = useRouter();
  const { currentBeneficiary, setCurrentBeneficiary } = useBeneficiary();
  const { getTranscriptAsMessages, hasNewTranscript, markTranscriptAsShown, addTranscriptEntry, clearTranscript } = useVoiceTranscript();
  const { user } = useAuth();
  const {
    callState,
    startCall,
    endCall: endVoiceCallContext,
    minimizeCall,
    maximizeCall,
    updateDuration,
    isCallActive,
  } = useVoiceCall();

  // Chat state
  const [messages, setMessages] = useState<Message[]>([
    {
      id: '1',
      role: 'assistant',
      content: 'Hello! I\'m Julia, your AI wellness assistant. You can type a message or tap the phone button to start a voice call.',
      timestamp: new Date(),
    },
  ]);
  // Voice call state (local connecting state only)
  const [isConnectingVoice, setIsConnectingVoice] = useState(false);

  // Add voice call transcript to messages when returning from call
  useEffect(() => {
    if (hasNewTranscript) {
      const transcriptMessages = getTranscriptAsMessages();
      if (transcriptMessages.length > 0) {
        // Add a separator message
        const separatorMessage: Message = {
          id: `voice-separator-${Date.now()}`,
          role: 'assistant',
          content: '--- Voice Call Transcript ---',
          timestamp: new Date(),
          isSystem: true,
        };
        setMessages(prev => [...prev, separatorMessage, ...transcriptMessages]);
        markTranscriptAsShown();
        // Scroll to bottom
        setTimeout(() => {
          flatListRef.current?.scrollToEnd({ animated: true });
        }, 100);
      }
    }
  }, [hasNewTranscript, getTranscriptAsMessages, markTranscriptAsShown]);

  const [input, setInput] = useState('');
  const [isSending, setIsSending] = useState(false);
  const flatListRef = useRef<FlatList>(null);

  // Beneficiary picker
  const [showBeneficiaryPicker, setShowBeneficiaryPicker] = useState(false);
  const [beneficiaries, setBeneficiaries] = useState<Beneficiary[]>([]);
  const [loadingBeneficiaries, setLoadingBeneficiaries] = useState(false);

  // Load beneficiaries
  const loadBeneficiaries = useCallback(async () => {
    setLoadingBeneficiaries(true);
    try {
      const response = await api.getAllBeneficiaries();
      if (response.ok && response.data) {
        setBeneficiaries(response.data);
        return response.data;
      }
      return [];
    } catch (error) {
      console.error('Failed to load beneficiaries:', error);
      return [];
    } finally {
      setLoadingBeneficiaries(false);
    }
  }, []);

  // Auto-select first beneficiary
  useEffect(() => {
    const autoSelect = async () => {
      if (!currentBeneficiary) {
        const loaded = await loadBeneficiaries();
        if (loaded.length > 0) {
          setCurrentBeneficiary(loaded[0]);
        }
      }
    };
    autoSelect();
  }, []);

  // Scroll to end when keyboard shows
  useEffect(() => {
    const keyboardShowListener = Keyboard.addListener(
      Platform.OS === 'ios' ? 'keyboardWillShow' : 'keyboardDidShow',
      () => {
        setTimeout(() => {
          flatListRef.current?.scrollToEnd({ animated: true });
        }, 100);
      }
    );
    return () => keyboardShowListener.remove();
  }, []);
  const openBeneficiaryPicker = useCallback(() => {
    setShowBeneficiaryPicker(true);
    loadBeneficiaries();
  }, [loadBeneficiaries]);

  const selectBeneficiary = useCallback((beneficiary: Beneficiary) => {
    setCurrentBeneficiary(beneficiary);
    setShowBeneficiaryPicker(false);
  }, [setCurrentBeneficiary]);

  // ==========================================================================
  // Voice Call Functions
  // ==========================================================================

  // Start voice call
  const startVoiceCall = useCallback(async () => {
    if (isConnectingVoice || isCallActive) return;
    setIsConnectingVoice(true);
    console.log('[Chat] Starting voice call...');
    try {
      // Build beneficiary data for the agent
      const beneficiaryData: BeneficiaryData = {
        deploymentId: currentBeneficiary?.id?.toString() || beneficiaries[0]?.id?.toString() || '21',
        beneficiaryNamesDict: {},
      };
      // Add names dict if not in single deployment mode
      if (!SINGLE_DEPLOYMENT_MODE) {
        beneficiaries.forEach(b => {
          beneficiaryData.beneficiaryNamesDict[b.id.toString()] = b.name;
        });
      }
      // Get LiveKit token
      const userIdStr = user?.user_id?.toString() || 'user-' + Date.now();
      const tokenResponse = await getToken(userIdStr, beneficiaryData);
      if (!tokenResponse.success || !tokenResponse.data) {
        throw new Error(tokenResponse.error || 'Failed to get voice token');
      }
      console.log('[Chat] Got voice token, connecting to room:', tokenResponse.data.roomName);
      // Clear previous transcript and start call via context
      clearTranscript();
      startCall({
        token: tokenResponse.data.token,
        wsUrl: tokenResponse.data.wsUrl,
        beneficiaryName: currentBeneficiary?.name,
        beneficiaryId: currentBeneficiary?.id?.toString(),
      });
    } catch (error) {
      console.error('[Chat] Voice call error:', error);
      Alert.alert(
        'Voice Call Error',
        error instanceof Error ? error.message : 'Failed to start voice call'
      );
    } finally {
      setIsConnectingVoice(false);
    }
  }, [isConnectingVoice, isCallActive, currentBeneficiary, beneficiaries, user, clearTranscript, startCall]);

  // End voice call
  const endVoiceCall = useCallback(() => {
    console.log('[Chat] Ending voice call...');
    endVoiceCallContext();
  }, [endVoiceCallContext]);

  // Handle voice transcript entries
  const handleVoiceTranscript = useCallback((role: 'user' | 'assistant', text: string) => {
    addTranscriptEntry(role, text);
  }, [addTranscriptEntry]);

  // Cached API token for WellNuo
  const apiTokenRef = useRef<string | null>(null);

  // Get WellNuo API token (same credentials as julia-agent)
  const getWellNuoToken = useCallback(async (): Promise<string> => {
    if (apiTokenRef.current) {
      return apiTokenRef.current;
    }
    const nonce = Math.floor(Math.random() * 1000000).toString();
    const response = await fetch(API_URL, {
      method: 'POST',
      headers: { 'Content-Type': 'application/x-www-form-urlencoded' },
      body: new URLSearchParams({
        function: 'credentials',
        clientId: 'MA_001',
        user_name: WELLNUO_USER,
        ps: WELLNUO_PASSWORD,
        nonce: nonce,
      }).toString(),
    });
    const data = await response.json();
    if (data.status === '200 OK' && data.access_token) {
      apiTokenRef.current = data.access_token;
      console.log('[Chat] WellNuo token obtained');
      return data.access_token;
    }
    throw new Error('Failed to authenticate with WellNuo API');
  }, []);
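
  // A successful credentials call is assumed to return { status: '200 OK',
  // access_token: '...' } (shape inferred from the checks above); the cached
  // token is only invalidated on a 401 in sendTextMessage below.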

  // Text chat - send message via API (same as julia-agent)
  const sendTextMessage = useCallback(async () => {
    const trimmedInput = input.trim();
    if (!trimmedInput || isSending) return;
    const userMessage: Message = {
      id: Date.now().toString(),
      role: 'user',
      content: trimmedInput,
      timestamp: new Date(),
    };
    setMessages(prev => [...prev, userMessage]);
    setInput('');
    setIsSending(true);
    Keyboard.dismiss();
    try {
      // Get WellNuo API token (uses anandk credentials like julia-agent)
      const token = await getWellNuoToken();
      // Normalize question to a format the WellNuo API understands
      // (same logic as julia-agent/julia-ai/src/agent.py)
      const normalizedQuestion = normalizeQuestion(trimmedInput);
      // Build beneficiary_names_dict from all loaded beneficiaries
      // Format: {"21": "papa", "69": "David"}
      const beneficiaryNamesDict: Record<string, string> = {};
      beneficiaries.forEach(b => {
        beneficiaryNamesDict[b.id.toString()] = b.name;
      });
      // Get deployment_id from the current beneficiary, falling back to the first one
      const deploymentId = currentBeneficiary?.id?.toString() || beneficiaries[0]?.id?.toString() || '21';
      // Call the API with the EXACT same params as the voice agent.
      // SINGLE_DEPLOYMENT_MODE: sends only deployment_id (no beneficiary_names_dict)
      const requestParams: Record<string, string> = {
        function: 'ask_wellnuo_ai',
        clientId: 'MA_001',
        user_name: WELLNUO_USER,
        token: token,
        question: normalizedQuestion,
        deployment_id: deploymentId,
      };
      // Only add beneficiary_names_dict if NOT in single deployment mode
      if (!SINGLE_DEPLOYMENT_MODE) {
        requestParams.beneficiary_names_dict = JSON.stringify(beneficiaryNamesDict);
      }
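      // Illustrative encoded body in single-deployment mode (token elided):
      //   function=ask_wellnuo_ai&clientId=MA_001&user_name=anandk
      //   &token=...&question=how+is+dad+doing&deployment_id=21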
      const response = await fetch(API_URL, {
        method: 'POST',
        headers: { 'Content-Type': 'application/x-www-form-urlencoded' },
        body: new URLSearchParams(requestParams).toString(),
      });
      const data = await response.json();
      if (data.ok && data.response?.body) {
        const assistantMessage: Message = {
          id: (Date.now() + 1).toString(),
          role: 'assistant',
          content: data.response.body,
          timestamp: new Date(),
        };
        setMessages(prev => [...prev, assistantMessage]);
      } else {
        // Token might be expired: clear the cache so the next send re-authenticates
        if (data.status === '401 Unauthorized') {
          apiTokenRef.current = null;
          throw new Error('Session expired, please try again');
        }
        throw new Error('Could not get response');
      }
    } catch (error) {
      const errorMessage: Message = {
        id: (Date.now() + 1).toString(),
        role: 'assistant',
        content: `Sorry, I encountered an error: ${error instanceof Error ? error.message : 'Unknown error'}`,
        timestamp: new Date(),
      };
      setMessages(prev => [...prev, errorMessage]);
    } finally {
      setIsSending(false);
    }
    // beneficiaries and currentBeneficiary are read above, so they belong in the
    // dependency list; omitting them could send a stale deployment_id.
  }, [input, isSending, getWellNuoToken, beneficiaries, currentBeneficiary]);
  // Render message bubble
  const renderMessage = ({ item }: { item: Message }) => {
    const isUser = item.role === 'user';
    const isVoice = item.isVoice;
    const isSystem = item.isSystem;
    // System messages (like the "Voice Call Transcript" separator)
    if (isSystem) {
      return (
        <View style={styles.systemMessageContainer}>
          <View style={styles.systemMessageLine} />
          <View style={styles.systemMessageBadge}>
            <Ionicons name="call" size={12} color={AppColors.textMuted} />
            <Text style={styles.systemMessageText}>{item.content.replace(/---/g, '').trim()}</Text>
          </View>
          <View style={styles.systemMessageLine} />
        </View>
      );
    }
    return (
      <View style={[styles.messageContainer, isUser ? styles.userMessageContainer : styles.assistantMessageContainer]}>
        {!isUser && (
          <View style={styles.avatarContainer}>
            <Text style={styles.avatarText}>J</Text>
          </View>
        )}
        <View style={[styles.messageBubble, isUser ? styles.userBubble : styles.assistantBubble, isVoice && styles.voiceBubble]}>
          {isVoice && (
            <View style={styles.voiceIndicator}>
              <Ionicons name="mic" size={12} color={isUser ? 'rgba(255,255,255,0.7)' : AppColors.textMuted} />
            </View>
          )}
          <Text style={[styles.messageText, isUser ? styles.userMessageText : styles.assistantMessageText]}>
            {item.content}
          </Text>
          <Text style={[styles.timestamp, isUser && styles.userTimestamp]}>
            {item.timestamp.toLocaleTimeString([], { hour: '2-digit', minute: '2-digit' })}
          </Text>
        </View>
      </View>
    );
  };
  return (
    <SafeAreaView style={styles.container} edges={['top', 'bottom']}>
      {/* Header */}
      <View style={styles.header}>
        <TouchableOpacity style={styles.backButton} onPress={() => router.push('/(tabs)')}>
          <Ionicons name="arrow-back" size={24} color={AppColors.textPrimary} />
        </TouchableOpacity>
        <View style={styles.headerInfo}>
          <View style={styles.headerAvatar}>
            <Text style={styles.headerAvatarText}>J</Text>
          </View>
          <View>
            <Text style={styles.headerTitle}>Julia AI</Text>
            <Text style={styles.headerSubtitle}>
              {currentBeneficiary ? `About ${currentBeneficiary.name}` : 'Online'}
            </Text>
          </View>
        </View>
        <View style={styles.headerButtons} />
      </View>
      {/* Beneficiary Picker Modal */}
      <Modal
        visible={showBeneficiaryPicker}
        transparent
        animationType="slide"
        onRequestClose={() => setShowBeneficiaryPicker(false)}
      >
        <View style={styles.modalOverlay}>
          <View style={styles.modalContent}>
            <View style={styles.modalHeader}>
              <Text style={styles.modalTitle}>Select Beneficiary</Text>
              <TouchableOpacity onPress={() => setShowBeneficiaryPicker(false)}>
                <Ionicons name="close" size={24} color={AppColors.textPrimary} />
              </TouchableOpacity>
            </View>
            {loadingBeneficiaries ? (
              <View style={styles.modalLoading}>
                <ActivityIndicator size="large" color={AppColors.primary} />
              </View>
            ) : beneficiaries.length === 0 ? (
              <View style={styles.modalEmpty}>
                <Text style={styles.emptyText}>No beneficiaries found</Text>
              </View>
            ) : (
              <FlatList
                data={beneficiaries}
                keyExtractor={(item) => item.id.toString()}
                renderItem={({ item }) => (
                  <TouchableOpacity
                    style={[
                      styles.beneficiaryItem,
                      currentBeneficiary?.id === item.id && styles.beneficiaryItemSelected,
                    ]}
                    onPress={() => selectBeneficiary(item)}
                  >
                    <View style={styles.beneficiaryAvatar}>
                      <Text style={styles.beneficiaryAvatarText}>
                        {item.name.split(' ').map(n => n[0]).join('').slice(0, 2)}
                      </Text>
                    </View>
                    <View style={styles.beneficiaryInfo}>
                      <Text style={styles.beneficiaryName}>{item.name}</Text>
                    </View>
                    {currentBeneficiary?.id === item.id && (
                      <Ionicons name="checkmark-circle" size={24} color={AppColors.success} />
                    )}
                  </TouchableOpacity>
                )}
                style={styles.beneficiaryList}
              />
            )}
          </View>
        </View>
      </Modal>
      {/* Messages */}
      <KeyboardAvoidingView
        style={styles.chatContainer}
        behavior="padding"
      >
        <FlatList
          ref={flatListRef}
          data={messages}
          keyExtractor={(item) => item.id}
          renderItem={renderMessage}
          contentContainerStyle={styles.messagesList}
          showsVerticalScrollIndicator={false}
          onContentSizeChange={() => flatListRef.current?.scrollToEnd({ animated: true })}
        />
        {/* Input */}
        <View style={styles.inputContainer}>
          {/* Voice Call Button */}
          <TouchableOpacity
            style={[
              styles.voiceButton,
              (isConnectingVoice || isCallActive) && styles.voiceButtonConnecting,
            ]}
            onPress={isCallActive ? maximizeCall : startVoiceCall}
            disabled={isConnectingVoice}
          >
            {isConnectingVoice ? (
              <ActivityIndicator size="small" color={AppColors.primary} />
            ) : isCallActive ? (
              <Ionicons name="call" size={20} color={AppColors.success} />
            ) : (
              <Ionicons name="call" size={20} color={AppColors.primary} />
            )}
          </TouchableOpacity>
          <TextInput
            style={styles.input}
            placeholder="Type a message..."
            placeholderTextColor={AppColors.textMuted}
            value={input}
            onChangeText={setInput}
            multiline
            maxLength={1000}
            onSubmitEditing={sendTextMessage}
          />
          <TouchableOpacity
            style={[styles.sendButton, (!input.trim() || isSending) && styles.sendButtonDisabled]}
            onPress={sendTextMessage}
            disabled={!input.trim() || isSending}
          >
            <Ionicons
              name={isSending ? 'hourglass' : 'send'}
              size={20}
              color={input.trim() && !isSending ? AppColors.white : AppColors.textMuted}
            />
          </TouchableOpacity>
        </View>
      </KeyboardAvoidingView>
      {/* Voice Call Modal */}
      <Modal
        visible={isCallActive && !callState.isMinimized}
        animationType="slide"
        presentationStyle="fullScreen"
        onRequestClose={minimizeCall}
      >
        <SafeAreaView style={{ flex: 1, backgroundColor: 'black' }} edges={['top', 'bottom']}>
          {callState.token && callState.wsUrl ? (
            <LiveKitRoom
              serverUrl={callState.wsUrl}
              token={callState.token}
              connect={true}
              audio={true}
              video={false}
              onConnected={() => console.log('[Chat] LiveKit connected')}
              onDisconnected={endVoiceCall}
              onError={(error) => {
                console.error('[Chat] LiveKit error:', error);
                Alert.alert('Voice Call Error', error.message);
                endVoiceCall();
              }}
            >
              <VoiceCallContent
                onHangUp={endVoiceCall}
                onMinimize={minimizeCall}
                onTranscript={handleVoiceTranscript}
                onDurationUpdate={updateDuration}
                beneficiaryName={currentBeneficiary?.name}
              />
            </LiveKitRoom>
          ) : (
            <View style={{ flex: 1, justifyContent: 'center', alignItems: 'center' }}>
              <ActivityIndicator size="large" color={AppColors.primary} />
              <Text style={{ color: 'white', marginTop: 16 }}>Connecting...</Text>
            </View>
          )}
        </SafeAreaView>
      </Modal>
    </SafeAreaView>
  );
}
const styles = StyleSheet.create({
  container: {
    flex: 1,
    backgroundColor: AppColors.surface,
  },
  header: {
    flexDirection: 'row',
    alignItems: 'center',
    justifyContent: 'space-between',
    paddingHorizontal: Spacing.md,
    paddingVertical: Spacing.sm,
    backgroundColor: AppColors.background,
    borderBottomWidth: 1,
    borderBottomColor: AppColors.border,
  },
  backButton: {
    padding: Spacing.xs,
    marginRight: Spacing.sm,
  },
  headerInfo: {
    flex: 1,
    flexDirection: 'row',
    alignItems: 'center',
  },
  headerAvatar: {
    width: 40,
    height: 40,
    borderRadius: BorderRadius.full,
    backgroundColor: AppColors.success,
    justifyContent: 'center',
    alignItems: 'center',
    marginRight: Spacing.sm,
  },
  headerAvatarText: {
    fontSize: FontSizes.lg,
    fontWeight: '600',
    color: AppColors.white,
  },
  headerTitle: {
    fontSize: FontSizes.lg,
    fontWeight: '600',
    color: AppColors.textPrimary,
  },
  headerSubtitle: {
    fontSize: FontSizes.sm,
    color: AppColors.success,
  },
  headerButtons: {
    flexDirection: 'row',
    alignItems: 'center',
    gap: Spacing.sm,
  },
  headerButton: {
    padding: Spacing.xs,
  },
  chatContainer: {
    flex: 1,
  },
  messagesList: {
    padding: Spacing.md,
    paddingBottom: Spacing.lg,
  },
  messageContainer: {
    flexDirection: 'row',
    marginBottom: Spacing.md,
    alignItems: 'flex-end',
  },
  userMessageContainer: {
    justifyContent: 'flex-end',
  },
  assistantMessageContainer: {
    justifyContent: 'flex-start',
  },
  avatarContainer: {
    width: 32,
    height: 32,
    borderRadius: BorderRadius.full,
    backgroundColor: AppColors.success,
    justifyContent: 'center',
    alignItems: 'center',
    marginRight: Spacing.xs,
  },
  avatarText: {
    fontSize: FontSizes.sm,
    fontWeight: '600',
    color: AppColors.white,
  },
  messageBubble: {
    maxWidth: '75%',
    padding: Spacing.sm + 4,
    borderRadius: BorderRadius.lg,
  },
  userBubble: {
    backgroundColor: AppColors.primary,
    borderBottomRightRadius: BorderRadius.sm,
  },
  assistantBubble: {
    backgroundColor: AppColors.background,
    borderBottomLeftRadius: BorderRadius.sm,
  },
  messageText: {
    fontSize: FontSizes.base,
    lineHeight: 22,
  },
  userMessageText: {
    color: AppColors.white,
  },
  assistantMessageText: {
    color: AppColors.textPrimary,
  },
  timestamp: {
    fontSize: FontSizes.xs,
    color: AppColors.textMuted,
    marginTop: Spacing.xs,
    alignSelf: 'flex-end',
  },
  userTimestamp: {
    color: 'rgba(255,255,255,0.7)',
  },
  inputContainer: {
    flexDirection: 'row',
    alignItems: 'flex-end',
    padding: Spacing.md,
    backgroundColor: AppColors.background,
    borderTopWidth: 1,
    borderTopColor: AppColors.border,
  },
  input: {
    flex: 1,
    backgroundColor: AppColors.surface,
    borderRadius: BorderRadius.xl,
    paddingHorizontal: Spacing.md,
    paddingVertical: Spacing.sm,
    fontSize: FontSizes.base,
    color: AppColors.textPrimary,
    maxHeight: 100,
    marginRight: Spacing.sm,
  },
  voiceButton: {
    width: 44,
    height: 44,
    borderRadius: BorderRadius.full,
    backgroundColor: AppColors.surface,
    justifyContent: 'center',
    alignItems: 'center',
    marginRight: Spacing.sm,
    borderWidth: 1,
    borderColor: AppColors.primary,
  },
  voiceButtonConnecting: {
    borderColor: AppColors.success,
    backgroundColor: 'rgba(90, 200, 168, 0.1)',
  },
  sendButton: {
    width: 44,
    height: 44,
    borderRadius: BorderRadius.full,
    backgroundColor: AppColors.primary,
    justifyContent: 'center',
    alignItems: 'center',
  },
  sendButtonDisabled: {
    backgroundColor: AppColors.surface,
  },
  // Modal styles
  modalOverlay: {
    flex: 1,
    backgroundColor: 'rgba(0, 0, 0, 0.5)',
    justifyContent: 'flex-end',
  },
  modalContent: {
    backgroundColor: AppColors.background,
    borderTopLeftRadius: BorderRadius.xl,
    borderTopRightRadius: BorderRadius.xl,
    maxHeight: '70%',
    paddingBottom: Spacing.xl,
  },
  modalHeader: {
    flexDirection: 'row',
    justifyContent: 'space-between',
    alignItems: 'center',
    padding: Spacing.md,
    borderBottomWidth: 1,
    borderBottomColor: AppColors.border,
  },
  modalTitle: {
    fontSize: FontSizes.lg,
    fontWeight: '600',
    color: AppColors.textPrimary,
  },
  modalLoading: {
    padding: Spacing.xl,
    alignItems: 'center',
  },
  modalEmpty: {
    padding: Spacing.xl,
    alignItems: 'center',
  },
  emptyText: {
    fontSize: FontSizes.base,
    color: AppColors.textSecondary,
  },
  beneficiaryList: {
    paddingHorizontal: Spacing.md,
  },
  beneficiaryItem: {
    flexDirection: 'row',
    alignItems: 'center',
    padding: Spacing.md,
    backgroundColor: AppColors.surface,
    borderRadius: BorderRadius.md,
    marginTop: Spacing.sm,
  },
  beneficiaryItemSelected: {
    backgroundColor: AppColors.primaryLight || '#E3F2FD',
    borderWidth: 1,
    borderColor: AppColors.primary,
  },
  beneficiaryAvatar: {
    width: 44,
    height: 44,
    borderRadius: BorderRadius.full,
    backgroundColor: AppColors.primary,
    justifyContent: 'center',
    alignItems: 'center',
    marginRight: Spacing.md,
  },
  beneficiaryAvatarText: {
    fontSize: FontSizes.base,
    fontWeight: '600',
    color: AppColors.white,
  },
  beneficiaryInfo: {
    flex: 1,
  },
  beneficiaryName: {
    fontSize: FontSizes.base,
    fontWeight: '500',
    color: AppColors.textPrimary,
  },
  // Voice message styles
  voiceBubble: {
    borderWidth: 1,
    borderColor: 'rgba(59, 130, 246, 0.3)',
  },
  voiceIndicator: {
    position: 'absolute',
    top: 6,
    right: 6,
  },
  // System message styles
  systemMessageContainer: {
    flexDirection: 'row',
    alignItems: 'center',
    marginVertical: Spacing.md,
    paddingHorizontal: Spacing.md,
  },
  systemMessageLine: {
    flex: 1,
    height: 1,
    backgroundColor: AppColors.border,
  },
  systemMessageBadge: {
    flexDirection: 'row',
    alignItems: 'center',
    paddingHorizontal: Spacing.sm,
    paddingVertical: 4,
    backgroundColor: AppColors.surface,
    borderRadius: BorderRadius.sm,
    marginHorizontal: Spacing.sm,
  },
  systemMessageText: {
    fontSize: FontSizes.xs,
    color: AppColors.textMuted,
    marginLeft: 4,
  },
});