// WellNuoLite: lightweight build prepared for Apple moderation review.
// - Updated chat and voice tabs
// - Added TTS models and services
// - Updated dependencies
import React, { useState, useCallback, useRef, useEffect } from 'react';
|
|
import {
|
|
View,
|
|
Text,
|
|
StyleSheet,
|
|
FlatList,
|
|
TextInput,
|
|
TouchableOpacity,
|
|
KeyboardAvoidingView,
|
|
Platform,
|
|
Alert,
|
|
ActivityIndicator,
|
|
Modal,
|
|
ScrollView,
|
|
} from 'react-native';
|
|
import { Ionicons } from '@expo/vector-icons';
|
|
import { SafeAreaView } from 'react-native-safe-area-context';
|
|
import { useFocusEffect } from 'expo-router';
|
|
import { api } from '@/services/api';
|
|
import { useBeneficiary } from '@/contexts/BeneficiaryContext';
|
|
import { AppColors, BorderRadius, FontSizes, Spacing } from '@/constants/theme';
|
|
import type { Message } from '@/types';
|
|
import { useTTS } from '@/hooks/useTTS';
|
|
import { AVAILABLE_VOICES, getCurrentVoice, setVoice, type PiperVoice } from '@/services/sherpaTTS';
|
|
|
|
// Try to import speech recognition if available
|
|
let ExpoSpeechRecognitionModule: any = null;
|
|
let useSpeechRecognitionEvent: any = null;
|
|
try {
|
|
const speechRecognition = require('expo-speech-recognition');
|
|
ExpoSpeechRecognitionModule = speechRecognition.ExpoSpeechRecognitionModule;
|
|
useSpeechRecognitionEvent = speechRecognition.useSpeechRecognitionEvent;
|
|
} catch (e) {
|
|
console.log('expo-speech-recognition not available');
|
|
}
|
|
|
|
// Chat screen: text + voice conversation with the "Julia" AI assistant.
// Combines speech-to-text input (expo-speech-recognition, optional) with
// neural TTS output (sherpa/Piper voices via useTTS).
export default function ChatScreen() {
  // Selected beneficiary (chosen on the Dashboard tab) and a helper that
  // renders their details into a context string for the AI prompt.
  const { currentBeneficiary, getBeneficiaryContext } = useBeneficiary();

  // Conversation history, seeded with a greeting from the assistant.
  const [messages, setMessages] = useState<Message[]>([
    {
      id: '1',
      role: 'assistant',
      content: 'Hello! I\'m Julia, your AI assistant. How can I help you today?',
      timestamp: new Date(),
    },
  ]);
  const [input, setInput] = useState('');                   // draft message text
  const [isSending, setIsSending] = useState(false);        // backend request in flight
  const [isListening, setIsListening] = useState(false);    // mic actively recording
  const [recognizedText, setRecognizedText] = useState('');  // interim STT transcript shown while listening
  const [showVoicePicker, setShowVoicePicker] = useState(false);
  const [selectedVoice, setSelectedVoice] = useState<PiperVoice>(getCurrentVoice());
  const [isChangingVoice, setIsChangingVoice] = useState(false);
  const [voiceModeEnabled, setVoiceModeEnabled] = useState(false); // Voice Mode toggle
  const flatListRef = useRef<FlatList>(null);
  // Timestamp of the last send, used to debounce rapid-fire messages.
  const lastSendTimeRef = useRef<number>(0);
  const SEND_COOLDOWN_MS = 1000; // 1 second cooldown between messages

  // TTS hook for speaking responses
  const { speak, stop, isSpeaking } = useTTS();

  // Stop TTS and mic when navigating away from screen
  useFocusEffect(
    useCallback(() => {
      // Screen focused
      return () => {
        // Screen unfocused - cleanup
        stop(); // Stop any playing TTS
        if (ExpoSpeechRecognitionModule && isListening) {
          ExpoSpeechRecognitionModule.stop();
          setIsListening(false);
        }
        setVoiceModeEnabled(false); // Disable voice mode on leave
      };
    }, [stop, isListening])
  );
|
|
|
|
// Handle voice change
|
|
const handleVoiceChange = useCallback(async (voice: PiperVoice) => {
|
|
if (voice.id === selectedVoice.id) {
|
|
setShowVoicePicker(false);
|
|
return;
|
|
}
|
|
|
|
setIsChangingVoice(true);
|
|
try {
|
|
const success = await setVoice(voice.id);
|
|
if (success) {
|
|
setSelectedVoice(voice);
|
|
// Test the new voice
|
|
speak(`Hello, I'm ${voice.name}. How can I help you?`);
|
|
} else {
|
|
Alert.alert('Error', `Failed to switch to ${voice.name} voice.`);
|
|
}
|
|
} catch (error) {
|
|
Alert.alert('Error', 'Failed to change voice.');
|
|
} finally {
|
|
setIsChangingVoice(false);
|
|
setShowVoicePicker(false);
|
|
}
|
|
}, [selectedVoice, speak]);
|
|
|
|
// Speech recognition events (if available)
|
|
useEffect(() => {
|
|
if (!useSpeechRecognitionEvent) return;
|
|
|
|
// Handle recognized speech result
|
|
const resultSubscription = useSpeechRecognitionEvent('result', (event: any) => {
|
|
const transcript = event.results?.[0]?.transcript || '';
|
|
setRecognizedText(transcript);
|
|
if (event.isFinal) {
|
|
setInput(transcript);
|
|
setIsListening(false);
|
|
}
|
|
});
|
|
|
|
// Handle errors
|
|
const errorSubscription = useSpeechRecognitionEvent('error', (event: any) => {
|
|
console.log('Speech recognition error:', event.error);
|
|
setIsListening(false);
|
|
});
|
|
|
|
// Handle end
|
|
const endSubscription = useSpeechRecognitionEvent('end', () => {
|
|
setIsListening(false);
|
|
});
|
|
|
|
return () => {
|
|
resultSubscription?.remove?.();
|
|
errorSubscription?.remove?.();
|
|
endSubscription?.remove?.();
|
|
};
|
|
}, []);
|
|
|
|
// Start voice input
|
|
const startListening = useCallback(async () => {
|
|
if (!ExpoSpeechRecognitionModule) {
|
|
Alert.alert('Not Available', 'Voice input is not available on this device.');
|
|
return;
|
|
}
|
|
|
|
// PREVENT SELF-RECORDING: Don't start mic while TTS is speaking
|
|
if (isSpeaking) {
|
|
console.log('[Voice] Blocked: TTS is still speaking');
|
|
return;
|
|
}
|
|
|
|
try {
|
|
const result = await ExpoSpeechRecognitionModule.requestPermissionsAsync();
|
|
if (!result.granted) {
|
|
Alert.alert('Permission Denied', 'Please enable microphone access to use voice input.');
|
|
return;
|
|
}
|
|
|
|
// Enable voice mode when user starts listening
|
|
setVoiceModeEnabled(true);
|
|
setIsListening(true);
|
|
setRecognizedText('');
|
|
ExpoSpeechRecognitionModule.start({
|
|
lang: 'en-US',
|
|
interimResults: true,
|
|
maxAlternatives: 1,
|
|
});
|
|
} catch (error) {
|
|
console.error('Failed to start speech recognition:', error);
|
|
setIsListening(false);
|
|
Alert.alert('Error', 'Failed to start voice input.');
|
|
}
|
|
}, [isSpeaking]);
|
|
|
|
// Stop voice input
|
|
const stopListening = useCallback(() => {
|
|
if (ExpoSpeechRecognitionModule) {
|
|
ExpoSpeechRecognitionModule.stop();
|
|
}
|
|
setIsListening(false);
|
|
}, []);
|
|
|
|
const handleSend = useCallback(async () => {
|
|
const trimmedInput = input.trim();
|
|
if (!trimmedInput || isSending) return;
|
|
|
|
// Debounce: prevent rapid-fire messages
|
|
const now = Date.now();
|
|
if (now - lastSendTimeRef.current < SEND_COOLDOWN_MS) {
|
|
return;
|
|
}
|
|
lastSendTimeRef.current = now;
|
|
|
|
// Security: require beneficiary to be selected
|
|
if (!currentBeneficiary?.id) {
|
|
Alert.alert(
|
|
'Select Beneficiary',
|
|
'Please select a beneficiary from the Dashboard tab before starting a conversation.',
|
|
[{ text: 'OK' }]
|
|
);
|
|
return;
|
|
}
|
|
|
|
const userMessage: Message = {
|
|
id: Date.now().toString(),
|
|
role: 'user',
|
|
content: trimmedInput,
|
|
timestamp: new Date(),
|
|
};
|
|
|
|
setMessages((prev) => [...prev, userMessage]);
|
|
setInput('');
|
|
setIsSending(true);
|
|
|
|
try {
|
|
// Prepend beneficiary context to the question if available
|
|
const beneficiaryContext = getBeneficiaryContext();
|
|
const questionWithContext = beneficiaryContext
|
|
? `${beneficiaryContext} ${trimmedInput}`
|
|
: trimmedInput;
|
|
|
|
// Pass deployment_id from selected beneficiary (required, no fallback)
|
|
const deploymentId = currentBeneficiary.id.toString();
|
|
const response = await api.sendMessage(questionWithContext, deploymentId);
|
|
|
|
if (response.ok && response.data?.response) {
|
|
const responseText = response.data.response.body;
|
|
const assistantMessage: Message = {
|
|
id: (Date.now() + 1).toString(),
|
|
role: 'assistant',
|
|
content: responseText,
|
|
timestamp: new Date(),
|
|
};
|
|
setMessages((prev) => [...prev, assistantMessage]);
|
|
// Speak the response using neural TTS
|
|
speak(responseText);
|
|
} else {
|
|
const errorMessage: Message = {
|
|
id: (Date.now() + 1).toString(),
|
|
role: 'assistant',
|
|
content: 'Sorry, I encountered an error. Please try again.',
|
|
timestamp: new Date(),
|
|
};
|
|
setMessages((prev) => [...prev, errorMessage]);
|
|
}
|
|
} catch (error) {
|
|
const errorMessage: Message = {
|
|
id: (Date.now() + 1).toString(),
|
|
role: 'assistant',
|
|
content: 'Sorry, I couldn\'t connect to the server. Please check your internet connection.',
|
|
timestamp: new Date(),
|
|
};
|
|
setMessages((prev) => [...prev, errorMessage]);
|
|
} finally {
|
|
setIsSending(false);
|
|
}
|
|
}, [input, isSending, currentBeneficiary, getBeneficiaryContext]);
|
|
|
|
  // Render one chat bubble: assistant messages get a "J" avatar on the left,
  // user messages are right-aligned with inverted colors.
  const renderMessage = ({ item }: { item: Message }) => {
    const isUser = item.role === 'user';

    return (
      <View
        style={[
          styles.messageContainer,
          isUser ? styles.userMessageContainer : styles.assistantMessageContainer,
        ]}
      >
        {!isUser && (
          <View style={styles.avatarContainer}>
            <Text style={styles.avatarText}>J</Text>
          </View>
        )}
        <View
          style={[
            styles.messageBubble,
            isUser ? styles.userBubble : styles.assistantBubble,
          ]}
        >
          <Text
            style={[
              styles.messageText,
              isUser ? styles.userMessageText : styles.assistantMessageText,
            ]}
          >
            {item.content}
          </Text>
          {/* Locale-formatted HH:MM timestamp inside the bubble */}
          <Text style={[styles.timestamp, isUser && styles.userTimestamp]}>
            {item.timestamp.toLocaleTimeString([], { hour: '2-digit', minute: '2-digit' })}
          </Text>
        </View>
      </View>
    );
  };
|
|
|
|
  // Layout: header (title + voice controls) / message list / listening
  // indicator / input row, plus a bottom-sheet modal for picking a TTS voice.
  return (
    <SafeAreaView style={styles.container} edges={['top']}>
      {/* Header */}
      <View style={styles.header}>
        <View style={styles.headerInfo}>
          <View style={styles.headerAvatar}>
            <Text style={styles.headerAvatarText}>J</Text>
          </View>
          <View>
            <Text style={styles.headerTitle}>Julia AI</Text>
            {/* Subtitle doubles as a status line: typing > beneficiary > online */}
            <Text style={styles.headerSubtitle}>
              {isSending
                ? 'Typing...'
                : currentBeneficiary
                ? `About ${currentBeneficiary.name}`
                : 'Online'}
            </Text>
          </View>
        </View>
        <View style={styles.headerButtons}>
          {/* Voice Mode Toggle - tapping it turns voice mode (and any active TTS/mic) off */}
          {voiceModeEnabled && (
            <TouchableOpacity
              style={[styles.headerButton, styles.voiceModeActive]}
              onPress={() => {
                stop();
                stopListening();
                setVoiceModeEnabled(false);
              }}
            >
              <Ionicons name="mic" size={20} color={AppColors.white} />
              <Text style={styles.voiceModeText}>ON</Text>
            </TouchableOpacity>
          )}
          {/* TTS Speaking indicator - tap to interrupt playback */}
          {isSpeaking && (
            <TouchableOpacity
              style={[styles.headerButton, styles.speakingIndicator]}
              onPress={stop}
            >
              <Ionicons name="volume-high" size={20} color={AppColors.white} />
            </TouchableOpacity>
          )}
          {/* Voice Picker */}
          <TouchableOpacity
            style={styles.headerButton}
            onPress={() => setShowVoicePicker(true)}
          >
            <Ionicons name="volume-high-outline" size={24} color={AppColors.textPrimary} />
          </TouchableOpacity>
        </View>
      </View>

      {/* Messages */}
      <KeyboardAvoidingView
        style={styles.chatContainer}
        behavior={Platform.OS === 'ios' ? 'padding' : undefined}
        keyboardVerticalOffset={Platform.OS === 'ios' ? 90 : 0}
      >
        <FlatList
          ref={flatListRef}
          data={messages}
          keyExtractor={(item) => item.id}
          renderItem={renderMessage}
          contentContainerStyle={styles.messagesList}
          showsVerticalScrollIndicator={false}
          onContentSizeChange={() => flatListRef.current?.scrollToEnd({ animated: true })}
        />

        {/* Listening indicator - shows the interim transcript while the mic is open */}
        {isListening && (
          <View style={styles.listeningIndicator}>
            <ActivityIndicator color={AppColors.primary} size="small" />
            <Text style={styles.listeningText}>
              {recognizedText || 'Listening...'}
            </Text>
            <TouchableOpacity onPress={stopListening} style={styles.stopButton}>
              <Ionicons name="close-circle" size={24} color={AppColors.error} />
            </TouchableOpacity>
          </View>
        )}

        {/* Input */}
        <View style={styles.inputContainer}>
          {/* Microphone button - toggles between start/stop listening */}
          <TouchableOpacity
            style={[styles.micButton, isListening && styles.micButtonActive]}
            onPress={isListening ? stopListening : startListening}
            disabled={isSending}
          >
            <Ionicons
              name={isListening ? 'mic' : 'mic-outline'}
              size={22}
              color={isListening ? AppColors.white : AppColors.primary}
            />
          </TouchableOpacity>

          <TextInput
            style={styles.input}
            placeholder="Type a message..."
            placeholderTextColor={AppColors.textMuted}
            value={input}
            onChangeText={setInput}
            multiline
            maxLength={1000}
            editable={!isSending && !isListening}
            onSubmitEditing={handleSend}
          />
          <TouchableOpacity
            style={[styles.sendButton, (!input.trim() || isSending) && styles.sendButtonDisabled]}
            onPress={handleSend}
            disabled={!input.trim() || isSending}
          >
            <Ionicons
              name={isSending ? 'hourglass' : 'send'}
              size={20}
              color={input.trim() && !isSending ? AppColors.white : AppColors.textMuted}
            />
          </TouchableOpacity>
        </View>
      </KeyboardAvoidingView>

      {/* Voice Picker Modal - bottom sheet listing all Piper voices */}
      <Modal
        visible={showVoicePicker}
        animationType="slide"
        transparent={true}
        onRequestClose={() => setShowVoicePicker(false)}
      >
        <View style={styles.modalOverlay}>
          <View style={styles.modalContent}>
            <View style={styles.modalHeader}>
              <Text style={styles.modalTitle}>Select Voice</Text>
              <TouchableOpacity
                style={styles.modalCloseButton}
                onPress={() => setShowVoicePicker(false)}
              >
                <Ionicons name="close" size={24} color={AppColors.textPrimary} />
              </TouchableOpacity>
            </View>

            <Text style={styles.modalSubtitle}>
              Neural TTS voices for Julia AI
            </Text>

            <ScrollView style={styles.voiceList}>
              {AVAILABLE_VOICES.map((voice) => (
                <TouchableOpacity
                  key={voice.id}
                  style={[
                    styles.voiceItem,
                    selectedVoice.id === voice.id && styles.voiceItemSelected,
                  ]}
                  onPress={() => handleVoiceChange(voice)}
                  disabled={isChangingVoice}
                >
                  <View style={styles.voiceIcon}>
                    <Ionicons
                      name={voice.gender === 'female' ? 'woman' : 'man'}
                      size={24}
                      color={selectedVoice.id === voice.id ? AppColors.white : AppColors.primary}
                    />
                  </View>
                  <View style={styles.voiceInfo}>
                    <Text
                      style={[
                        styles.voiceName,
                        selectedVoice.id === voice.id && styles.voiceNameSelected,
                      ]}
                    >
                      {voice.name}
                    </Text>
                    <Text
                      style={[
                        styles.voiceDescription,
                        selectedVoice.id === voice.id && styles.voiceDescriptionSelected,
                      ]}
                    >
                      {voice.description}
                    </Text>
                  </View>
                  {/* Checkmark on the selected voice; spinner on others while switching */}
                  {selectedVoice.id === voice.id && (
                    <Ionicons name="checkmark-circle" size={24} color={AppColors.white} />
                  )}
                  {isChangingVoice && selectedVoice.id !== voice.id && (
                    <ActivityIndicator size="small" color={AppColors.primary} />
                  )}
                </TouchableOpacity>
              ))}
            </ScrollView>

            <Text style={styles.voiceHint}>
              Tap a voice to hear a preview
            </Text>
          </View>
        </View>
      </Modal>
    </SafeAreaView>
  );
}
|
|
|
|
const styles = StyleSheet.create({
  // ---- Screen chrome ----
  container: {
    flex: 1,
    backgroundColor: AppColors.surface,
  },
  header: {
    flexDirection: 'row',
    alignItems: 'center',
    justifyContent: 'space-between',
    paddingHorizontal: Spacing.md,
    paddingVertical: Spacing.sm,
    backgroundColor: AppColors.background,
    borderBottomWidth: 1,
    borderBottomColor: AppColors.border,
  },
  headerInfo: {
    flexDirection: 'row',
    alignItems: 'center',
  },
  headerAvatar: {
    width: 40,
    height: 40,
    borderRadius: BorderRadius.full,
    backgroundColor: AppColors.success,
    justifyContent: 'center',
    alignItems: 'center',
    marginRight: Spacing.sm,
  },
  headerAvatarText: {
    fontSize: FontSizes.lg,
    fontWeight: '600',
    color: AppColors.white,
  },
  headerTitle: {
    fontSize: FontSizes.lg,
    fontWeight: '600',
    color: AppColors.textPrimary,
  },
  headerSubtitle: {
    fontSize: FontSizes.sm,
    color: AppColors.success,
  },
  headerButtons: {
    flexDirection: 'row',
    alignItems: 'center',
    gap: 8,
  },
  headerButton: {
    padding: Spacing.xs,
  },
  // Pill shown in the header while Voice Mode is on.
  voiceModeActive: {
    flexDirection: 'row',
    alignItems: 'center',
    backgroundColor: AppColors.primary,
    borderRadius: BorderRadius.md,
    paddingHorizontal: 10,
    paddingVertical: 6,
    gap: 4,
  },
  voiceModeText: {
    color: AppColors.white,
    fontSize: FontSizes.xs,
    fontWeight: '600',
  },
  // Badge shown while TTS audio is playing.
  speakingIndicator: {
    backgroundColor: AppColors.success,
    borderRadius: BorderRadius.md,
    padding: 6,
  },
  // ---- Message list ----
  chatContainer: {
    flex: 1,
  },
  messagesList: {
    padding: Spacing.md,
    paddingBottom: Spacing.lg,
  },
  messageContainer: {
    flexDirection: 'row',
    marginBottom: Spacing.md,
    alignItems: 'flex-end',
  },
  userMessageContainer: {
    justifyContent: 'flex-end',
  },
  assistantMessageContainer: {
    justifyContent: 'flex-start',
  },
  avatarContainer: {
    width: 32,
    height: 32,
    borderRadius: BorderRadius.full,
    backgroundColor: AppColors.success,
    justifyContent: 'center',
    alignItems: 'center',
    marginRight: Spacing.xs,
  },
  avatarText: {
    fontSize: FontSizes.sm,
    fontWeight: '600',
    color: AppColors.white,
  },
  messageBubble: {
    maxWidth: '75%',
    padding: Spacing.sm + 4,
    borderRadius: BorderRadius.lg,
  },
  userBubble: {
    backgroundColor: AppColors.primary,
    borderBottomRightRadius: BorderRadius.sm,
  },
  assistantBubble: {
    backgroundColor: AppColors.background,
    borderBottomLeftRadius: BorderRadius.sm,
  },
  messageText: {
    fontSize: FontSizes.base,
    lineHeight: 22,
  },
  userMessageText: {
    color: AppColors.white,
  },
  assistantMessageText: {
    color: AppColors.textPrimary,
  },
  timestamp: {
    fontSize: FontSizes.xs,
    color: AppColors.textMuted,
    marginTop: Spacing.xs,
    alignSelf: 'flex-end',
  },
  userTimestamp: {
    color: 'rgba(255,255,255,0.7)',
  },
  // ---- Input row ----
  inputContainer: {
    flexDirection: 'row',
    alignItems: 'flex-end',
    padding: Spacing.md,
    backgroundColor: AppColors.background,
    borderTopWidth: 1,
    borderTopColor: AppColors.border,
  },
  input: {
    flex: 1,
    backgroundColor: AppColors.surface,
    borderRadius: BorderRadius.xl,
    paddingHorizontal: Spacing.md,
    paddingVertical: Spacing.sm,
    fontSize: FontSizes.base,
    color: AppColors.textPrimary,
    maxHeight: 100,
    marginRight: Spacing.sm,
  },
  sendButton: {
    width: 44,
    height: 44,
    borderRadius: BorderRadius.full,
    backgroundColor: AppColors.primary,
    justifyContent: 'center',
    alignItems: 'center',
  },
  sendButtonDisabled: {
    backgroundColor: AppColors.surface,
  },
  micButton: {
    width: 44,
    height: 44,
    borderRadius: BorderRadius.full,
    backgroundColor: AppColors.surface,
    justifyContent: 'center',
    alignItems: 'center',
    marginRight: Spacing.sm,
    borderWidth: 1,
    borderColor: AppColors.primary,
  },
  micButtonActive: {
    backgroundColor: AppColors.primary,
    borderColor: AppColors.primary,
  },
  // Strip above the input row showing the live STT transcript.
  listeningIndicator: {
    flexDirection: 'row',
    alignItems: 'center',
    paddingHorizontal: Spacing.md,
    paddingVertical: Spacing.sm,
    backgroundColor: AppColors.surface,
    borderTopWidth: 1,
    borderTopColor: AppColors.border,
  },
  listeningText: {
    flex: 1,
    marginLeft: Spacing.sm,
    fontSize: FontSizes.sm,
    color: AppColors.textSecondary,
    fontStyle: 'italic',
  },
  stopButton: {
    padding: Spacing.xs,
  },
  // Voice Picker Modal styles
  modalOverlay: {
    flex: 1,
    backgroundColor: 'rgba(0, 0, 0, 0.5)',
    justifyContent: 'flex-end',
  },
  modalContent: {
    backgroundColor: AppColors.background,
    borderTopLeftRadius: BorderRadius.xl,
    borderTopRightRadius: BorderRadius.xl,
    paddingTop: Spacing.lg,
    paddingBottom: Spacing.xl + 20,
    maxHeight: '60%',
  },
  modalHeader: {
    flexDirection: 'row',
    justifyContent: 'space-between',
    alignItems: 'center',
    paddingHorizontal: Spacing.lg,
    marginBottom: Spacing.sm,
  },
  modalTitle: {
    fontSize: FontSizes.xl,
    fontWeight: '600',
    color: AppColors.textPrimary,
  },
  modalCloseButton: {
    padding: Spacing.xs,
  },
  modalSubtitle: {
    fontSize: FontSizes.sm,
    color: AppColors.textSecondary,
    paddingHorizontal: Spacing.lg,
    marginBottom: Spacing.md,
  },
  voiceList: {
    paddingHorizontal: Spacing.lg,
  },
  voiceItem: {
    flexDirection: 'row',
    alignItems: 'center',
    padding: Spacing.md,
    backgroundColor: AppColors.surface,
    borderRadius: BorderRadius.lg,
    marginBottom: Spacing.sm,
    borderWidth: 2,
    borderColor: 'transparent',
  },
  voiceItemSelected: {
    borderColor: AppColors.primary,
    // "15" / "20" suffixes below are hex alpha appended to the color string.
    backgroundColor: `${AppColors.primary}15`,
  },
  voiceIcon: {
    width: 48,
    height: 48,
    borderRadius: BorderRadius.full,
    backgroundColor: AppColors.primary + '20',
    justifyContent: 'center',
    alignItems: 'center',
    marginRight: Spacing.md,
  },
  voiceInfo: {
    flex: 1,
  },
  voiceName: {
    fontSize: FontSizes.base,
    fontWeight: '600',
    color: AppColors.textPrimary,
    marginBottom: 2,
  },
  voiceNameSelected: {
    color: AppColors.primary,
  },
  voiceDescription: {
    fontSize: FontSizes.sm,
    color: AppColors.textSecondary,
  },
  voiceDescriptionSelected: {
    color: AppColors.primary,
  },
  voiceHint: {
    fontSize: FontSizes.xs,
    color: AppColors.textSecondary,
    marginTop: 4,
  },
});
|