2026-01-29: Stable version with voice debug and iOS STT fix

Added:
- Voice Debug tab - real-time logs for STT/API/TTS/Timer events
- iOS STT fix - send the last partial transcript as final on onEnd
- iOS auto-stop - automatically stop STT after 2s of silence
- Voice API selector in Profile (voice_ask / ask_wellnuo_ai)

Fixed:
- iOS never sent isFinal:true - the last partial is now promoted to final via onEnd
- STT did not stop after silence - added an auto-stop timer
- Profile Voice API selector restored after the rollback

Known issues (see the sketch below):
- TypeScript errors (setTimeout type) - not critical
- updateVoiceApiType is missing from VoiceContext - needs to be added

Stable version for testing on iPhone.
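
A minimal sketch for both known issues, assuming the hook shapes shown in the diffs below. updateVoiceApiType does not exist in VoiceContext yet, so only its signature (fixed by the Profile call site) is grounded; the body is an assumption, not the committed implementation:

import { useCallback, useRef } from 'react';

type VoiceApiType = 'voice_ask' | 'ask_wellnuo_ai';

function useVoicePatchSketch() {
  // ReturnType<typeof setTimeout> sidesteps the NodeJS.Timeout vs number
  // mismatch between Node and React Native typings (the setTimeout error).
  const silenceTimerRef = useRef<ReturnType<typeof setTimeout> | null>(null);

  // Hypothetical VoiceContext addition: Profile already calls
  // updateVoiceApiType(type), so the name and signature come from that call
  // site; storing the value in a ref for the next request is an assumption.
  const voiceApiTypeRef = useRef<VoiceApiType>('ask_wellnuo_ai');
  const updateVoiceApiType = useCallback((type: VoiceApiType) => {
    voiceApiTypeRef.current = type; // read by the next voice request
  }, []);

  return { silenceTimerRef, updateVoiceApiType };
}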
Sergei 2026-01-28 19:45:40 -08:00
parent 05f872d067
commit d6353c8533
6 changed files with 715 additions and 0 deletions

REVIEW_REPORT.md (new file, +1)

@@ -0,0 +1 @@
Limit reached · resets 1pm (America/Los_Angeles) · turn on /extra-usage


@@ -29,6 +29,7 @@ export default function TabLayout() {
interruptIfSpeaking,
setTranscript,
setPartialTranscript,
partialTranscript, // for iOS auto-stop timer
sendTranscript,
} = useVoice();
@@ -97,6 +98,45 @@
// Ref to prevent concurrent startListening calls
const sttStartingRef = useRef(false);
// Ref to track last partial transcript for iOS auto-stop
const lastPartialTextRef = useRef('');
const silenceTimerRef = useRef<NodeJS.Timeout | null>(null);
// iOS AUTO-STOP: Stop STT after 2 seconds of silence (no new partial transcripts)
// This triggers onEnd → iOS fix sends lastPartial as final
useEffect(() => {
// Clear existing timer
if (silenceTimerRef.current) {
clearTimeout(silenceTimerRef.current);
silenceTimerRef.current = null;
}
// Only track silence when STT is listening (not during processing/speaking)
if (sttIsListening && status !== 'processing' && status !== 'speaking') {
// Get current partial from VoiceContext (set by handleSpeechResult)
const currentPartial = partialTranscript;
// If partial changed, update ref and set new 2s timer
if (currentPartial !== lastPartialTextRef.current) {
lastPartialTextRef.current = currentPartial;
// Start 2-second silence timer
silenceTimerRef.current = setTimeout(() => {
if (sttIsListening && sessionActiveRef.current) {
console.log('[TabLayout] 🍎 iOS AUTO-STOP: 2s silence - stopping STT to trigger onEnd → iOS fix');
stopListening();
}
}, 2000);
}
}
return () => {
if (silenceTimerRef.current) {
clearTimeout(silenceTimerRef.current);
silenceTimerRef.current = null;
}
};
}, [sttIsListening, status, partialTranscript, stopListening]);
// Safe wrapper to start STT with debounce protection
const safeStartSTT = useCallback(() => {
@@ -300,6 +340,16 @@
),
}}
/>
{/* Voice Debug - visible tab */}
<Tabs.Screen
name="voice-debug"
options={{
title: 'Debug',
tabBarIcon: ({ color, size }) => (
<Feather name="activity" size={22} color={color} />
),
}}
/>
<Tabs.Screen
name="profile"
options={{


@@ -15,6 +15,7 @@ import { router } from 'expo-router';
import { Ionicons } from '@expo/vector-icons';
import { SafeAreaView } from 'react-native-safe-area-context';
import { useAuth } from '@/contexts/AuthContext';
import { useVoice } from '@/contexts/VoiceContext';
import { api } from '@/services/api';
import { AppColors, BorderRadius, FontSizes, Spacing } from '@/constants/theme';
@@ -55,6 +56,7 @@
export default function ProfileScreen() {
const { user, logout } = useAuth();
const { updateVoiceApiType } = useVoice();
const [deploymentId, setDeploymentId] = useState<string>('');
const [deploymentName, setDeploymentName] = useState<string>('');
const [showDeploymentModal, setShowDeploymentModal] = useState(false);
@@ -62,6 +64,11 @@
const [isValidating, setIsValidating] = useState(false);
const [validationError, setValidationError] = useState<string | null>(null);
// Voice API Type state
const [voiceApiType, setVoiceApiType] = useState<'voice_ask' | 'ask_wellnuo_ai'>('ask_wellnuo_ai');
const [showVoiceApiModal, setShowVoiceApiModal] = useState(false);
const [tempVoiceApiType, setTempVoiceApiType] = useState<'voice_ask' | 'ask_wellnuo_ai'>('ask_wellnuo_ai');
// Load saved deployment ID or auto-populate from first available
useEffect(() => {
const loadDeploymentId = async () => {
@@ -88,12 +95,26 @@
loadDeploymentId();
}, []);
// Load saved Voice API type
useEffect(() => {
const loadVoiceApiType = async () => {
const saved = await api.getVoiceApiType();
setVoiceApiType(saved);
};
loadVoiceApiType();
}, []);
const openDeploymentModal = useCallback(() => {
setTempDeploymentId(deploymentId);
setValidationError(null);
setShowDeploymentModal(true);
}, [deploymentId]);
const openVoiceApiModal = useCallback(() => {
setTempVoiceApiType(voiceApiType);
setShowVoiceApiModal(true);
}, [voiceApiType]);
const saveDeploymentId = useCallback(async () => {
const trimmed = tempDeploymentId.trim();
setValidationError(null);
@@ -128,6 +149,13 @@
}
}, [tempDeploymentId]);
const saveVoiceApiType = useCallback(async () => {
await api.setVoiceApiType(tempVoiceApiType);
setVoiceApiType(tempVoiceApiType);
updateVoiceApiType(tempVoiceApiType);
setShowVoiceApiModal(false);
}, [tempVoiceApiType, updateVoiceApiType]);
const openTerms = () => {
router.push('/terms');
};
@@ -185,6 +213,15 @@
subtitle={deploymentId ? (deploymentName || `ID: ${deploymentId}`) : 'Auto'}
onPress={openDeploymentModal}
/>
<View style={styles.menuDivider} />
<MenuItem
icon="radio-outline"
iconColor="#9333EA"
iconBgColor="#F3E8FF"
title="Voice API"
subtitle={voiceApiType === 'voice_ask' ? 'voice_ask' : 'ask_wellnuo_ai (LLaMA)'}
onPress={openVoiceApiModal}
/>
</View>
</View>
@@ -271,6 +308,65 @@
</View>
</KeyboardAvoidingView>
</Modal>
{/* Voice API Modal */}
<Modal
visible={showVoiceApiModal}
transparent
animationType="fade"
onRequestClose={() => setShowVoiceApiModal(false)}
>
<View style={styles.modalOverlay}>
<View style={styles.modalContent}>
<Text style={styles.modalTitle}>Voice API</Text>
<Text style={styles.modalDescription}>
Choose which API function to use for voice requests.
</Text>
{/* Radio buttons */}
<TouchableOpacity
style={styles.radioOption}
onPress={() => setTempVoiceApiType('ask_wellnuo_ai')}
>
<View style={styles.radioCircle}>
{tempVoiceApiType === 'ask_wellnuo_ai' && <View style={styles.radioCircleSelected} />}
</View>
<View style={styles.radioTextContainer}>
<Text style={styles.radioLabel}>ask_wellnuo_ai</Text>
<Text style={styles.radioDescription}>LLaMA with WellNuo data</Text>
</View>
</TouchableOpacity>
<TouchableOpacity
style={styles.radioOption}
onPress={() => setTempVoiceApiType('voice_ask')}
>
<View style={styles.radioCircle}>
{tempVoiceApiType === 'voice_ask' && <View style={styles.radioCircleSelected} />}
</View>
<View style={styles.radioTextContainer}>
<Text style={styles.radioLabel}>voice_ask</Text>
<Text style={styles.radioDescription}>Alternative voice API</Text>
</View>
</TouchableOpacity>
<View style={styles.modalButtons}>
<TouchableOpacity
style={styles.modalButtonCancel}
onPress={() => setShowVoiceApiModal(false)}
>
<Text style={styles.modalButtonCancelText}>Cancel</Text>
</TouchableOpacity>
<TouchableOpacity
style={styles.modalButtonSave}
onPress={saveVoiceApiType}
>
<Text style={styles.modalButtonSaveText}>Save</Text>
</TouchableOpacity>
</View>
</View>
</View>
</Modal>
</SafeAreaView>
);
}
@@ -472,4 +568,40 @@
disabledText: {
opacity: 0.5,
},
// Radio button styles
radioOption: {
flexDirection: 'row',
alignItems: 'center',
paddingVertical: Spacing.sm + 4,
marginBottom: Spacing.xs,
},
radioCircle: {
width: 24,
height: 24,
borderRadius: 12,
borderWidth: 2,
borderColor: AppColors.primary,
alignItems: 'center',
justifyContent: 'center',
marginRight: Spacing.md,
},
radioCircleSelected: {
width: 12,
height: 12,
borderRadius: 6,
backgroundColor: AppColors.primary,
},
radioTextContainer: {
flex: 1,
},
radioLabel: {
fontSize: FontSizes.base,
fontWeight: '500',
color: AppColors.textPrimary,
marginBottom: 2,
},
radioDescription: {
fontSize: FontSizes.xs,
color: AppColors.textSecondary,
},
});

app/(tabs)/voice-debug.tsx (new file, +504)

@@ -0,0 +1,504 @@
/**
* Voice Debug Screen
*
* Real-time debugging interface for voice recognition pipeline.
* Shows all events, timers, API calls, and state changes.
*/
import React, { useState, useEffect, useRef, useCallback } from 'react';
import {
View,
Text,
ScrollView,
StyleSheet,
TouchableOpacity,
} from 'react-native';
import { useSafeAreaInsets } from 'react-native-safe-area-context';
import { Feather } from '@expo/vector-icons';
import { useVoice } from '@/contexts/VoiceContext';
import { useSpeechRecognition } from '@/hooks/useSpeechRecognition';
import { AppColors } from '@/constants/theme';
import { useColorScheme } from '@/hooks/use-color-scheme';
interface LogEntry {
id: string;
timestamp: number;
category: 'stt' | 'api' | 'tts' | 'timer' | 'system';
message: string;
level: 'info' | 'warning' | 'error' | 'success';
data?: any;
}
export default function VoiceDebugScreen() {
const colorScheme = useColorScheme();
const isDark = colorScheme === 'dark';
const insets = useSafeAreaInsets();
const {
isListening,
isSpeaking,
status,
startSession,
stopSession,
} = useVoice();
const {
isListening: sttIsListening,
partialTranscript,
recognizedText,
} = useSpeechRecognition({
lang: 'en-US',
continuous: true,
interimResults: true,
});
const [logs, setLogs] = useState<LogEntry[]>([]);
const [silenceTimer, setSilenceTimer] = useState(0);
const scrollViewRef = useRef<ScrollView>(null);
const logIdCounter = useRef(0);
const lastPartialRef = useRef('');
// Add log entry
const addLog = useCallback((
category: LogEntry['category'],
message: string,
level: LogEntry['level'] = 'info',
data?: any
) => {
const entry: LogEntry = {
id: `log-${logIdCounter.current++}`,
timestamp: Date.now(),
category,
message,
level,
data,
};
console.log(`[VoiceDebug:${category}]`, message, data || '');
setLogs(prev => {
const updated = [...prev, entry];
// Keep only last 100 logs
return updated.slice(-100);
});
setTimeout(() => {
scrollViewRef.current?.scrollToEnd({ animated: true });
}, 50);
}, []);
// Clear logs
const clearLogs = useCallback(() => {
setLogs([]);
logIdCounter.current = 0;
addLog('system', 'Logs cleared', 'info');
}, [addLog]);
// Monitor voice session state
useEffect(() => {
if (isListening) {
addLog('system', '🎤 Voice session STARTED', 'success');
} else {
addLog('system', '⏹️ Voice session STOPPED', 'info');
setSilenceTimer(0);
}
}, [isListening, addLog]);
// Monitor STT state
useEffect(() => {
if (sttIsListening) {
addLog('stt', '▶️ STT listening started', 'success');
} else if (isListening) {
addLog('stt', '⏸️ STT stopped (but session active)', 'warning');
}
}, [sttIsListening, isListening, addLog]);
// Monitor status changes
useEffect(() => {
if (status === 'processing') {
addLog('api', '⚙️ Processing transcript → sending to API', 'info');
} else if (status === 'speaking') {
addLog('tts', '🔊 TTS playing (Julia speaking)', 'info');
} else if (status === 'listening') {
addLog('system', '👂 Ready to listen', 'info');
}
}, [status, addLog]);
// Monitor partial transcripts
useEffect(() => {
if (partialTranscript && partialTranscript !== lastPartialRef.current) {
lastPartialRef.current = partialTranscript;
addLog('stt', `📝 Partial: "${partialTranscript.slice(0, 40)}${partialTranscript.length > 40 ? '...' : ''}"`, 'info');
// Reset silence timer
setSilenceTimer(0);
addLog('timer', '🔄 Silence timer RESET', 'warning');
}
}, [partialTranscript, addLog]);
// Monitor final transcripts
useEffect(() => {
if (recognizedText && recognizedText !== lastPartialRef.current) {
addLog('stt', `✅ FINAL: "${recognizedText.slice(0, 40)}${recognizedText.length > 40 ? '...' : ''}"`, 'success', {
length: recognizedText.length,
transcript: recognizedText
});
addLog('api', '📤 Sending to API...', 'info');
}
}, [recognizedText, addLog]);
// Silence timer (only when STT is listening and not processing/speaking)
useEffect(() => {
let interval: NodeJS.Timeout | null = null;
if (sttIsListening && status !== 'processing' && status !== 'speaking') {
interval = setInterval(() => {
setSilenceTimer(prev => {
const next = prev + 100;
// Log milestones
if (next === 1000) {
addLog('timer', '⏱️ Silence: 1.0s', 'info');
} else if (next === 1500) {
addLog('timer', '⏱️ Silence: 1.5s', 'warning');
} else if (next === 2000) {
addLog('timer', '🛑 Silence: 2.0s → AUTO-STOP triggered', 'error');
}
return next;
});
}, 100);
} else {
setSilenceTimer(0);
}
return () => {
if (interval) clearInterval(interval);
};
}, [sttIsListening, status, addLog]);
// Get status indicator
const getStatusDisplay = () => {
if (status === 'speaking' || isSpeaking) {
return { color: '#9333EA', icon: '🔊', text: 'Speaking' };
}
if (status === 'processing') {
return { color: '#F59E0B', icon: '⚙️', text: 'Processing' };
}
if (isListening && sttIsListening) {
return { color: '#10B981', icon: '🟢', text: 'Listening' };
}
if (isListening && !sttIsListening) {
return { color: '#F59E0B', icon: '🟡', text: 'Session Active (STT Off)' };
}
return { color: '#6B7280', icon: '⚪', text: 'Idle' };
};
const statusDisplay = getStatusDisplay();
const silenceProgress = Math.min(silenceTimer / 2000, 1);
const silenceSeconds = (silenceTimer / 1000).toFixed(1);
// Log level colors
const getLogColor = (level: LogEntry['level']) => {
switch (level) {
case 'error': return '#EF4444';
case 'warning': return '#F59E0B';
case 'success': return '#10B981';
default: return isDark ? '#D1D5DB' : '#374151';
}
};
// Category icons
const getCategoryIcon = (category: LogEntry['category']) => {
switch (category) {
case 'stt': return '🎤';
case 'api': return '📡';
case 'tts': return '🔊';
case 'timer': return '⏱️';
case 'system': return '⚙️';
default: return '•';
}
};
return (
<View style={[styles.container, { backgroundColor: isDark ? '#0A0A0A' : '#FFFFFF' }]}>
{/* Header */}
<View style={[styles.header, { paddingTop: insets.top + 16 }]}>
<Text style={[styles.headerTitle, { color: isDark ? '#FFFFFF' : '#000000' }]}>
Voice Debug
</Text>
<TouchableOpacity onPress={clearLogs} style={styles.clearButton}>
<Feather name="trash-2" size={20} color={isDark ? '#9CA3AF' : '#6B7280'} />
</TouchableOpacity>
</View>
{/* Status Card */}
<View style={[styles.statusCard, {
backgroundColor: isDark ? '#1F2937' : '#F3F4F6',
borderColor: statusDisplay.color,
}]}>
<View style={styles.statusRow}>
<Text style={styles.statusIcon}>{statusDisplay.icon}</Text>
<View style={styles.statusTextContainer}>
<Text style={[styles.statusLabel, { color: isDark ? '#9CA3AF' : '#6B7280' }]}>
Status
</Text>
<Text style={[styles.statusText, { color: statusDisplay.color }]}>
{statusDisplay.text}
</Text>
</View>
</View>
{/* Silence Timer */}
{sttIsListening && status !== 'processing' && status !== 'speaking' && (
<View style={styles.timerContainer}>
<Text style={[styles.timerLabel, { color: isDark ? '#9CA3AF' : '#6B7280' }]}>
Silence Timer (iOS auto-stop at 2.0s)
</Text>
<View style={styles.timerRow}>
<Text style={[styles.timerText, {
color: silenceTimer >= 2000 ? '#EF4444' : silenceTimer >= 1500 ? '#F59E0B' : isDark ? '#D1D5DB' : '#374151'
}]}>
{silenceSeconds}s / 2.0s
</Text>
</View>
<View style={[styles.progressBarContainer, { backgroundColor: isDark ? '#374151' : '#E5E7EB' }]}>
<View style={[styles.progressBarFill, {
width: `${silenceProgress * 100}%`,
backgroundColor: silenceTimer >= 2000 ? '#EF4444' : silenceTimer >= 1500 ? '#F59E0B' : '#10B981'
}]} />
</View>
</View>
)}
{/* Current Transcripts */}
{partialTranscript && (
<View style={styles.transcriptContainer}>
<Text style={[styles.transcriptLabel, { color: isDark ? '#9CA3AF' : '#6B7280' }]}>
Partial:
</Text>
<Text style={[styles.transcriptText, { color: isDark ? '#F59E0B' : '#D97706' }]}>
"{partialTranscript}"
</Text>
</View>
)}
{recognizedText && (
<View style={styles.transcriptContainer}>
<Text style={[styles.transcriptLabel, { color: isDark ? '#9CA3AF' : '#6B7280' }]}>
Final:
</Text>
<Text style={[styles.transcriptText, { color: isDark ? '#10B981' : '#059669' }]}>
"{recognizedText}"
</Text>
</View>
)}
</View>
{/* Logs */}
<View style={styles.logsContainer}>
<Text style={[styles.logsTitle, { color: isDark ? '#FFFFFF' : '#000000' }]}>
Event Log
</Text>
<ScrollView
ref={scrollViewRef}
style={[styles.logsScrollView, { backgroundColor: isDark ? '#111827' : '#F9FAFB' }]}
contentContainerStyle={styles.logsContent}
>
{logs.length === 0 ? (
<Text style={[styles.emptyText, { color: isDark ? '#6B7280' : '#9CA3AF' }]}>
No events yet. Press FAB to start.
</Text>
) : (
logs.map(log => {
const time = new Date(log.timestamp);
const timeStr = `${String(time.getHours()).padStart(2, '0')}:${String(time.getMinutes()).padStart(2, '0')}:${String(time.getSeconds()).padStart(2, '0')}.${String(time.getMilliseconds()).padStart(3, '0')}`;
return (
<View key={log.id} style={styles.logEntry}>
<Text style={[styles.logTimestamp, { color: isDark ? '#6B7280' : '#9CA3AF' }]}>
{timeStr}
</Text>
<Text style={styles.logIcon}>{getCategoryIcon(log.category)}</Text>
<Text style={[styles.logMessage, { color: getLogColor(log.level) }]}>
{log.message}
</Text>
</View>
);
})
)}
</ScrollView>
</View>
{/* FAB */}
<TouchableOpacity
style={[styles.fab, {
backgroundColor: isListening ? '#EF4444' : AppColors.primary,
bottom: insets.bottom + 80,
}]}
onPress={() => {
if (isListening) {
addLog('system', '🛑 User stopped session', 'warning');
stopSession();
} else {
clearLogs();
addLog('system', '▶️ User started session', 'success');
startSession();
}
}}
>
<Feather
name={isListening ? 'square' : 'mic'}
size={28}
color="#FFFFFF"
/>
</TouchableOpacity>
</View>
);
}
const styles = StyleSheet.create({
container: {
flex: 1,
},
header: {
flexDirection: 'row',
alignItems: 'center',
justifyContent: 'space-between',
paddingHorizontal: 20,
paddingBottom: 16,
},
headerTitle: {
fontSize: 28,
fontWeight: '700',
},
clearButton: {
padding: 8,
},
statusCard: {
marginHorizontal: 20,
marginBottom: 16,
padding: 16,
borderRadius: 12,
borderLeftWidth: 4,
},
statusRow: {
flexDirection: 'row',
alignItems: 'center',
},
statusIcon: {
fontSize: 32,
marginRight: 12,
},
statusTextContainer: {
flex: 1,
},
statusLabel: {
fontSize: 12,
fontWeight: '500',
marginBottom: 2,
},
statusText: {
fontSize: 18,
fontWeight: '700',
},
timerContainer: {
marginTop: 16,
paddingTop: 16,
borderTopWidth: 1,
borderTopColor: 'rgba(156, 163, 175, 0.2)',
},
timerLabel: {
fontSize: 12,
fontWeight: '500',
marginBottom: 8,
},
timerRow: {
marginBottom: 8,
},
timerText: {
fontSize: 24,
fontWeight: '700',
fontVariant: ['tabular-nums'],
},
progressBarContainer: {
height: 8,
borderRadius: 4,
overflow: 'hidden',
},
progressBarFill: {
height: '100%',
borderRadius: 4,
},
transcriptContainer: {
marginTop: 12,
paddingTop: 12,
borderTopWidth: 1,
borderTopColor: 'rgba(156, 163, 175, 0.2)',
},
transcriptLabel: {
fontSize: 12,
fontWeight: '500',
marginBottom: 4,
},
transcriptText: {
fontSize: 14,
fontStyle: 'italic',
},
logsContainer: {
flex: 1,
marginHorizontal: 20,
},
logsTitle: {
fontSize: 16,
fontWeight: '700',
marginBottom: 8,
},
logsScrollView: {
flex: 1,
borderRadius: 8,
},
logsContent: {
padding: 12,
},
emptyText: {
textAlign: 'center',
fontSize: 14,
fontStyle: 'italic',
paddingVertical: 20,
},
logEntry: {
flexDirection: 'row',
marginBottom: 8,
alignItems: 'flex-start',
},
logTimestamp: {
fontSize: 11,
fontVariant: ['tabular-nums'],
marginRight: 8,
width: 80,
},
logIcon: {
fontSize: 14,
marginRight: 6,
},
logMessage: {
fontSize: 13,
flex: 1,
lineHeight: 18,
},
fab: {
position: 'absolute',
right: 20,
width: 64,
height: 64,
borderRadius: 32,
alignItems: 'center',
justifyContent: 'center',
shadowColor: '#000',
shadowOffset: { width: 0, height: 4 },
shadowOpacity: 0.3,
shadowRadius: 8,
elevation: 8,
},
});


@@ -104,6 +104,8 @@ export function useSpeechRecognition(
const isStartingRef = useRef(false);
// Track if voice has been detected in current session (for onVoiceDetected callback)
const voiceDetectedRef = useRef(false);
// Track last partial transcript for iOS fix (iOS never sends isFinal:true)
const lastPartialRef = useRef('');
// Check availability on mount
useEffect(() => {
@@ -140,6 +142,16 @@
// Event: Recognition ended
useSpeechRecognitionEvent('end', () => {
console.log('[SpeechRecognition] Ended');
// iOS FIX: iOS never sends isFinal:true, so we send last partial as final when STT ends
const lastPartial = lastPartialRef.current;
if (lastPartial && lastPartial.trim().length > 0) {
console.log('[SpeechRecognition] 🍎 iOS FIX - Sending last partial as final:', lastPartial);
setRecognizedText(lastPartial);
onResult?.(lastPartial, true); // Send as final=true
lastPartialRef.current = ''; // Clear after sending
}
setIsListening(false);
setPartialTranscript('');
isStartingRef.current = false;
@@ -167,8 +179,10 @@
if (isFinal) {
setRecognizedText(transcript);
setPartialTranscript('');
lastPartialRef.current = ''; // Clear after final
} else {
setPartialTranscript(transcript);
lastPartialRef.current = transcript; // Save for iOS fix
}
onResult?.(transcript, isFinal);


@@ -229,6 +229,20 @@
}
}
// Voice API Type management
async setVoiceApiType(type: 'voice_ask' | 'ask_wellnuo_ai'): Promise<void> {
await SecureStore.setItemAsync('voiceApiType', type);
}
async getVoiceApiType(): Promise<'voice_ask' | 'ask_wellnuo_ai'> {
try {
const saved = await SecureStore.getItemAsync('voiceApiType');
return (saved as 'voice_ask' | 'ask_wellnuo_ai') || 'ask_wellnuo_ai';
} catch {
return 'ask_wellnuo_ai';
}
}
async validateDeploymentId(deploymentId: string): Promise<ApiResponse<{ valid: boolean; name?: string }>> {
const token = await this.getToken();
const userName = await this.getUserName();
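
The hunk above persists the Voice API type but does not show where it is consumed. A minimal consumption sketch, assuming the stored function name maps directly onto an endpoint path; the base URL, request body, and response field are placeholders, not the real API:

import { api } from '@/services/api';

const BASE_URL = 'https://api.example.com'; // placeholder host, not the real one

async function sendVoiceQuestion(transcript: string): Promise<string> {
  // getVoiceApiType() is from the diff above; it defaults to 'ask_wellnuo_ai'.
  const fnName = await api.getVoiceApiType(); // 'voice_ask' | 'ask_wellnuo_ai'
  const res = await fetch(`${BASE_URL}/${fnName}`, {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({ question: transcript }), // hypothetical request shape
  });
  const data = await res.json();
  return data.answer; // hypothetical response field
}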