WellNuo/hooks/useSpeechRecognition.ts
Sergei 7105bb72f7 Stable Lite version - App Store submission
WellNuo Lite architecture:
- Simplified navigation flow with NavigationController
- Profile editing with API sync (/auth/profile endpoint)
- OTP verification improvements
- ESP WiFi provisioning setup (espProvisioning.ts)
- E2E testing infrastructure (Playwright)
- Speech recognition hooks (web/native)
- Backend auth enhancements

This is the stable version submitted to App Store.

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <noreply@anthropic.com>
2026-01-12 20:28:18 -08:00

118 lines
3.8 KiB
TypeScript

import { useState, useEffect, useCallback, useRef } from 'react';
import { Platform, Alert } from 'react-native';
// expo-speech-recognition is an optional native dependency: it only exists in
// native builds, so resolve it defensively at runtime rather than importing it
// statically (a static import would crash environments without the module).
let ExpoSpeechRecognitionModule: any = null;
let SPEECH_RECOGNITION_AVAILABLE = false;
try {
  const nativeModule = require('expo-speech-recognition');
  ExpoSpeechRecognitionModule = nativeModule.ExpoSpeechRecognitionModule;
  // Availability tracks whether the named export actually resolved.
  SPEECH_RECOGNITION_AVAILABLE = Boolean(ExpoSpeechRecognitionModule);
} catch (e) {
  console.log('[useSpeechRecognition] expo-speech-recognition not available');
}
/**
 * A single recognition result delivered by the speech recognizer.
 *
 * NOTE(review): nothing in this file currently constructs this type — the
 * 'result' listener below reads `event.results[0].transcript` directly.
 * Presumably this shape mirrors the native event payload; verify against
 * expo-speech-recognition's event types before relying on it.
 */
export interface SpeechRecognitionResult {
  /** Recognized text (may be partial while interim results are streaming). */
  transcript: string;
  /** True once the recognizer has finalized this transcript. */
  isFinal: boolean;
}
/** Public API returned by {@link useSpeechRecognition}. */
export interface UseSpeechRecognitionReturn {
  /** True while the native recognizer session is active. */
  isListening: boolean;
  /** Latest transcript; updated on every 'result' event, reset when listening starts. */
  recognizedText: string;
  /** Starts a recognition session. `continuous` keeps the session open across pauses. */
  startListening: (options?: { continuous?: boolean }) => Promise<void>;
  /** Stops the current recognition session (no-op if the module is unavailable). */
  stopListening: () => void;
  /** Whether the expo-speech-recognition native module loaded on this device. */
  isAvailable: boolean;
  /** Last known permission state; only updated after requestPermission resolves. */
  hasPermission: boolean;
  /** Prompts for (or re-checks) speech permission; resolves to the granted state. */
  requestPermission: () => Promise<boolean>;
}
export function useSpeechRecognition(): UseSpeechRecognitionReturn {
const [isListening, setIsListening] = useState(false);
const [recognizedText, setRecognizedText] = useState('');
const [hasPermission, setHasPermission] = useState(false);
// Callbacks
const onResultRef = useRef<((result: SpeechRecognitionResult) => void) | null>(null);
useEffect(() => {
if (!SPEECH_RECOGNITION_AVAILABLE || !ExpoSpeechRecognitionModule) return;
const subscriptions: any[] = [];
if (ExpoSpeechRecognitionModule.addListener) {
subscriptions.push(
ExpoSpeechRecognitionModule.addListener('start', () => setIsListening(true))
);
subscriptions.push(
ExpoSpeechRecognitionModule.addListener('end', () => setIsListening(false))
);
subscriptions.push(
ExpoSpeechRecognitionModule.addListener('result', (event: any) => {
const transcript = event.results?.[0]?.transcript || '';
setRecognizedText(transcript);
// We expose direct result via component state usually, but for hook we just update state
})
);
subscriptions.push(
ExpoSpeechRecognitionModule.addListener('error', (event: any) => {
setIsListening(false);
console.warn('[Speech] Error:', event);
})
);
}
return () => {
subscriptions.forEach(sub => sub.remove?.());
};
}, []);
const requestPermission = async () => {
if (!SPEECH_RECOGNITION_AVAILABLE) return false;
const result = await ExpoSpeechRecognitionModule.requestPermissionsAsync();
setHasPermission(result.granted);
return result.granted;
};
const startListening = async (options?: { continuous?: boolean }) => {
if (!SPEECH_RECOGNITION_AVAILABLE) {
Alert.alert('Not Available', 'Voice recognition is not available on this device.');
return;
}
try {
// Reset text
setRecognizedText('');
await ExpoSpeechRecognitionModule.start({
lang: 'en-US',
interimResults: true,
maxAlternatives: 1,
continuous: options?.continuous ?? false,
});
} catch (e) {
console.error('Failed to start listening', e);
setIsListening(false);
}
};
const stopListening = () => {
if (SPEECH_RECOGNITION_AVAILABLE) {
ExpoSpeechRecognitionModule.stop();
}
setIsListening(false);
};
return {
isListening,
recognizedText,
startListening,
stopListening,
isAvailable: SPEECH_RECOGNITION_AVAILABLE,
hasPermission,
requestPermission
};
}