WellNuo Lite architecture:

- Simplified navigation flow with NavigationController
- Profile editing with API sync (/auth/profile endpoint)
- OTP verification improvements
- ESP WiFi provisioning setup (espProvisioning.ts)
- E2E testing infrastructure (Playwright)
- Speech recognition hooks (web/native)
- Backend auth enhancements

This is the stable version submitted to the App Store.

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <noreply@anthropic.com>
92 lines · 2.9 KiB · TypeScript
import { useState, useEffect, useRef } from 'react';

export interface UseSpeechRecognitionReturn {
  isListening: boolean;
  recognizedText: string;
  startListening: (options?: { continuous?: boolean }) => Promise<void>;
  stopListening: () => void;
  isAvailable: boolean;
  hasPermission: boolean;
  requestPermission: () => Promise<boolean>;
}

export function useSpeechRecognition(): UseSpeechRecognitionReturn {
  const [isListening, setIsListening] = useState(false);
  const [recognizedText, setRecognizedText] = useState('');
  const [isAvailable, setIsAvailable] = useState(false);

  const recognitionRef = useRef<any>(null);

  useEffect(() => {
    if (typeof window !== 'undefined') {
      // Safari exposes the Web Speech API under the webkit prefix.
      const SpeechRecognition =
        (window as any).SpeechRecognition || (window as any).webkitSpeechRecognition;
      if (SpeechRecognition) {
        setIsAvailable(true);
        const recognition = new SpeechRecognition();
        recognition.continuous = false; // Default; overridden per call in startListening
        recognition.interimResults = true;
        recognition.lang = 'en-US';

        recognition.onstart = () => {
          setIsListening(true);
        };

        recognition.onend = () => {
          setIsListening(false);
        };

        recognition.onresult = (event: any) => {
          // Concatenate the top alternative of every result chunk into one transcript.
          const transcript = Array.from(event.results)
            .map((result: any) => result[0])
            .map((result: any) => result.transcript)
            .join('');

          setRecognizedText(transcript);
        };

        recognition.onerror = (event: any) => {
          console.warn('Speech recognition error', event.error);
          setIsListening(false);
        };

        recognitionRef.current = recognition;
      }
    }
  }, []);

  const requestPermission = async () => {
    // The Web Speech API prompts for microphone permission on start(),
    // so availability is the best proxy we can report here.
    return isAvailable;
  };

  const startListening = async (options?: { continuous?: boolean }) => {
    if (!recognitionRef.current) return;

    try {
      recognitionRef.current.continuous = options?.continuous ?? false;
      setRecognizedText('');
      recognitionRef.current.start();
    } catch (e) {
      // start() throws if recognition is already running.
      console.error('Failed to start speech recognition', e);
      setIsListening(false);
    }
  };

  const stopListening = () => {
    if (recognitionRef.current) {
      recognitionRef.current.stop();
    }
  };

  return {
    isListening,
    recognizedText,
    startListening,
    stopListening,
    isAvailable,
    hasPermission: isAvailable, // Assume permission is handled by the browser
    requestPermission
  };
}
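For context, a minimal sketch of how a component might consume this hook, assuming a DOM render target (e.g. react-native-web); the component name and markup are illustrative, not from the repository:

```typescript
import React from 'react';
import { useSpeechRecognition } from './useSpeechRecognition';

// Hypothetical consumer component for illustration only.
export function DictationDemo() {
  const { isListening, recognizedText, startListening, stopListening, isAvailable } =
    useSpeechRecognition();

  if (!isAvailable) {
    return <p>Speech recognition is not supported in this browser.</p>;
  }

  return (
    <div>
      <button
        onClick={() => {
          if (isListening) {
            stopListening();
          } else {
            void startListening({ continuous: false });
          }
        }}
      >
        {isListening ? 'Stop' : 'Start'} dictation
      </button>
      <p>{recognizedText}</p>
    </div>
  );
}
```

Because `interimResults` is enabled, `recognizedText` updates while the user is still speaking; passing `{ continuous: true }` keeps the session open across pauses instead of ending after the first utterance.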