- Backend: Update Legacy API credentials to robster/rob2
- Frontend: ROOM_LOCATIONS with icons and legacyCode mapping
- Device Settings: Modal picker for room selection
- api.ts: Bidirectional conversion (code ↔ name)
- Various UI/UX improvements across screens

PRD-DEPLOYMENT.md completed (Score: 9/10)

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <noreply@anthropic.com>
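The commit message above mentions a bidirectional code ↔ name conversion in api.ts. A minimal sketch of what such a mapping could look like follows; every name and value here (the room entries, icons, and helper function names) is an assumption for illustration, not the repository's actual implementation.

// Hypothetical sketch of the code <-> name mapping described in the commit message.
// ROOM_LOCATIONS and legacyCode follow the commit text; values and icons are placeholders.
interface RoomLocation {
  name: string;        // display name shown in the UI
  icon: string;        // icon identifier for the room picker
  legacyCode: string;  // code expected by the Legacy API
}

const ROOM_LOCATIONS: RoomLocation[] = [
  { name: 'Living Room', icon: 'sofa', legacyCode: 'LR' },       // placeholder entry
  { name: 'Kitchen', icon: 'silverware-fork-knife', legacyCode: 'KT' }, // placeholder entry
];

// Legacy code -> display name (falls back to the raw code if unknown)
function roomCodeToName(code: string): string {
  return ROOM_LOCATIONS.find(r => r.legacyCode === code)?.name ?? code;
}

// Display name -> legacy code (falls back to the raw name if unknown)
function roomNameToCode(name: string): string {
  return ROOM_LOCATIONS.find(r => r.name === name)?.legacyCode ?? name;
}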
import { useState, useEffect, useCallback, useRef } from 'react';
import { Platform, Alert } from 'react-native';

// Try to import native module
let ExpoSpeechRecognitionModule: any = null;
let SPEECH_RECOGNITION_AVAILABLE = false;
try {
  const speechRecognition = require('expo-speech-recognition');
  ExpoSpeechRecognitionModule = speechRecognition.ExpoSpeechRecognitionModule;
  if (ExpoSpeechRecognitionModule) {
    SPEECH_RECOGNITION_AVAILABLE = true;
  }
} catch (e) {
  // expo-speech-recognition not available
}

export interface SpeechRecognitionResult {
  transcript: string;
  isFinal: boolean;
}

export interface UseSpeechRecognitionReturn {
  isListening: boolean;
  recognizedText: string;
  startListening: (options?: { continuous?: boolean }) => Promise<void>;
  stopListening: () => void;
  isAvailable: boolean;
  hasPermission: boolean;
  requestPermission: () => Promise<boolean>;
}

export function useSpeechRecognition(): UseSpeechRecognitionReturn {
  const [isListening, setIsListening] = useState(false);
  const [recognizedText, setRecognizedText] = useState('');
  const [hasPermission, setHasPermission] = useState(false);

  // Callbacks
  const onResultRef = useRef<((result: SpeechRecognitionResult) => void) | null>(null);

  useEffect(() => {
    if (!SPEECH_RECOGNITION_AVAILABLE || !ExpoSpeechRecognitionModule) return;

    const subscriptions: any[] = [];

    if (ExpoSpeechRecognitionModule.addListener) {
      subscriptions.push(
        ExpoSpeechRecognitionModule.addListener('start', () => setIsListening(true))
      );
      subscriptions.push(
        ExpoSpeechRecognitionModule.addListener('end', () => setIsListening(false))
      );
      subscriptions.push(
        ExpoSpeechRecognitionModule.addListener('result', (event: any) => {
          const transcript = event.results?.[0]?.transcript || '';
          setRecognizedText(transcript);
          // We expose direct result via component state usually, but for hook we just update state
        })
      );
      subscriptions.push(
        ExpoSpeechRecognitionModule.addListener('error', (event: any) => {
          setIsListening(false);
        })
      );
    }

    return () => {
      subscriptions.forEach(sub => sub.remove?.());
    };
  }, []);

  const requestPermission = async () => {
    if (!SPEECH_RECOGNITION_AVAILABLE) return false;
    const result = await ExpoSpeechRecognitionModule.requestPermissionsAsync();
    setHasPermission(result.granted);
    return result.granted;
  };

  const startListening = async (options?: { continuous?: boolean }) => {
    if (!SPEECH_RECOGNITION_AVAILABLE) {
      Alert.alert('Not Available', 'Voice recognition is not available on this device.');
      return;
    }

    try {
      // Reset text
      setRecognizedText('');

      await ExpoSpeechRecognitionModule.start({
        lang: 'en-US',
        interimResults: true,
        maxAlternatives: 1,
        continuous: options?.continuous ?? false,
      });
    } catch (e) {
      setIsListening(false);
    }
  };

  const stopListening = () => {
    if (SPEECH_RECOGNITION_AVAILABLE) {
      ExpoSpeechRecognitionModule.stop();
    }
    setIsListening(false);
  };

  return {
    isListening,
    recognizedText,
    startListening,
    stopListening,
    isAvailable: SPEECH_RECOGNITION_AVAILABLE,
    hasPermission,
    requestPermission
  };
}
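A minimal sketch of how a component might consume this hook. The component, its UI, and the relative import path are illustrative assumptions; only the hook's returned fields come from the file above.

import React from 'react';
import { View, Text, Button } from 'react-native';
import { useSpeechRecognition } from './useSpeechRecognition'; // assumed path

// Illustrative consumer: toggles recognition and shows the live transcript.
export function VoiceInputExample() {
  const {
    isListening,
    recognizedText,
    startListening,
    stopListening,
    isAvailable,
    hasPermission,
    requestPermission,
  } = useSpeechRecognition();

  const handlePress = async () => {
    if (isListening) {
      stopListening();
      return;
    }
    // Ask for speech/microphone permission on first use.
    if (!hasPermission && !(await requestPermission())) return;
    await startListening({ continuous: false });
  };

  if (!isAvailable) {
    return <Text>Voice recognition is not available on this device.</Text>;
  }

  return (
    <View>
      <Button title={isListening ? 'Stop' : 'Speak'} onPress={handlePress} />
      <Text>{recognizedText}</Text>
    </View>
  );
}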