diff --git a/app/_layout.tsx b/app/_layout.tsx
index 3c2be2d..15c88bf 100644
--- a/app/_layout.tsx
+++ b/app/_layout.tsx
@@ -12,6 +12,7 @@ import { AuthProvider, useAuth } from '@/contexts/AuthContext';
import { BeneficiaryProvider } from '@/contexts/BeneficiaryContext';
import { VoiceTranscriptProvider } from '@/contexts/VoiceTranscriptContext';
import { VoiceCallProvider } from '@/contexts/VoiceCallContext';
+import { VoiceProvider } from '@/contexts/VoiceContext';
import { LoadingSpinner } from '@/components/ui/LoadingSpinner';
import { FloatingCallBubble } from '@/components/FloatingCallBubble';
@@ -67,7 +68,9 @@ export default function RootLayout() {
-          {/* ... */}
+          <VoiceProvider>
+            {/* ... */}
+          </VoiceProvider>
diff --git a/contexts/VoiceContext.tsx b/contexts/VoiceContext.tsx
new file mode 100644
index 0000000..28b6036
--- /dev/null
+++ b/contexts/VoiceContext.tsx
@@ -0,0 +1,382 @@
+/**
+ * Voice Context - Local STT/TTS integration with WellNuo API
+ *
+ * Provides voice session management:
+ * - STT (Speech-to-Text) via expo-speech-recognition
+ * - API calls to WellNuo ask_wellnuo_ai
+ * - TTS (Text-to-Speech) via expo-speech
+ *
+ * Flow: User speaks → STT → API → Response → TTS → Continue listening
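+ *
+ * Example (hypothetical consumer; assumes the provider is mounted in app/_layout.tsx):
+ *
+ *   const { startSession, sendTranscript, stopSession } = useVoice();
+ *   startSession();                            // status -> 'listening'
+ *   await sendTranscript('how is dad doing');  // -> API -> spoken reply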
+ */
+
+import React, {
+ createContext,
+ useContext,
+ useState,
+ useCallback,
+ useRef,
+ ReactNode,
+} from 'react';
+import * as Speech from 'expo-speech';
+import { api } from '@/services/api';
+
+// WellNuo API configuration (same as chat.tsx)
+const API_URL = 'https://eluxnetworks.net/function/well-api/api';
+const WELLNUO_USER = 'anandk';
+const WELLNUO_PASSWORD = 'anandk_8';
+
+// Single deployment mode - sends only deployment_id (no beneficiary_names_dict)
+const SINGLE_DEPLOYMENT_MODE = true;
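+// e.g. in this mode the ask_wellnuo_ai request carries only deployment_id;
+// a multi-deployment build would also send a beneficiary_names_dict param
+// (see the placeholder branch in sendTranscript below).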
+
+// Keywords for question normalization (same as chat.tsx)
+const STATUS_KEYWORDS = [
+ /\bhow\s+is\b/i,
+ /\bhow'?s\b/i,
+ /\bhow\s+are\b/i,
+ /\btell\s+me\s+about\b/i,
+ /\bwhat'?s\s+up\s+with\b/i,
+ /\bupdate\s+on\b/i,
+ /\bstatus\b/i,
+ /\bdoing\b/i,
+ /\bfeeling\b/i,
+ /\bcheck\s+on\b/i,
+ /\bis\s+\w+\s+okay\b/i,
+ /\bis\s+\w+\s+alright\b/i,
+ /\bis\s+\w+\s+fine\b/i,
+ /\bokay\?\b/i,
+ /\balright\?\b/i,
+];
+
+const SUBJECT_KEYWORDS = [
+ /\bdad\b/i,
+ /\bfather\b/i,
+ /\bferdinand\b/i,
+ /\bhim\b/i,
+ /\bhe\b/i,
+ /\bmy\s+dad\b/i,
+ /\bmy\s+father\b/i,
+ /\bthe\s+patient\b/i,
+ /\bloved\s+one\b/i,
+ /\bparent\b/i,
+ /\bgrandpa\b/i,
+ /\bgrandfather\b/i,
+];
+
+/**
+ * Normalize question for WellNuo API (same logic as chat.tsx)
+ */
+function normalizeQuestion(userMessage: string): string {
+ const msgLower = userMessage.toLowerCase().trim();
+
+ const isStatusQuery = STATUS_KEYWORDS.some((pattern) => pattern.test(msgLower));
+ const isAboutRecipient = SUBJECT_KEYWORDS.some((pattern) => pattern.test(msgLower));
+
+ if (isStatusQuery && isAboutRecipient) {
+ console.log(`[VoiceContext] Normalized '${userMessage}' -> 'how is dad doing'`);
+ return 'how is dad doing';
+ }
+
+ if (isStatusQuery && !isAboutRecipient) {
+ console.log(
+ `[VoiceContext] Normalized '${userMessage}' -> 'how is dad doing' (assumed recipient)`
+ );
+ return 'how is dad doing';
+ }
+
+ console.log(`[VoiceContext] No normalization applied to: '${userMessage}'`);
+ return userMessage;
+}
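+
+// Illustrative results (verified against the keyword lists above):
+//   normalizeQuestion("How's my dad feeling?")      -> 'how is dad doing'
+//   normalizeQuestion('Any update on grandpa?')     -> 'how is dad doing'
+//   normalizeQuestion('Show me the sensor history') -> returned unchanged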
+
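+// Typical transitions: idle -> listening (startSession) -> processing
+// (sendTranscript) -> speaking (TTS) -> listening again; stopSession -> idle.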
+export type VoiceStatus = 'idle' | 'listening' | 'processing' | 'speaking';
+
+interface VoiceContextValue {
+ // Current status of the voice session
+ status: VoiceStatus;
+ // Whether voice session is active (not idle)
+ isActive: boolean;
+ // Whether STT is currently listening
+ isListening: boolean;
+ // Whether TTS is currently speaking
+ isSpeaking: boolean;
+ // Whether processing API request
+ isProcessing: boolean;
+ // Current/last transcript from STT
+ transcript: string;
+ // Partial transcript (real-time preview)
+ partialTranscript: string;
+ // Last API response
+ lastResponse: string | null;
+ // Error message if any
+ error: string | null;
+
+ // Start voice session (begin listening)
+ startSession: () => void;
+ // Stop voice session
+ stopSession: () => void;
+
+ // Send transcript to API and get response with TTS
+ // Called automatically when STT detects speech end, or manually
+  sendTranscript: (text: string) => Promise<string | null>;
+
+ // Update transcript from external STT hook
+ setTranscript: (text: string) => void;
+ setPartialTranscript: (text: string) => void;
+
+ // Set status from external STT/TTS hooks
+ setStatus: (status: VoiceStatus) => void;
+ setIsListening: (listening: boolean) => void;
+ setIsSpeaking: (speaking: boolean) => void;
+
+ // Speak text using TTS
+  speak: (text: string) => Promise<void>;
+ // Stop TTS
+ stopSpeaking: () => void;
+}
+
+const VoiceContext = createContext<VoiceContextValue | undefined>(undefined);
+
+export function VoiceProvider({ children }: { children: ReactNode }) {
+  const [status, setStatus] = useState<VoiceStatus>('idle');
+ const [transcript, setTranscript] = useState('');
+ const [partialTranscript, setPartialTranscript] = useState('');
+  const [lastResponse, setLastResponse] = useState<string | null>(null);
+  const [error, setError] = useState<string | null>(null);
+ const [isListening, setIsListening] = useState(false);
+ const [isSpeaking, setIsSpeaking] = useState(false);
+
+ // API token cache
+  const apiTokenRef = useRef<string | null>(null);
+
+ // Deployment ID from settings
+  const deploymentIdRef = useRef<string | null>(null);
+
+ // Load deployment ID on mount
+ React.useEffect(() => {
+ const loadDeploymentId = async () => {
+ const savedId = await api.getDeploymentId();
+ deploymentIdRef.current = savedId;
+ console.log('[VoiceContext] Loaded deployment ID:', savedId);
+ };
+ loadDeploymentId();
+ }, []);
+
+ /**
+ * Get WellNuo API token (same as chat.tsx)
+ */
+  const getWellNuoToken = useCallback(async (): Promise<string> => {
+ if (apiTokenRef.current) {
+ return apiTokenRef.current;
+ }
+
+ const nonce = Math.floor(Math.random() * 1000000).toString();
+ const response = await fetch(API_URL, {
+ method: 'POST',
+ headers: { 'Content-Type': 'application/x-www-form-urlencoded' },
+ body: new URLSearchParams({
+ function: 'credentials',
+ clientId: 'MA_001',
+ user_name: WELLNUO_USER,
+ ps: WELLNUO_PASSWORD,
+ nonce: nonce,
+ }).toString(),
+ });
+
+ const data = await response.json();
+ if (data.status === '200 OK' && data.access_token) {
+ apiTokenRef.current = data.access_token;
+ console.log('[VoiceContext] WellNuo token obtained');
+ return data.access_token;
+ }
+ throw new Error('Failed to authenticate with WellNuo API');
+ }, []);
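+
+  // The credentials endpoint is expected to answer with JSON like
+  // { "status": "200 OK", "access_token": "<opaque token>" }
+  // (shape inferred from the success check above; values are illustrative).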
+
+ /**
+ * Send transcript to WellNuo API and speak the response
+ */
+ const sendTranscript = useCallback(
+    async (text: string): Promise<string | null> => {
+ const trimmedText = text.trim();
+ if (!trimmedText) {
+ console.log('[VoiceContext] Empty transcript, skipping API call');
+ return null;
+ }
+
+ console.log('[VoiceContext] Sending transcript to API:', trimmedText);
+ setStatus('processing');
+ setError(null);
+
+ try {
+ // Get API token
+ const token = await getWellNuoToken();
+
+ // Normalize question
+ const normalizedQuestion = normalizeQuestion(trimmedText);
+
+ // Get deployment ID
+ const deploymentId = deploymentIdRef.current || '21';
+
+ // Build request params
+        const requestParams: Record<string, string> = {
+ function: 'ask_wellnuo_ai',
+ clientId: 'MA_001',
+ user_name: WELLNUO_USER,
+ token: token,
+ question: normalizedQuestion,
+ deployment_id: deploymentId,
+ };
+
+ // Only add beneficiary_names_dict if NOT in single deployment mode
+ if (!SINGLE_DEPLOYMENT_MODE) {
+ // For full app, would include beneficiary names dict
+ // Currently single deployment mode only
+ }
+
+ const response = await fetch(API_URL, {
+ method: 'POST',
+ headers: { 'Content-Type': 'application/x-www-form-urlencoded' },
+ body: new URLSearchParams(requestParams).toString(),
+ });
+
+ const data = await response.json();
+
+ if (data.ok && data.response?.body) {
+ const responseText = data.response.body;
+ console.log('[VoiceContext] API response:', responseText.slice(0, 100) + '...');
+ setLastResponse(responseText);
+
+ // Speak the response
+ await speak(responseText);
+
+ return responseText;
+ } else {
+ // Token might be expired
+ if (data.status === '401 Unauthorized') {
+ apiTokenRef.current = null;
+ throw new Error('Session expired, please try again');
+ }
+ throw new Error(data.message || 'Could not get response');
+ }
+ } catch (err) {
+ const errorMsg = err instanceof Error ? err.message : 'Unknown error';
+ console.error('[VoiceContext] API error:', errorMsg);
+ setError(errorMsg);
+ setStatus('idle');
+ return null;
+ }
+    },
+    // `speak` is defined below with a stable identity (empty dependency list);
+    // it is intentionally left out of this array because referencing it here
+    // would read the binding before its initialization.
+    [getWellNuoToken]
+  );
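+
+  // ask_wellnuo_ai success payload, as assumed by the checks above
+  // (illustrative): { "ok": true, "response": { "body": "<answer text>" } }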
+
+ /**
+ * Speak text using TTS
+ */
+  const speak = useCallback(async (text: string): Promise<void> => {
+ if (!text.trim()) return;
+
+ console.log('[VoiceContext] Speaking:', text.slice(0, 50) + '...');
+ setStatus('speaking');
+ setIsSpeaking(true);
+
+ return new Promise((resolve) => {
+ Speech.speak(text, {
+ language: 'en-US',
+ rate: 0.9,
+ pitch: 1.0,
+ onStart: () => {
+ console.log('[VoiceContext] TTS started');
+ },
+ onDone: () => {
+ console.log('[VoiceContext] TTS completed');
+ setIsSpeaking(false);
+          // Natural completion: return to listening so the conversation continues
+          setStatus('listening');
+ resolve();
+ },
+ onError: (error) => {
+ console.error('[VoiceContext] TTS error:', error);
+ setIsSpeaking(false);
+ setStatus('listening');
+ resolve();
+ },
+        onStopped: () => {
+          console.log('[VoiceContext] TTS stopped');
+          // Speech.stop() is only called from stopSession/stopSpeaking, which
+          // set the session status themselves; forcing 'listening' here would
+          // clobber the 'idle' that stopSession just set.
+          setIsSpeaking(false);
+          resolve();
+        },
+ });
+ });
+ }, []);
+
+ /**
+ * Stop TTS playback
+ */
+  const stopSpeaking = useCallback(() => {
+    Speech.stop();
+    setIsSpeaking(false);
+    // onStopped above no longer touches status, so resume listening here
+    setStatus('listening');
+  }, []);
+
+ /**
+ * Start voice session
+ */
+ const startSession = useCallback(() => {
+ console.log('[VoiceContext] Starting voice session');
+ setStatus('listening');
+ setIsListening(true);
+ setError(null);
+ setTranscript('');
+ setPartialTranscript('');
+ }, []);
+
+ /**
+ * Stop voice session
+ */
+ const stopSession = useCallback(() => {
+ console.log('[VoiceContext] Stopping voice session');
+ Speech.stop();
+ setStatus('idle');
+ setIsListening(false);
+ setIsSpeaking(false);
+ setError(null);
+ }, []);
+
+ // Computed values
+ const isActive = status !== 'idle';
+ const isProcessing = status === 'processing';
+
+  return (
+    <VoiceContext.Provider
+      value={{
+        status, isActive, isListening, isSpeaking, isProcessing,
+        transcript, partialTranscript, lastResponse, error,
+        startSession, stopSession, sendTranscript,
+        setTranscript, setPartialTranscript,
+        setStatus, setIsListening, setIsSpeaking,
+        speak, stopSpeaking,
+      }}
+    >
+      {children}
+    </VoiceContext.Provider>
+  );
+}
+
+export function useVoice() {
+ const context = useContext(VoiceContext);
+ if (!context) {
+ throw new Error('useVoice must be used within VoiceProvider');
+ }
+ return context;
+}
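+
+// Sketch of a consumer (hypothetical; assumes react-native's Button and that
+// VoiceProvider wraps the tree as in app/_layout.tsx):
+//
+//   import { Button } from 'react-native';
+//
+//   function VoiceToggle() {
+//     const { isActive, startSession, stopSession } = useVoice();
+//     return (
+//       <Button
+//         title={isActive ? 'End voice session' : 'Start voice session'}
+//         onPress={isActive ? stopSession : startSession}
+//       />
+//     );
+//   }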