Display Julia's voice responses in chat
When the user speaks via voice mode, both their question and Julia's response are now shown in the text chat, giving a unified conversation history across voice and text interactions.

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <noreply@anthropic.com>
commit 88d4afcdfd
parent 8f64a6e6af
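The two files in the diff below consume a VoiceTranscriptContext that is not part of this commit. For orientation, here is a minimal sketch of the shape the diff assumes; the hook and member names come straight from the diff, while the entry fields, id scheme, and provider internals are illustrative guesses:

```tsx
import React, { createContext, useCallback, useContext, useRef, useState, type ReactNode } from 'react';

type Role = 'user' | 'assistant';

// Hypothetical entry/message shapes; the real ones live in the app's types.
interface TranscriptEntry { id: string; role: Role; text: string; timestamp: number }
interface ChatMessage { id: string; role: Role; content: string }

interface VoiceTranscriptContextValue {
  transcript: TranscriptEntry[];
  hasNewTranscript: boolean;
  addTranscriptEntry: (role: Role, text: string) => void;
  markTranscriptAsShown: () => void;
  getTranscriptAsMessages: () => ChatMessage[];
}

const VoiceTranscriptContext = createContext<VoiceTranscriptContextValue | null>(null);

export function VoiceTranscriptProvider({ children }: { children: ReactNode }) {
  const [transcript, setTranscript] = useState<TranscriptEntry[]>([]);
  const [hasNewTranscript, setHasNewTranscript] = useState(false);
  const counterRef = useRef(0);

  // Memoized so consumers can safely list it in dependency arrays.
  const addTranscriptEntry = useCallback((role: Role, text: string) => {
    const id = `voice-${Date.now()}-${counterRef.current++}`; // minted once per entry
    setTranscript(prev => [...prev, { id, role, text, timestamp: Date.now() }]);
    setHasNewTranscript(true);
  }, []);

  const markTranscriptAsShown = useCallback(() => setHasNewTranscript(false), []);

  // Ids are carried over from the stored entries, so repeated calls return the
  // same ids and the chat screen can deduplicate reliably.
  const getTranscriptAsMessages = useCallback(
    () => transcript.map(({ id, role, text }) => ({ id, role, content: text })),
    [transcript]
  );

  return (
    <VoiceTranscriptContext.Provider
      value={{ transcript, hasNewTranscript, addTranscriptEntry, markTranscriptAsShown, getTranscriptAsMessages }}
    >
      {children}
    </VoiceTranscriptContext.Provider>
  );
}

export function useVoiceTranscript() {
  const ctx = useContext(VoiceTranscriptContext);
  if (!ctx) throw new Error('useVoiceTranscript must be used within VoiceTranscriptProvider');
  return ctx;
}
```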
@@ -24,6 +24,7 @@ import { SafeAreaView } from 'react-native-safe-area-context';
 import { useRouter, useFocusEffect } from 'expo-router';
 import { api } from '@/services/api';
 import { useBeneficiary } from '@/contexts/BeneficiaryContext';
+import { useVoiceTranscript } from '@/contexts/VoiceTranscriptContext';
 import { AppColors, BorderRadius, FontSizes, Spacing } from '@/constants/theme';
 import type { Message, Beneficiary } from '@/types';
 
@@ -109,6 +110,7 @@ function normalizeQuestion(userMessage: string): string {
 export default function ChatScreen() {
   const router = useRouter();
   const { currentBeneficiary, setCurrentBeneficiary } = useBeneficiary();
+  const { transcript, hasNewTranscript, markTranscriptAsShown, getTranscriptAsMessages } = useVoiceTranscript();
 
   // Helper to create initial message with beneficiary name
   const createInitialMessage = useCallback((beneficiaryName?: string | null): Message => ({
@@ -195,6 +197,26 @@ export default function ChatScreen() {
     }
   }, [deploymentName, createInitialMessage]);
 
+  // Add voice transcript messages to chat when new ones arrive
+  useEffect(() => {
+    if (hasNewTranscript && transcript.length > 0) {
+      const voiceMessages = getTranscriptAsMessages();
+      if (voiceMessages.length > 0) {
+        setMessages(prev => {
+          // Filter out messages that are already in the chat (by id)
+          const existingIds = new Set(prev.map(m => m.id));
+          const newMessages = voiceMessages.filter(m => !existingIds.has(m.id));
+          if (newMessages.length > 0) {
+            console.log('[Chat] Adding', newMessages.length, 'voice messages to chat');
+            return [...prev, ...newMessages];
+          }
+          return prev;
+        });
+      }
+      markTranscriptAsShown();
+    }
+  }, [hasNewTranscript, transcript, getTranscriptAsMessages, markTranscriptAsShown]);
+
   // Load beneficiaries
   const loadBeneficiaries = useCallback(async () => {
     setLoadingBeneficiaries(true);
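The effect above deduplicates by id, which only works if getTranscriptAsMessages() returns the same id for a given entry on every call, as in the sketch near the top; an id minted fresh on each call would never match, and the same entry would be appended to the chat repeatedly. A standalone check of the filter step, with made-up messages:

```typescript
// Hypothetical data; demonstrates the Set-based dedup used in the effect.
type Msg = { id: string; content: string };

const prev: Msg[] = [{ id: 'voice-1', content: 'Hi Julia' }];
const incoming: Msg[] = [
  { id: 'voice-1', content: 'Hi Julia' },               // already in the chat
  { id: 'voice-2', content: 'Hello! How can I help?' }, // new
];

const existingIds = new Set(prev.map(m => m.id));
const fresh = incoming.filter(m => !existingIds.has(m.id));
console.log(fresh.map(m => m.id)); // ['voice-2']
```

The remaining hunks are in the voice provider, which writes the entries that this effect later picks up.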
@@ -19,6 +19,7 @@ import React, {
 } from 'react';
 import * as Speech from 'expo-speech';
 import { api } from '@/services/api';
+import { useVoiceTranscript } from './VoiceTranscriptContext';
 
 // WellNuo API configuration (same as chat.tsx)
 const API_URL = 'https://eluxnetworks.net/function/well-api/api';
@@ -146,6 +147,9 @@ export function VoiceProvider({ children }: { children: ReactNode }) {
   const [isListening, setIsListening] = useState(false);
   const [isSpeaking, setIsSpeaking] = useState(false);
 
+  // Voice transcript context for chat display
+  const { addTranscriptEntry } = useVoiceTranscript();
+
   // API token cache
   const apiTokenRef = useRef<string | null>(null);
 
@@ -207,6 +211,9 @@ export function VoiceProvider({ children }: { children: ReactNode }) {
       setStatus('processing');
       setError(null);
 
+      // Add user message to transcript for chat display
+      addTranscriptEntry('user', trimmedText);
+
       try {
         // Get API token
         const token = await getWellNuoToken();
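Note the ordering here: the user's entry is added before the try block, so the spoken question reaches the chat even if the API call that follows fails. Only Julia's reply, added in the next hunk, depends on a successful response.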
@@ -246,6 +253,9 @@ export function VoiceProvider({ children }: { children: ReactNode }) {
         console.log('[VoiceContext] API response:', responseText.slice(0, 100) + '...');
         setLastResponse(responseText);
 
+        // Add Julia's response to transcript for chat display
+        addTranscriptEntry('assistant', responseText);
+
         // Speak the response
         await speak(responseText);
 
@@ -266,7 +276,7 @@ export function VoiceProvider({ children }: { children: ReactNode }) {
         return null;
       }
     },
-    [getWellNuoToken]
+    [getWellNuoToken, addTranscriptEntry]
   );
 
   /**
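The dependency-array change in the final hunk follows from the new reference inside the callback: addTranscriptEntry is now used there, so it is listed alongside getWellNuoToken. This stays cheap only if the provider memoizes addTranscriptEntry (for example with useCallback, as in the sketch above); an unmemoized function would get a new identity on every provider render and recreate this callback each time.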