import { create } from 'zustand';
import { persist, createJSONStorage } from 'zustand/middleware';
import { v4 as uuidv4 } from 'uuid';
import { LLMService } from '@/services/llm-service';

// --- Types ---

export type MessageRole = 'user' | 'assistant' | 'system';
export type MessageType = 'text' | 'thought' | 'draft';

export interface Message {
  id: string;
  role: MessageRole;
  content: string;
  type?: MessageType;
  createdAt: string;
}

export type ChatPhase = 'idle' | 'input' | 'elicitation' | 'drafting' | 'review';

export interface DraftArtifact {
  title: string;
  insight: string;
  lesson: string;
}

interface ChatState {
  // State
  messages: Message[];
  phase: ChatPhase;
  isTyping: boolean;
  currentDraft: DraftArtifact | null;

  // Actions
  addMessage: (role: MessageRole, content: string, type?: MessageType) => void;
  setPhase: (phase: ChatPhase) => void;
  resetSession: () => void;
  generateDraft: () => Promise<void>;
  sendMessage: (content: string) => Promise<void>;
  updateDraft: (draft: DraftArtifact) => void;
}

// --- Store ---

export const useChatStore = create<ChatState>()(
  persist(
    (set, get) => ({
      // Initial state
      messages: [],
      phase: 'idle',
      isTyping: false,
      currentDraft: null,

      // Actions
      addMessage: (role, content, type = 'text') => {
        const newMessage: Message = {
          id: uuidv4(),
          role,
          content,
          type,
          createdAt: new Date().toISOString(),
        };
        set((state) => ({ messages: [...state.messages, newMessage] }));
      },

      setPhase: (phase) => set({ phase }),

      resetSession: () =>
        set({ messages: [], phase: 'idle', isTyping: false, currentDraft: null }),

      updateDraft: (draft) => set({ currentDraft: draft }),

      sendMessage: async (content) => {
        const { addMessage, messages } = get();

        // 1. Add the user message. `messages` was captured above, so it still
        // holds the history *before* this turn; LLMService appends the new
        // user message itself ([...history, { role: 'user', content }]), so we
        // pass the prior history only to avoid duplicating the turn.
        addMessage('user', content);
        set({ isTyping: true, phase: 'elicitation' });

        try {
          // 2. Call the Teacher agent. In a real streaming implementation we
          // would update the assistant message incrementally in onToken; for
          // now we ignore tokens and wait for the full response in onComplete.
          await LLMService.getTeacherResponseStream(
            content,
            messages.map((m) => ({ role: m.role, content: m.content })),
            {
              onToken: () => {},
              onComplete: (fullText) => {
                addMessage('assistant', fullText);
                set({ isTyping: false });
              },
              onError: (error) => {
                console.error('Teacher Agent Error:', error);
                addMessage(
                  'assistant',
                  "I'm having trouble connecting to my brain right now. Please check your settings."
                );
                set({ isTyping: false });
              },
            }
          );
        } catch (error) {
          console.error('Teacher Agent Error:', error);
          set({ isTyping: false });
        }
      },

      generateDraft: async () => {
        const { messages, setPhase, updateDraft } = get();
        setPhase('drafting');
        set({ isTyping: true });

        try {
          // Call the Ghostwriter agent with the full conversation history.
          const draft = await LLMService.generateDraft(
            messages.map((m) => ({ role: m.role, content: m.content }))
          );
          updateDraft(draft);
          setPhase('review');
          set({ isTyping: false });
        } catch (error) {
          console.error('Ghostwriter Error:', error);
          // On failure, fall back to idle so the user isn't stuck in 'drafting'.
          set({ isTyping: false, phase: 'idle' });
        }
      },
    }),
    {
      name: 'test01-chat-storage',
      storage: createJSONStorage(() => localStorage),
      // Persist messages, phase, and the draft so a page refresh resumes the
      // session; the transient isTyping flag is intentionally dropped.
      partialize: (state) => ({
        messages: state.messages,
        phase: state.phase,
        currentDraft: state.currentDraft,
      }),
    }
  )
);
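
// --- Usage sketch (illustrative only) ---
// A minimal example of how a React component might consume this store, kept
// as a comment so it doesn't ship with the module. The component name, JSX
// structure, and the '@/stores/chat-store' import path are assumptions; only
// the store API defined above is taken from this file.
//
// import React, { useState } from 'react';
// import { useChatStore } from '@/stores/chat-store';
//
// export function ChatPanel() {
//   const { messages, isTyping, phase, sendMessage, generateDraft } = useChatStore();
//   const [input, setInput] = useState('');
//
//   const handleSend = async () => {
//     if (!input.trim()) return;
//     setInput('');
//     await sendMessage(input); // adds the user turn, then streams the Teacher reply
//   };
//
//   return (
//     <div>
//       {messages.map((m) => (
//         <p key={m.id}>{m.role}: {m.content}</p>
//       ))}
//       {isTyping && <p>typing…</p>}
//       <input value={input} onChange={(e) => setInput(e.target.value)} />
//       <button onClick={handleSend}>Send</button>
//       {phase === 'elicitation' && (
//         <button onClick={generateDraft}>Generate draft</button>
//       )}
//     </div>
//   );
// }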