fix: ChatBubble crash and DeepSeek API compatibility
- Fix ChatBubble to handle non-string content with String() wrapper
- Fix API route to use generateText for non-streaming requests
- Add @ai-sdk/openai-compatible for non-OpenAI providers (DeepSeek, etc.)
- Use Chat Completions API instead of Responses API for compatible providers
- Update ChatBubble tests and fix component exports to kebab-case
- Remove stale PascalCase ChatBubble.tsx file
@@ -1,6 +1,7 @@
 "use client";

-import { useEffect } from 'react';
+import { useEffect, useState } from 'react';
+import { useSearchParams, useRouter } from 'next/navigation';
 import { ChatWindow } from '@/components/features/chat/chat-window';
 import { ChatInput } from '@/components/features/chat/chat-input';
 import { useSessionStore, useActiveSessionId, useTeacherStatus } from '@/store/use-session';
@@ -9,8 +10,10 @@ import { toast } from 'sonner';
 import { Button } from "@/components/ui/button";
 import { DraftViewSheet } from "@/components/features/draft/DraftViewSheet";
 import { useChatStore } from "@/lib/store/chat-store";
-import { CheckCircle, Loader2, ArrowLeft, Sparkles } from "lucide-react";
+import { Loader2, ArrowLeft, Sparkles, Bot } from "lucide-react";
 import Link from "next/link";
+import { LLMService } from '@/services/llm-service';
+import { ProviderManagementService } from '@/services/provider-management-service';

 export default function ChatPage() {
   const activeSessionId = useActiveSessionId();
@@ -18,9 +21,55 @@ export default function ChatPage() {
   const { setActiveSession } = useSessionStore((s) => s.actions);
   const isDrafting = useChatStore((s) => s.isDrafting);

+  const searchParams = useSearchParams();
+  const router = useRouter();
+
+  // Connection Status State
+  const [connectionStatus, setConnectionStatus] = useState<'checking' | 'connected' | 'error'>('checking');
+
+  // Check for "new" param to force fresh session
+  useEffect(() => {
+    if (searchParams.get('new') === 'true') {
+      // Clear current session to trigger re-initialization
+      setActiveSession(null);
+      // Clear chat UI state
+      useChatStore.setState({ messages: [], currentDraft: null, showDraftView: false });
+      // Clean URL
+      router.replace('/chat');
+    }
+  }, [searchParams, router, setActiveSession]);
+
+  // Check Connection Status
+  useEffect(() => {
+    const checkConnection = async () => {
+      setConnectionStatus('checking');
+      const settings = ProviderManagementService.getActiveProviderSettings();
+
+      if (!settings.apiKey) {
+        setConnectionStatus('error');
+        return;
+      }
+
+      const result = await LLMService.validateConnection(
+        settings.baseUrl,
+        settings.apiKey,
+        settings.modelName
+      );
+
+      if (result.isValid) {
+        setConnectionStatus('connected');
+      } else {
+        setConnectionStatus('error');
+      }
+    };
+
+    checkConnection();
+  }, []);
+
   // Initialize Session on Mount
   useEffect(() => {
     const initSession = async () => {
       // If activeSessionId is null (either initial load or just cleared by above effect)
       if (!activeSessionId) {
         try {
           const newSessionId = await ChatService.createSession();
@@ -77,8 +126,14 @@
           <Link href="/" className="text-slate-500 hover:text-slate-700 transition-colors">
             <ArrowLeft className="w-5 h-5" />
           </Link>
-          <div className="font-medium text-slate-700">
-            Current Session
+          <div className="flex items-center gap-2 font-medium text-slate-700">
+            <div className="relative">
+              <Bot className="w-5 h-5 text-indigo-600" />
+              <div className={`absolute -bottom-0.5 -right-0.5 w-2.5 h-2.5 rounded-full border-2 border-white ${connectionStatus === 'connected' ? 'bg-green-500' :
+                connectionStatus === 'checking' ? 'bg-yellow-400' : 'bg-red-500'
+                }`} />
+            </div>
+            Teacher
           </div>
         </div>
         <Button
@@ -103,7 +158,8 @@
       </div>

       {/* Chat Messages - Scrollable Area */}
-      <div className="flex-1 overflow-hidden">
+      {/* Fix: Added min-h-0 and relative for proper nested scrolling */}
+      <div className="flex-1 flex flex-col min-h-0 overflow-hidden relative">
         <ChatWindow sessionId={activeSessionId} />
       </div>

@@ -6,15 +6,12 @@
 * fast cold starts (<3s).
 *
 * Runtime: Edge (required by architecture)
- * Environment variables:
- * - OPENAI_API_KEY: OpenAI API key (required)
- * - LLM_MODEL: Model to use (default: gpt-4o-mini)
- * - LLM_TEMPERATURE: Temperature for responses (default: 0.7)
 */

 import { NextRequest } from 'next/server';
 import { createOpenAI } from '@ai-sdk/openai';
-import { streamText } from 'ai';
+import { createOpenAICompatible } from '@ai-sdk/openai-compatible';
+import { streamText, generateText } from 'ai';

 // Edge Runtime is REQUIRED for this API route
 export const runtime = 'edge';
@@ -23,8 +20,13 @@ export const runtime = 'edge';
 * POST handler for LLM requests
 *
 * Expects JSON body with:
- * - prompt: The prompt to send to the LLM
+ * - prompt: The prompt to send to the LLM (legacy/simple mode)
+ * - messages: Array of chat messages (standard mode)
 * - stream: Optional boolean to enable streaming (default: true)
+ * - apiKey: Dynamic API key (BYOK)
+ * - baseUrl: Custom base URL (optional)
+ * - model: Model identifier
+ * - temperature: Temperature parameter
 *
 * Returns:
 * - Streaming response if stream=true (default)
@@ -34,16 +36,32 @@ export async function POST(request: NextRequest) {
   try {
     // Parse request body
     const body = await request.json();
-    const { prompt, stream = true } = body as { prompt: string; stream?: boolean };
+    const {
+      prompt,
+      messages,
+      stream = true,
+      apiKey: dynamicApiKey,
+      baseUrl: dynamicBaseUrl,
+      model: dynamicModel,
+      temperature: dynamicTemperature
+    } = body as {
+      prompt?: string;
+      messages?: Array<{ role: string, content: string }>;
+      stream?: boolean;
+      apiKey?: string;
+      baseUrl?: string;
+      model?: string;
+      temperature?: number;
+    };

-    // Validate prompt
-    if (!prompt || typeof prompt !== 'string') {
+    // Validate prompt or messages
+    if (!prompt && (!messages || messages.length === 0)) {
       return new Response(
         JSON.stringify({
           success: false,
           error: {
-            code: 'INVALID_PROMPT',
-            message: 'Prompt is required and must be a string',
+            code: 'INVALID_REQUEST',
+            message: 'Either prompt or messages array is required',
           },
           timestamp: new Date().toISOString(),
         }),
@@ -51,45 +69,63 @@ export async function POST(request: NextRequest) {
       );
     }

-    // Validate environment variables
-    const apiKey = process.env.OPENAI_API_KEY;
+    // Determine config priority: Request Body > Environment Variables > Defaults
+    const apiKey = dynamicApiKey || process.env.OPENAI_API_KEY;
+    const baseUrl = dynamicBaseUrl || process.env.OPENAI_API_BASE_URL; // Optional env var for base URL
+    const modelName = dynamicModel || process.env.LLM_MODEL || 'gpt-4o-mini';
+    const temperature = dynamicTemperature ?? parseFloat(process.env.LLM_TEMPERATURE || '0.7');

+    // Validate API Key presence
     if (!apiKey) {
       return new Response(
         JSON.stringify({
           success: false,
           error: {
             code: 'MISSING_API_KEY',
-            message: 'Server configuration error: API key not found',
+            message: 'API key is required in request body or server configuration',
           },
           timestamp: new Date().toISOString(),
         }),
-        { status: 500, headers: { 'Content-Type': 'application/json' } }
+        { status: 401, headers: { 'Content-Type': 'application/json' } }
       );
     }

-    // Get model configuration
-    const modelName = process.env.LLM_MODEL || 'gpt-4o-mini';
-    const temperature = parseFloat(process.env.LLM_TEMPERATURE || '0.7');
+    // Create provider based on whether it's OpenAI or a compatible provider
+    // Use openai-compatible for non-OpenAI providers to ensure Chat Completions API is used
+    const isOpenAI = !baseUrl || baseUrl.includes('api.openai.com');

-    // Create OpenAI client with API key
-    const openaiClient = createOpenAI({
-      apiKey,
-    });
+    const provider = isOpenAI
+      ? createOpenAI({ apiKey, baseURL: baseUrl })
+      : createOpenAICompatible({
+          name: 'custom-provider',
+          baseURL: baseUrl,
+          headers: {
+            Authorization: `Bearer ${apiKey}`,
+          },
+        });

-    // Generate response using AI SDK
-    const result = streamText({
-      model: openaiClient(modelName),
-      prompt,
-      temperature,
-    });
+    // Prepare messages
+    // If messages array is provided, use it. Otherwise convert legacy prompt to message
+    const chatMessages = (messages || [{ role: 'user', content: prompt || '' }]) as any;

-    // Return streaming response
+    // Return streaming or non-streaming response based on flag
     if (stream) {
+      // Generate streaming response using AI SDK
+      const result = streamText({
+        model: provider(modelName),
+        messages: chatMessages,
+        temperature,
+      });
       return result.toTextStreamResponse();
     }

-    // For non-streaming, convert to text
-    const { text } = await result;
+    // For non-streaming, use generateText which returns the full text
+    const { text } = await generateText({
+      model: provider(modelName),
+      messages: chatMessages,
+      temperature,
+    });

     return new Response(
       JSON.stringify({
         success: true,
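For context on the hunk above: the reworked handler accepts either the legacy `prompt` or a `messages` array, plus per-request BYOK config. The `isOpenAI` switch matters because the stock `@ai-sdk/openai` provider can route chat models through OpenAI's Responses API, which OpenAI-compatible hosts such as DeepSeek do not implement, while `createOpenAICompatible` always targets `/chat/completions`. A minimal client-side sketch of the request shape (the DeepSeek endpoint, key, and model below are illustrative placeholders, not part of this commit):

```ts
// Non-streaming request against the proxy route (sketch; values are placeholders).
const res = await fetch('/api/llm', {
  method: 'POST',
  headers: { 'Content-Type': 'application/json' },
  body: JSON.stringify({
    messages: [{ role: 'user', content: 'Summarize my day' }],
    stream: false,                           // omit or true for a streamed text response
    apiKey: 'sk-...',                        // BYOK key, takes priority over env vars
    baseUrl: 'https://api.deepseek.com/v1',  // non-OpenAI host -> openai-compatible provider
    model: 'deepseek-chat',
    temperature: 0.7,
  }),
});
const body = await res.json(); // on success: { success: true, data: { text: '...' }, ... }
```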
@@ -105,14 +141,16 @@ export async function POST(request: NextRequest) {

     // Check for specific error types
     const errorMessage = error instanceof Error ? error.message : 'Unknown error';
-    const isRateLimit = errorMessage.toLowerCase().includes('rate limit');
+    const isRateLimit = errorMessage.toLowerCase().includes('rate limit') || errorMessage.includes('429');
     const isTimeout = errorMessage.toLowerCase().includes('timeout');
-    const isInvalidKey = errorMessage.toLowerCase().includes('invalid api key');
+    const isInvalidKey = errorMessage.toLowerCase().includes('invalid api key') || errorMessage.includes('401');
+    const isNotFound = errorMessage.includes('404');

     let errorCode = 'INTERNAL_ERROR';
     if (isRateLimit) errorCode = 'RATE_LIMIT';
     if (isTimeout) errorCode = 'TIMEOUT';
     if (isInvalidKey) errorCode = 'INVALID_API_KEY';
+    if (isNotFound) errorCode = 'MODEL_NOT_FOUND'; // Often 404 means model or endpoint not found

     return new Response(
       JSON.stringify({
@@ -123,7 +161,7 @@ export async function POST(request: NextRequest) {
         },
         timestamp: new Date().toISOString(),
       }),
-      { status: 500, headers: { 'Content-Type': 'application/json' } }
+      { status: isInvalidKey ? 401 : 500, headers: { 'Content-Type': 'application/json' } }
     );
   }
 }
@@ -46,7 +46,7 @@ export default function HomePage() {

       {/* Floating Action Button - New Vent */}
       <Link
-        href="/chat"
+        href="/chat?new=true"
         className="fixed bottom-6 right-6 min-h-[56px] w-14 bg-slate-800 text-white rounded-full shadow-lg hover:bg-slate-700 transition-colors flex items-center justify-center"
         aria-label="Start new vent"
       >
@@ -1,103 +1,70 @@
-import { describe, it, expect, vi } from 'vitest';
-import { render, screen, within } from '@testing-library/react';
-import { ChatBubble } from './ChatBubble';
+import { describe, it, expect } from 'vitest';
+import { render, screen } from '@testing-library/react';
+import { ChatBubble } from './chat-bubble';

 describe('ChatBubble', () => {
-  it('renders user variant correctly', () => {
+  it('renders user message correctly', () => {
     const { container } = render(
       <ChatBubble
         role="user"
         content="Hello world"
         timestamp={Date.now()}
       />
     );
-    const bubble = screen.getByText('Hello world');
-    expect(bubble).toBeInTheDocument();
-    expect(container.querySelector('.bg-slate-700')).toBeInTheDocument();
-    expect(container.querySelector('.ml-auto')).toBeInTheDocument();
+    expect(screen.getByText('Hello world')).toBeInTheDocument();
+    // Check for user-specific classes (ShadCN primary color usually implies dark text on light or vice versa depending on theme, but we check justification)
+    expect(container.querySelector('.justify-end')).toBeInTheDocument();
+    expect(container.querySelector('.bg-primary')).toBeInTheDocument();
   });

-  it('renders ai variant correctly', () => {
+  it('renders assistant message correctly', () => {
     const { container } = render(
       <ChatBubble
-        role="ai"
+        role="assistant"
         content="AI response"
         timestamp={Date.now()}
       />
     );
-    const bubble = screen.getByText('AI response');
-    expect(bubble).toBeInTheDocument();
-    expect(container.querySelector('.bg-slate-100')).toBeInTheDocument();
-    expect(container.querySelector('.mr-auto')).toBeInTheDocument();
+    expect(screen.getByText('AI response')).toBeInTheDocument();
+    expect(container.querySelector('.justify-start')).toBeInTheDocument();
+    expect(container.querySelector('.bg-card')).toBeInTheDocument();
   });

-  it('renders system variant correctly', () => {
-    const { container } = render(
+  it('renders system message correctly', () => {
+    // System isn't explicitly handled differently in class logic other than being treated as "not user" (so left aligned),
+    // but let's verify it renders.
+    render(
       <ChatBubble
         role="system"
         content="System message"
         timestamp={Date.now()}
       />
     );
-    const bubble = screen.getByText('System message');
-    expect(bubble).toBeInTheDocument();
-    expect(container.querySelector('.text-center')).toBeInTheDocument();
-    // System messages don't have timestamps
-    expect(container.querySelector('.text-xs.opacity-70')).not.toBeInTheDocument();
-  });
-
-  it('renders markdown inline code', () => {
-    render(
-      <ChatBubble
-        role="user"
-        content="Check `const x = 1;` here"
-        timestamp={Date.now()}
-      />
-    );
-    expect(screen.getByText('const x = 1;')).toBeInTheDocument();
+    expect(screen.getByText('System message')).toBeInTheDocument();
   });

   it('renders markdown code blocks', () => {
     const { container } = render(
       <ChatBubble
-        role="user"
-        content="Check this code block:\n\n```\nconst x = 1;\n```"
-        timestamp={Date.now()}
+        role="assistant"
+        content={"Check this code:\n\n```\nconst x = 1;\n```"}
       />
     );
-    // Verify content is rendered
-    expect(container.textContent).toContain('const x = 1;');
-    // Check for code element (code blocks have both pre and code)
-    const codeElement = container.querySelector('code');
-    expect(codeElement).toBeInTheDocument();
+    expect(screen.getByText('const x = 1;')).toBeInTheDocument();
+    // Check for pre tag
+    expect(container.querySelector('pre')).toBeInTheDocument();
   });

-  it('displays timestamp for non-system messages', () => {
-    const timestamp = Date.now();
-    const { container } = render(
+  it('handles non-string content gracefully', () => {
+    // Imitate the bug where content is an object (cast to any to bypass TS)
+    const badContent = { foo: 'bar' } as any;
+
+    // This should NOT throw "Unexpected value" error
+    render(
       <ChatBubble
-        role="user"
-        content="Test"
-        timestamp={timestamp}
+        role="assistant"
+        content={badContent}
       />
     );
-    const timeString = new Date(timestamp).toLocaleTimeString();
-    const timeElement = screen.getByText(timeString);
-    expect(timeElement).toBeInTheDocument();
-    expect(timeElement).toHaveClass('text-xs', 'opacity-70');
-  });
-
-  it('applies correct color contrast for accessibility', () => {
-    const { container: userContainer } = render(
-      <ChatBubble role="user" content="User msg" timestamp={Date.now()} />
-    );
-    const { container: aiContainer } = render(
-      <ChatBubble role="ai" content="AI msg" timestamp={Date.now()} />
-    );
-
-    // User bubbles have white text on dark background
-    expect(userContainer.querySelector('.bg-slate-700.text-white')).toBeInTheDocument();
-    // AI bubbles have dark text on light background
-    expect(aiContainer.querySelector('.bg-slate-100')).toBeInTheDocument();
+    // It should render "[object Object]" literally
+    expect(screen.getByText('[object Object]')).toBeInTheDocument();
   });
 });
@@ -1,61 +0,0 @@
-import ReactMarkdown from 'react-markdown';
-import remarkGfm from 'remark-gfm';
-import { useMemo } from 'react';
-
-type MessageRole = 'user' | 'ai' | 'system';
-
-interface ChatBubbleProps {
-  role: MessageRole;
-  content: string;
-  timestamp: number;
-}
-
-const bubbleStyles = {
-  user: 'bg-slate-700 text-white ml-auto',
-  ai: 'bg-slate-100 text-slate-800 mr-auto',
-  system: 'bg-transparent text-slate-500 mx-auto text-center text-sm',
-};
-
-export function ChatBubble({ role, content, timestamp }: ChatBubbleProps) {
-  const baseClassName = 'p-3 rounded-lg max-w-[80%]';
-  const roleClassName = bubbleStyles[role];
-
-  // Memoize markdown configuration to prevent re-creation on every render
-  const markdownComponents = useMemo(() => ({
-    // Style code blocks with dark theme - pre wraps code blocks
-    pre: ({ children }: any) => (
-      <pre className="bg-slate-900 text-white p-2 rounded overflow-x-auto my-2">
-        {children}
-      </pre>
-    ),
-    // Inline code - code inside inline text
-    code: ({ inline, className, children }: any) => {
-      if (inline) {
-        return (
-          <code className="bg-slate-200 dark:bg-slate-700 px-1 rounded text-sm">
-            {children}
-          </code>
-        );
-      }
-      return <code className={className}>{children}</code>;
-    },
-  }), []);
-
-  const markdownPlugins = useMemo(() => [remarkGfm], []);
-
-  return (
-    <div className={`${baseClassName} ${roleClassName}`} data-testid={`chat-bubble-${role}`}>
-      <ReactMarkdown
-        remarkPlugins={markdownPlugins}
-        components={markdownComponents}
-      >
-        {content}
-      </ReactMarkdown>
-      {role !== 'system' && (
-        <div className="text-xs opacity-70 mt-1">
-          {new Date(timestamp).toLocaleTimeString()}
-        </div>
-      )}
-    </div>
-  );
-}
@@ -1,122 +1,69 @@
 import { describe, it, expect, vi, beforeEach } from 'vitest';
 import { render, screen } from '@testing-library/react';
+import { ChatWindow } from './chat-window';
+
+// Mock scrollIntoView
+Element.prototype.scrollIntoView = vi.fn();

-// Create a selector-based mock system
-let mockState = {
-  messages: [] as any[],
-  isLoading: false,
-  hydrate: vi.fn(),
-  addMessage: vi.fn(),
-  isRefining: false,
-  cancelRefinement: vi.fn(),
-  showDraftView: false,
-  isFastTrack: false,
-  toggleFastTrack: vi.fn(),
-};
-
-const mockUseChatStore = vi.fn((selector?: Function) => {
-  return selector ? selector(mockState) : mockState;
-});
-
-vi.mock('@/lib/store/chat-store', () => ({
-  useChatStore: (selector?: Function) => {
-    return selector ? selector(mockState) : mockState;
-  },
+// Mock store hooks
+vi.mock('@/store/use-session', () => ({
+  useTeacherStatus: vi.fn(() => 'idle'),
 }));

-import { ChatWindow } from './ChatWindow';
+// Mock Dexie hooks
+const mockMessages = [
+  { id: 1, role: 'user', content: 'Hello', timestamp: 1000 },
+  { id: 2, role: 'assistant', content: 'Hi there!', timestamp: 2000 },
+];
+
+vi.mock('dexie-react-hooks', () => ({
+  useLiveQuery: vi.fn((cb) => {
+    // If we wanted to test the callback, we'd mock db. But for UI testing,
+    // we can just return what we want the hook to return.
+    // However, existing check calls the callback.
+    // Let's rely on a variable we can change, or just mock return value.
+    // For simplicity in this file, let's assume it returns the global mockMessages var
+    // initialized in test blocks.
+    return (globalThis as any).mockLiveQueryValue;
+  }),
+}));
+
+// Mock db to avoid runtime errors if useLiveQuery callback is executed (though we mocked useLiveQuery)
+vi.mock('@/lib/db/db', () => ({
+  db: {},
+}));

 describe('ChatWindow', () => {
   beforeEach(() => {
     vi.clearAllMocks();
-    // Reset state
-    mockState = {
-      messages: [],
-      isLoading: false,
-      hydrate: vi.fn(),
-      addMessage: vi.fn(),
-      isRefining: false,
-      cancelRefinement: vi.fn(),
-      showDraftView: false,
-      isFastTrack: false,
-      toggleFastTrack: vi.fn(),
-    };
+    (globalThis as any).mockLiveQueryValue = [];
   });

-  it('renders messages from store using atomic selectors', () => {
-    mockState.messages = [
-      { id: 1, role: 'user', content: 'Hello', timestamp: Date.now() },
-      { id: 2, role: 'assistant', content: 'Hi there!', timestamp: Date.now() },
-    ];
+  it('renders loading state when no sessionId is provided', () => {
+    render(<ChatWindow sessionId={null} />);
+    expect(screen.getByText(/loading session/i)).toBeInTheDocument();
+  });

-    render(<ChatWindow />);
+  it('renders empty state when sessionId is provided but no messages', () => {
+    (globalThis as any).mockLiveQueryValue = [];
+    render(<ChatWindow sessionId="123" />);
+    // Updated text expectation
+    expect(screen.getByText(/what do you want to record?/i)).toBeInTheDocument();
+    expect(screen.getByText(/let me help you summarize your day/i)).toBeInTheDocument();
+    // Verify theme class
+    expect(screen.getByText(/what do you want to record?/i)).toHaveClass('text-foreground');
+  });

+  it('renders messages when they exist', () => {
+    (globalThis as any).mockLiveQueryValue = mockMessages;
+    render(<ChatWindow sessionId="123" />);
     expect(screen.getByText('Hello')).toBeInTheDocument();
     expect(screen.getByText('Hi there!')).toBeInTheDocument();
   });

-  it('shows typing indicator when isTyping is true', () => {
-    render(<ChatWindow isTyping={true} />);
-    expect(screen.getByText(/teacher is typing/i)).toBeInTheDocument();
-  });
-
-  it('renders messages container with proper data attribute', () => {
-    const { container } = render(<ChatWindow />);
-    const messagesContainer = container.querySelector('[data-testid="messages-container"]');
-    expect(messagesContainer).toBeInTheDocument();
-  });
-
-  it('shows loading state while hydrating', () => {
-    mockState.isLoading = true;
-    render(<ChatWindow />);
-    expect(screen.getByText(/loading history/i)).toBeInTheDocument();
-  });
-
-  it('shows empty state when no messages', () => {
-    render(<ChatWindow />);
-    expect(screen.getByText(/start a conversation/i)).toBeInTheDocument();
-  });
-
-  it('applies Morning Mist theme classes', () => {
-    const { container } = render(<ChatWindow />);
-    expect(container.firstChild).toHaveClass('bg-slate-50');
-  });
-
-  // Story 2.3: Refinement Mode Tests
-  describe('Refinement Mode (Story 2.3)', () => {
-    it('should not show refinement badge when isRefining is false', () => {
-      mockState.isRefining = false;
-      const { container } = render(<ChatWindow />);
-      expect(screen.queryByText(/refining your draft/i)).not.toBeInTheDocument();
-    });
-
-    it('should show refinement badge when isRefining is true', () => {
-      mockState.isRefining = true;
-      mockState.cancelRefinement = vi.fn();
-      const { container } = render(<ChatWindow />);
-      expect(screen.getByText(/refining your draft/i)).toBeInTheDocument();
-    });
-
-    it('should call cancelRefinement when cancel button is clicked', () => {
-      mockState.isRefining = true;
-      mockState.cancelRefinement = vi.fn();
-
-      const { container } = render(<ChatWindow />);
-      const cancelButton = screen.getByRole('button', { name: /cancel refinement/i });
-      cancelButton.click();
-
-      expect(mockState.cancelRefinement).toHaveBeenCalledTimes(1);
-    });
-
-    it('should disable chat input when refinement mode is active', () => {
-      mockState.isRefining = true;
-      mockState.showDraftView = true;
-
-      render(<ChatWindow />);
-      const chatInput = screen.getByRole('textbox');
-      expect(chatInput).toBeDisabled();
-    });
+  it('scrolls to bottom on new messages', () => {
+    (globalThis as any).mockLiveQueryValue = mockMessages;
+    render(<ChatWindow sessionId="123" />);
+    expect(Element.prototype.scrollIntoView).toHaveBeenCalled();
   });
 });
@@ -1,100 +0,0 @@
-'use client';
-
-import { useEffect, useRef } from 'react';
-import { useChatStore } from '@/lib/store/chat-store';
-import { ChatBubble } from './ChatBubble';
-import { TypingIndicator } from './TypingIndicator';
-import { ChatInput } from './ChatInput';
-import { DraftViewSheet } from '../draft/DraftViewSheet';
-import { RefinementModeBadge } from './RefinementModeBadge';
-
-interface ChatWindowProps {
-  isTyping?: boolean;
-}
-
-export function ChatWindow({ isTyping = false }: ChatWindowProps) {
-  const messages = useChatStore((s) => s.messages);
-  const isLoading = useChatStore((s) => s.isLoading);
-  const sendMessage = useChatStore((s) => s.addMessage);
-  const hydrate = useChatStore((s) => s.hydrate);
-  const isFastTrack = useChatStore((s) => s.isFastTrack);
-  const toggleFastTrack = useChatStore((s) => s.toggleFastTrack);
-  const showDraftView = useChatStore((s) => s.showDraftView);
-  // Refinement state (Story 2.3)
-  const isRefining = useChatStore((s) => s.isRefining);
-  const cancelRefinement = useChatStore((s) => s.cancelRefinement);
-
-  const messagesEndRef = useRef<HTMLDivElement>(null);
-  const messagesContainerRef = useRef<HTMLDivElement>(null);
-
-  // Hydrate messages on mount
-  useEffect(() => {
-    hydrate();
-  }, [hydrate]);
-
-  // Auto-scroll to bottom when messages change or typing indicator shows
-  useEffect(() => {
-    if (messagesEndRef.current) {
-      messagesEndRef.current.scrollIntoView({ behavior: 'smooth' });
-    }
-  }, [messages, isTyping]);
-
-  const handleSend = (content: string) => {
-    sendMessage(content, 'user');
-  };
-
-  return (
-    <>
-      <div className="flex flex-col h-screen bg-slate-50 max-w-2xl mx-auto">
-        {/* Header */}
-        <header className="py-4 px-4 border-b bg-white">
-          <h1 className="text-xl font-bold text-slate-800">Venting Session</h1>
-        </header>
-
-        {/* Refinement Mode Badge (Story 2.3) */}
-        {isRefining && <RefinementModeBadge onCancel={cancelRefinement || (() => {})} />}
-
-        {/* Messages Container */}
-        <div
-          ref={messagesContainerRef}
-          data-testid="messages-container"
-          className="flex-1 overflow-y-auto px-4 py-4 space-y-4 flex flex-col"
-        >
-          {isLoading ? (
-            <p className="text-center text-slate-500">Loading history...</p>
-          ) : messages.length === 0 ? (
-            <p className="text-center text-slate-400">
-              Start a conversation by typing a message below
-            </p>
-          ) : (
-            messages.map((msg) => (
-              <ChatBubble
-                key={msg.id || msg.timestamp}
-                role={msg.role === 'assistant' ? 'ai' : 'user'}
-                content={msg.content}
-                timestamp={msg.timestamp}
-              />
-            ))
-          )}
-          <div ref={messagesEndRef} />
-        </div>
-
-        {/* Typing Indicator */}
-        <TypingIndicator isTyping={isTyping} />
-
-        {/* Input */}
-        <div className="px-4 pb-4">
-          <ChatInput
-            onSend={handleSend}
-            disabled={isLoading || showDraftView}
-            isFastTrack={isFastTrack}
-            onToggleFastTrack={toggleFastTrack}
-          />
-        </div>
-      </div>
-
-      {/* Draft View Sheet */}
-      <DraftViewSheet />
-    </>
-  );
-}
@@ -18,8 +18,8 @@ export function ChatBubble({ role, content }: ChatBubbleProps) {
       <div className={cn(
         "max-w-[80%] rounded-2xl px-4 py-3 text-sm leading-relaxed shadow-sm",
         isUser
-          ? "bg-blue-600 text-white rounded-tr-sm"
-          : "bg-white border border-slate-200 text-slate-800 rounded-tl-sm"
+          ? "bg-primary text-primary-foreground rounded-tr-sm"
+          : "bg-card border border-border text-card-foreground rounded-tl-sm"
       )}>
         {/* Render Markdown safely */}
         <div className="prose prose-sm dark:prose-invert max-w-none break-words">
@@ -44,7 +44,7 @@ export function ChatBubble({ role, content }: ChatBubbleProps) {
             )
           }}
         >
-          {content}
+          {String(content)}
         </ReactMarkdown>
       </div>
     </div>
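The `String(content)` change above is the actual ChatBubble crash fix: react-markdown only accepts a string child and throws its "Unexpected value" error otherwise, so a non-string message took the whole chat view down. A minimal sketch of the failure mode, with a hypothetical bad value:

```tsx
// Hypothetical repro: an object leaks into a message's content field.
const badContent = { foo: 'bar' } as unknown as string;

// Before: <ReactMarkdown>{badContent}</ReactMarkdown> throws at render time.
// After: String() coerces it, rendering the literal text "[object Object]" --
// ugly, but the session survives and the bug stays visible in the UI.
const rendered = <ReactMarkdown>{String(badContent)}</ReactMarkdown>;
```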
@@ -36,22 +36,22 @@ export function ChatInput({ onSend, isLoading }: ChatInputProps) {
   };

   return (
-    <div className="p-4 bg-white/80 backdrop-blur-md border-t border-slate-200 sticky bottom-0">
+    <div className="p-4 bg-card/80 backdrop-blur-md border-t border-border sticky bottom-0">
       <div className="flex gap-2 items-center max-w-3xl mx-auto">
         <Textarea
           ref={textareaRef}
           value={input}
           onChange={(e) => setInput(e.target.value)}
           onKeyDown={handleKeyDown}
-          placeholder="What's specifically frustrating you right now?"
-          className="resize-none min-h-[44px] max-h-[120px] py-3 rounded-xl border-slate-300 focus:ring-blue-500"
+          placeholder="Record your thoughts..."
+          className="resize-none min-h-[44px] max-h-[120px] py-3 rounded-xl border-input focus:ring-ring"
           rows={1}
         />
         <Button
           onClick={handleSend}
           disabled={!input.trim() || isLoading}
           size="icon"
-          className="h-11 w-11 rounded-xl shrink-0 bg-blue-600 hover:bg-blue-700 transition-colors"
+          className="h-11 w-11 rounded-xl shrink-0 bg-slate-800 hover:bg-slate-700 transition-colors"
         >
           {isLoading ? <StopCircle className="h-5 w-5 animate-pulse" /> : <Send className="h-5 w-5" />}
         </Button>
@@ -6,6 +6,7 @@ import { db } from '@/lib/db/db';
 import { ChatBubble } from './chat-bubble';
 import { TypingIndicator } from './typing-indicator';
 import { useTeacherStatus } from '@/store/use-session';
+import { BookOpen, Sparkles } from 'lucide-react';

 interface ChatWindowProps {
   sessionId: string | null;
@@ -38,17 +39,28 @@ export function ChatWindow({ sessionId }: ChatWindowProps) {

   if (!messages || messages.length === 0) {
     return (
-      <div className="flex-1 flex flex-col items-center justify-center text-center p-8 space-y-4">
-        <h2 className="text-xl font-semibold text-slate-700">What's specifically frustrating you right now?</h2>
-        <p className="text-slate-500 max-w-sm">
-          Don't hold back. I'll help you turn that annoyance into a valuable insight.
-        </p>
+      <div className="flex-1 flex flex-col items-center justify-center text-center p-8 space-y-6">
+        <div className="relative">
+          <div className="w-32 h-32 bg-gradient-to-br from-secondary to-muted rounded-full flex items-center justify-center">
+            <BookOpen className="w-16 h-16 text-muted-foreground/50" aria-hidden="true" />
+          </div>
+          <Sparkles className="w-8 h-8 text-amber-400 absolute -top-2 -right-2" aria-hidden="true" />
+        </div>
+
+        <div className="space-y-2 max-w-md">
+          <h2 className="text-2xl font-bold font-serif text-foreground">
+            What do you want to record?
+          </h2>
+          <p className="text-muted-foreground font-sans">
+            Let me help you summarize your day.
+          </p>
+        </div>
       </div>
     );
   }

   return (
-    <div className="flex-1 overflow-y-auto px-4 py-6 scroll-smooth">
+    <div className="h-full flex-1 overflow-y-auto px-4 py-6 scroll-smooth">
       <div className="max-w-3xl mx-auto space-y-4">
         {messages.map((msg) => (
           <ChatBubble key={msg.id} role={msg.role} content={msg.content} />
@@ -1,6 +1,6 @@
-export { ChatBubble } from './ChatBubble';
-export { ChatInput } from './ChatInput';
-export { ChatWindow } from './ChatWindow';
-export { TypingIndicator } from './TypingIndicator';
+export { ChatBubble } from './chat-bubble';
+export { ChatInput } from './chat-input';
+export { ChatWindow } from './chat-window';
+export { TypingIndicator } from './typing-indicator';
 export { RefinementModeBadge } from './RefinementModeBadge';
 export { RefinementIndicator } from './RefinementIndicator';
@@ -60,7 +60,7 @@ export function DraftActions({ onApprove, onReject, onCopyOnly }: DraftActionsProps) {
       <button
         onClick={onApprove}
         type="button"
-        className="flex-1 min-h-[44px] px-4 py-3 bg-slate-700 hover:bg-slate-800 text-white rounded-md transition-colors flex items-center justify-center gap-2"
+        className="flex-1 min-h-[44px] px-4 py-3 bg-slate-800 hover:bg-slate-700 text-white rounded-md transition-colors flex items-center justify-center gap-2"
         aria-label="Approve, copy to clipboard, and mark as completed"
       >
         <ThumbsUp className="w-5 h-5" aria-hidden="true" />
@@ -1,6 +1,6 @@
 'use client';

-import { useState } from 'react';
+import { useState, useEffect } from 'react';
 import { Trash2 } from 'lucide-react';
 import { useChatStore } from '@/lib/store/chat-store';
 import { Sheet } from './Sheet';
@@ -40,6 +40,11 @@ export function DraftViewSheet() {
   const [toastShow, setToastShow] = useState(false);
   const [toastMessage, setToastMessage] = useState('');

+  // Fix: Reset toast when opening a new draft
+  useEffect(() => {
+    setToastShow(false);
+  }, [currentDraft, showDraftView]);
+
   const showCopyToast = (message: string = 'Copied to clipboard!') => {
     setToastMessage(message);
     setToastShow(true);
@@ -1,12 +1,13 @@
 'use client';

-import { useState } from 'react';
-import { Copy, Check, X } from 'lucide-react';
+import { useState, useEffect } from 'react';
+import { Copy, Check, X, Trash2 } from 'lucide-react';
 import { useHistoryStore } from '@/lib/store/history-store';
 import { DraftContent } from '@/components/features/draft/DraftContent';
 import { CopySuccessToast } from '@/components/features/feedback/CopySuccessToast';
 import { useChatStore } from '@/lib/store/chat-store';
 import { Sheet } from '@/components/features/draft/Sheet';
+import { DeleteConfirmDialog } from './DeleteConfirmDialog';

 /**
  * HistoryDetailSheet Component
@@ -17,11 +18,13 @@ import { Sheet } from '@/components/features/draft/Sheet';
 * - Sheet component from DraftViewSheet (Story 2.2)
 * - DraftContent component (Story 2.2)
 * - CopyButton functionality (Story 2.4)
+ * - Delete functionality (Story 3.2.1)
 *
 * Features:
 * - Displays full draft with Merriweather font
 * - Copy button for clipboard export
 * - Close button
+ * - Delete button
 * - Swipe-to-dismiss support (via Sheet)
 *
 * Architecture Compliance:
@@ -31,14 +34,23 @@ import { Sheet } from '@/components/features/draft/Sheet';
 export function HistoryDetailSheet() {
   const selectedDraft = useHistoryStore((s) => s.selectedDraft);
   const closeDetail = useHistoryStore((s) => s.closeDetail);
+  const deleteDraft = useHistoryStore((s) => s.deleteDraft);

   // Reuse copy action from ChatStore
   const copyDraftToClipboard = useChatStore((s) => s.copyDraftToClipboard);

+  // Dialog state
+  const [showDeleteDialog, setShowDeleteDialog] = useState(false);
+
   // Toast state
   const [toastShow, setToastShow] = useState(false);
   const [toastMessage, setToastMessage] = useState('');

+  // Fix: Reset toast when opening a new draft
+  useEffect(() => {
+    setToastShow(false);
+  }, [selectedDraft]);
+
   const showCopyToast = (message: string = 'Copied to clipboard!') => {
     setToastMessage(message);
     setToastShow(true);
@@ -51,6 +63,19 @@ export function HistoryDetailSheet() {
     }
   };

+  const handleDelete = async () => {
+    if (selectedDraft) {
+      const success = await deleteDraft(selectedDraft.id);
+      if (success) {
+        setShowDeleteDialog(false);
+        showCopyToast('Post deleted successfully');
+      } else {
+        setShowDeleteDialog(false);
+        showCopyToast('Failed to delete post');
+      }
+    }
+  };
+
   const handleClose = () => {
     closeDetail();
   };
@@ -64,8 +89,19 @@ export function HistoryDetailSheet() {
     <Sheet open={!!selectedDraft} onClose={handleClose}>
       <DraftContent draft={selectedDraft} />

-      {/* Footer with copy and close buttons */}
+      {/* Footer with copy, delete and close buttons */}
       <nav className="sticky bottom-0 flex gap-3 p-4 bg-white border-t border-slate-200">
+        {/* Delete button (Story 3.2.1) */}
+        <button
+          onClick={() => setShowDeleteDialog(true)}
+          type="button"
+          className="min-h-[44px] px-4 py-3 border border-destructive text-destructive rounded-md hover:bg-destructive/10 transition-colors flex items-center justify-center gap-2"
+          aria-label="Delete this draft"
+        >
+          <Trash2 className="w-5 h-5" aria-hidden="true" />
+          <span className="sr-only">Delete</span>
+        </button>
+
         {/* Copy button */}
         <button
           onClick={handleCopy}
@@ -90,7 +126,15 @@ export function HistoryDetailSheet() {
       </nav>
     </Sheet>

-    {/* Toast for copy feedback */}
+    {/* Delete Confirmation Dialog */}
+    <DeleteConfirmDialog
+      open={showDeleteDialog}
+      onOpenChange={setShowDeleteDialog}
+      onConfirm={handleDelete}
+      draftTitle={selectedDraft.title}
+    />
+
+    {/* Toast for feedback */}
     <CopySuccessToast
       show={toastShow}
       message={toastMessage}
@@ -29,6 +29,7 @@ interface HistoryState {
   selectDraft: (draft: Draft) => void;
   closeDetail: () => void;
   clearError: () => void;
+  deleteDraft: (draftId: number) => Promise<boolean>;
 }

 export const useHistoryStore = create<HistoryState>((set, get) => ({
@@ -39,6 +40,29 @@ export const useHistoryStore = create<HistoryState>((set, get) => ({
   hasMore: true,
   error: null,

+  /**
+   * Delete a draft from history
+   */
+  deleteDraft: async (draftId: number) => {
+    try {
+      const success = await DraftService.deleteDraft(draftId);
+
+      if (success) {
+        set(state => ({
+          drafts: state.drafts.filter(d => d.id !== draftId),
+          // Close detail if the deleted draft was selected
+          selectedDraft: state.selectedDraft?.id === draftId ? null : state.selectedDraft
+        }));
+      }
+      return success;
+    } catch (error) {
+      set({
+        error: error instanceof Error ? error.message : 'Failed to delete draft'
+      });
+      return false;
+    }
+  },
+
   /**
    * Load more drafts (pagination)
    * Appends to existing drafts for infinite scroll
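A short usage sketch for the new `deleteDraft` action (the id and call site are hypothetical); note it resolves to `false` rather than throwing, so callers branch on the boolean:

```ts
// Inside an event handler (illustrative):
const deleted = await useHistoryStore.getState().deleteDraft(42);
if (!deleted) {
  // On failure the store records the message in its `error` field.
  console.warn(useHistoryStore.getState().error);
}
```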
@@ -25,6 +25,13 @@ interface ApiErrorResponse {
 }

 export class LLMService {
+  /**
+   * Validate connection to LLM provider with detailed error information
+   * @param baseUrl - The API base URL
+   * @param apiKey - The API key for authentication
+   * @param model - The model name to test
+   * @returns Promise resolving to ConnectionValidationResult with detailed error info
+   */
   /**
    * Validate connection to LLM provider with detailed error information
    * @param baseUrl - The API base URL
@@ -38,32 +45,30 @@ export class LLMService {
     model: string
   ): Promise<ConnectionValidationResult> {
     try {
-      const response = await fetch(`${baseUrl}/chat/completions`, {
+      // Use our own server-side proxy to avoid CORS and Mixed Content issues
+      const response = await fetch('/api/llm', {
         method: 'POST',
         headers: {
           'Content-Type': 'application/json',
           'Authorization': `Bearer ${apiKey}`,
         },
         body: JSON.stringify({
-          model: model,
+          apiKey,
+          baseUrl,
+          model,
           messages: [{ role: 'user', content: 'hello' }],
-          max_tokens: 1,
+          stream: false, // Don't stream for validation
+          // We don't need max_tokens as we just want to see if it works
         }),
       });

-      if (response.ok) {
+      const data = await response.json();
+
+      if (response.ok && data.success) {
         return createValidationSuccess();
       }

-      // Parse error response for detailed error type
-      let errorBody: unknown = null;
-      try {
-        errorBody = await response.json();
-      } catch {
-        // If response body is not JSON, continue without it
-      }
-
-      return this.parseApiError(response, errorBody);
+      // Handle proxy errors or upstream errors returned by proxy
+      return this.parseApiError(response, data);
     } catch (error) {
       // Handle network errors, timeouts, etc.
       if (this.isNetworkError(error)) {
@@ -86,16 +91,17 @@ export class LLMService {
   ): ConnectionValidationResult {
     const status = response.status;
     const errorData = body as ApiErrorResponse;
+    // Proxy returns structured error in errorData.error
     const errorCode = errorData?.error?.code?.toLowerCase() || '';
     const errorMessage = errorData?.error?.message || '';

     // 401 Unauthorized / 403 Forbidden -> Invalid API Key
-    if (status === 401 || status === 403) {
+    if (status === 401 || status === 403 || errorCode === 'invalid_api_key') {
       return createValidationError(ApiErrorType.INVALID_KEY, errorData);
     }

     // 404 Not Found -> Could be model or URL
-    if (status === 404) {
+    if (status === 404 || errorCode === 'model_not_found') {
       if (errorCode.includes('model') || errorMessage.toLowerCase().includes('model')) {
         return createValidationError(ApiErrorType.MODEL_NOT_FOUND, errorData);
       }
@@ -103,7 +109,7 @@ export class LLMService {
     }

     // 429 Too Many Requests -> Quota exceeded
-    if (status === 429) {
+    if (status === 429 || errorCode === 'rate_limit') {
       return createValidationError(ApiErrorType.QUOTA_EXCEEDED, errorData);
     }

@@ -132,26 +138,29 @@ export class LLMService {
     const { apiKey, baseUrl, model, messages } = request;

     try {
-      const response = await fetch(`${baseUrl}/chat/completions`, {
+      // Use our own server-side proxy
+      const response = await fetch('/api/llm', {
         method: 'POST',
         headers: {
           'Content-Type': 'application/json',
           'Authorization': `Bearer ${apiKey}`
         },
         body: JSON.stringify({
-          model: model,
+          apiKey,
+          baseUrl,
+          model,
           messages: messages.map(m => ({ role: m.role, content: m.content })),
-          temperature: 0.7
+          temperature: 0.7,
+          stream: false // Non-streaming for this method
         })
       });

-      if (!response.ok) {
-        const errorData = await response.json().catch(() => ({}));
-        throw new Error(errorData.error?.message || `API Error: ${response.statusText}`);
+      const data = await response.json();
+
+      if (!response.ok || !data.success) {
+        throw new Error(data.error?.message || `API Error: ${response.statusText}`);
       }

-      const data = await response.json();
-      return data.choices[0]?.message?.content || '';
+      return data.data?.text || '';
     } catch (error) {
       console.error('LLM Generation failed:', error);
       throw error;
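After this change both `validateConnection` and the generation path call the same-origin `/api/llm` proxy, so a browser client can probe an arbitrary BYOK endpoint without CORS or mixed-content failures. A usage sketch (endpoint, key, and model are placeholders, not part of the commit):

```ts
const result = await LLMService.validateConnection(
  'https://api.deepseek.com/v1', // baseUrl (placeholder)
  'sk-...',                      // apiKey (placeholder)
  'deepseek-chat'                // model (placeholder)
);
if (!result.isValid) {
  // parseApiError has mapped the proxy's { error: { code, message } } payload
  // to an ApiErrorType such as INVALID_KEY, MODEL_NOT_FOUND, or QUOTA_EXCEEDED.
}
```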
@@ -24,10 +24,14 @@ describe('LLM Service - Connection Validation (Story 4.2)', () => {
   const testModel = 'gpt-4o';

   it('should return success result for valid connection', async () => {
+    // Mock proxy success response
     mockFetch.mockResolvedValueOnce({
       ok: true,
       status: 200,
-      json: async () => ({ choices: [{ message: { content: 'hi' } }] }),
+      json: async () => ({
+        success: true,
+        data: { text: 'hi' }
+      }),
     });

     const result = await LLMService.validateConnection(testBaseUrl, testApiKey, testModel);
@@ -38,10 +42,12 @@ describe('LLM Service - Connection Validation (Story 4.2)', () => {
   });

   it('should return INVALID_KEY for 401 Unauthorized', async () => {
+    // Mock proxy error response
     mockFetch.mockResolvedValueOnce({
       ok: false,
       status: 401,
       json: async () => ({
+        success: false,
         error: { code: 'invalid_api_key', message: 'Invalid API key' }
       }),
     });
@@ -59,6 +65,7 @@ describe('LLM Service - Connection Validation (Story 4.2)', () => {
       ok: false,
       status: 403,
       json: async () => ({
+        success: false,
         error: { code: 'access_denied', message: 'Access denied' }
       }),
     });
@@ -74,6 +81,7 @@ describe('LLM Service - Connection Validation (Story 4.2)', () => {
       ok: false,
       status: 404,
       json: async () => ({
+        success: false,
         error: { code: 'model_not_found', message: 'Model not found' }
       }),
     });
@@ -89,6 +97,7 @@ describe('LLM Service - Connection Validation (Story 4.2)', () => {
       ok: false,
       status: 404,
       json: async () => ({
+        success: false,
         error: { code: 'not_found', message: 'Endpoint not found' }
       }),
     });
@@ -104,6 +113,7 @@ describe('LLM Service - Connection Validation (Story 4.2)', () => {
       ok: false,
       status: 429,
       json: async () => ({
+        success: false,
         error: { code: 'rate_limit_exceeded', message: 'Rate limit exceeded' }
       }),
     });
@@ -140,6 +150,7 @@ describe('LLM Service - Connection Validation (Story 4.2)', () => {
       ok: false,
       status: 500,
       json: async () => ({
+        success: false,
         error: { code: 'internal_error', message: 'Internal server error' }
       }),
     });
@@ -155,6 +166,7 @@ describe('LLM Service - Connection Validation (Story 4.2)', () => {
       ok: false,
       status: 400,
       json: async () => ({
+        success: false,
         error: { code: 'invalid_request', message: 'Invalid request' }
      }),
     });
@@ -167,7 +179,10 @@ describe('LLM Service - Connection Validation (Story 4.2)', () => {
   });

   it('should include raw error in result for debugging', async () => {
-    const rawErrorResponse = { error: { code: 'invalid_api_key', message: 'Invalid key' } };
+    const rawErrorResponse = {
+      success: false,
+      error: { code: 'invalid_api_key', message: 'Invalid key' }
+    };

     mockFetch.mockResolvedValueOnce({
       ok: false,
@@ -181,24 +196,28 @@ describe('LLM Service - Connection Validation (Story 4.2)', () => {
   });

   it('should make correct API request with minimal payload', async () => {
+    // Mock success response
     mockFetch.mockResolvedValueOnce({
       ok: true,
       status: 200,
-      json: async () => ({ choices: [{ message: { content: 'hi' } }] }),
+      json: async () => ({
+        success: true,
+        data: { text: 'hi' }
+      }),
     });

     await LLMService.validateConnection(testBaseUrl, testApiKey, testModel);

     expect(mockFetch).toHaveBeenCalledTimes(1);
+    // Expect call to Proxy
     expect(mockFetch).toHaveBeenCalledWith(
-      expect.stringContaining('/chat/completions'),
+      expect.stringContaining('/api/llm'),
       expect.objectContaining({
         method: 'POST',
         headers: expect.objectContaining({
           'Content-Type': 'application/json',
           'Authorization': `Bearer ${testApiKey}`
         }),
-        body: expect.stringContaining('"model":"gpt-4o"')
+        body: expect.stringContaining('"apiKey":"sk-test-key"')
       })
     );
   });