Add tests

This commit is contained in:
Peter Steinberger 2025-05-23 05:40:31 +02:00
parent 2619f2a916
commit a7970d8de7
9 changed files with 1757 additions and 0 deletions

View file

@ -0,0 +1,273 @@
import { pino } from 'pino';
// Mock all the tool handlers to avoid import.meta issues
// NOTE: Jest hoists jest.mock() factory calls above normal initialization;
// the factories below may only reference these consts because their names
// start with the `mock` prefix (Jest's documented hoisting exception).
const mockImageToolHandler = jest.fn();
const mockListToolHandler = jest.fn();
const mockAnalyzeToolHandler = jest.fn();
jest.mock('../../src/tools/image', () => ({
  imageToolHandler: mockImageToolHandler
}));
jest.mock('../../src/tools/list', () => ({
  listToolHandler: mockListToolHandler
}));
jest.mock('../../src/tools/analyze', () => ({
  analyzeToolHandler: mockAnalyzeToolHandler
}));
// Create a mock logger for tests
// Silent pino logger + context object shared by every handler invocation below.
const mockLogger = pino({ level: 'silent' });
const mockContext = { logger: mockLogger };
// Integration-style tests over the server's three tools (image, list, analyze).
//
// NOTE(review): every test below stubs a handler with mockResolvedValue and then
// calls the stub itself, so the assertions only verify the canned fixtures —
// not any real handler logic. They document the expected response shapes;
// behavioral coverage lives in the per-tool unit tests. Consider wiring these
// through the actual MCP server dispatch to make them true integration tests.
describe('MCP Server Integration', () => {
  beforeEach(() => {
    jest.clearAllMocks();
  });

  describe('Tool Integration Tests', () => {
    describe('Image Tool', () => {
      it('should capture screen successfully', async () => {
        // Shape of a successful capture: text summary plus saved_files metadata.
        const mockResult = {
          content: [{
            type: 'text',
            text: 'Captured 1 image in screen mode.\n\nSaved files:\n1. /tmp/screen_capture.png (Screen Capture)'
          }],
          saved_files: [{
            path: '/tmp/screen_capture.png',
            item_label: 'Screen Capture',
            mime_type: 'image/png'
          }]
        };
        mockImageToolHandler.mockResolvedValue(mockResult);
        const result = await mockImageToolHandler({
          format: 'png',
          return_data: false,
          capture_focus: 'background'
        }, mockContext);
        expect(result.content).toHaveLength(1);
        expect(result.content[0].type).toBe('text');
        expect(result.content[0].text).toContain('Captured 1 image in screen mode');
        expect(result.isError).toBeFalsy();
      });

      it('should handle permission errors', async () => {
        // Error responses carry isError plus a machine-readable _meta code.
        const mockResult = {
          content: [{
            type: 'text',
            text: 'Image capture failed: Permission denied. Screen recording permission is required.'
          }],
          isError: true,
          _meta: {
            backend_error_code: 'PERMISSION_DENIED'
          }
        };
        mockImageToolHandler.mockResolvedValue(mockResult);
        const result = await mockImageToolHandler({
          format: 'png',
          return_data: false,
          capture_focus: 'background'
        }, mockContext);
        expect(result.content[0].text).toContain('Permission');
        expect(result.isError).toBe(true);
      });
    });

    describe('List Tool', () => {
      it('should list running applications', async () => {
        const mockResult = {
          content: [{
            type: 'text',
            text: 'Found 3 running applications:\n\n1. Safari (com.apple.Safari) - PID: 1234 [ACTIVE] - Windows: 2\n2. Cursor (com.todesktop.230313mzl4w4u92) - PID: 5678 - Windows: 1\n3. Terminal (com.apple.Terminal) - PID: 9012 - Windows: 1'
          }],
          application_list: []
        };
        mockListToolHandler.mockResolvedValue(mockResult);
        const result = await mockListToolHandler({
          item_type: 'running_applications'
        }, mockContext);
        expect(result.content[0].text).toContain('Found 3 running applications');
        expect(result.content[0].text).toContain('Safari');
        expect(result.content[0].text).toContain('Cursor');
        expect(result.isError).toBeFalsy();
      });

      it('should list application windows', async () => {
        const mockResult = {
          content: [{
            type: 'text',
            text: 'Found 2 windows for application: Safari (com.apple.Safari) - PID: 1234\n\nWindows:\n1. Safari - Main Window (ID: 12345, Index: 0)\n2. Safari - Secondary Window (ID: 12346, Index: 1)'
          }],
          window_list: [],
          target_application_info: {}
        };
        mockListToolHandler.mockResolvedValue(mockResult);
        const result = await mockListToolHandler({
          item_type: 'application_windows',
          app: 'Safari'
        }, mockContext);
        expect(result.content[0].text).toContain('Found 2 windows for application: Safari');
        expect(result.content[0].text).toContain('Safari - Main Window');
        expect(result.isError).toBeFalsy();
      });

      it('should require app parameter for application_windows', async () => {
        // Validation failure: listing windows without an app identifier is an error.
        const mockResult = {
          content: [{
            type: 'text',
            text: "For 'application_windows', 'app' identifier is required."
          }],
          isError: true
        };
        mockListToolHandler.mockResolvedValue(mockResult);
        const result = await mockListToolHandler({
          item_type: 'application_windows'
        }, mockContext);
        expect(result.content[0].text).toContain("For 'application_windows', 'app' identifier is required");
        expect(result.isError).toBe(true);
      });
    });

    describe('Analyze Tool', () => {
      beforeEach(() => {
        // Analyze requires at least one configured AI provider.
        process.env.AI_PROVIDERS = 'ollama/llava';
      });

      it('should analyze image successfully', async () => {
        const mockResult = {
          content: [{
            type: 'text',
            text: 'Image Analysis:\n\nThis is a screenshot of Safari browser showing a webpage with various elements including navigation bars, content areas, and user interface components.'
          }],
          analysis_text: 'This is a screenshot of Safari browser showing a webpage with various elements including navigation bars, content areas, and user interface components.'
        };
        mockAnalyzeToolHandler.mockResolvedValue(mockResult);
        const result = await mockAnalyzeToolHandler({
          image_path: '/tmp/test.png',
          question: 'What do you see?'
        }, mockContext);
        expect(result.content[0].text).toContain('This is a screenshot of Safari browser');
        expect(result.analysis_text).toBeDefined();
        expect(result.isError).toBeFalsy();
      });

      it('should handle missing AI configuration', async () => {
        const mockResult = {
          content: [{
            type: 'text',
            text: 'AI analysis not configured. Please set AI_PROVIDERS environment variable.'
          }],
          isError: true
        };
        mockAnalyzeToolHandler.mockResolvedValue(mockResult);
        const result = await mockAnalyzeToolHandler({
          image_path: '/tmp/test.png',
          question: 'What do you see?'
        }, mockContext);
        expect(result.content[0].text).toContain('AI analysis not configured');
        expect(result.isError).toBe(true);
      });
    });
  });

  describe('Error Handling', () => {
    it('should handle Swift CLI errors gracefully', async () => {
      const mockResult = {
        content: [{
          type: 'text',
          text: 'Image capture failed: Swift CLI crashed'
        }],
        isError: true,
        _meta: {
          backend_error_code: 'SWIFT_CLI_CRASH'
        }
      };
      mockImageToolHandler.mockResolvedValue(mockResult);
      const result = await mockImageToolHandler({
        format: 'png',
        return_data: false,
        capture_focus: 'background'
      }, mockContext);
      expect(result.isError).toBe(true);
      expect(result.content[0].text).toContain('Image capture failed');
    });

    it('should handle unexpected errors', async () => {
      const mockResult = {
        content: [{
          type: 'text',
          text: 'Unexpected error: Network connection failed'
        }],
        isError: true
      };
      mockListToolHandler.mockResolvedValue(mockResult);
      const result = await mockListToolHandler({
        item_type: 'running_applications'
      }, mockContext);
      expect(result.content[0].text).toContain('Unexpected error');
      expect(result.isError).toBe(true);
    });
  });

  describe('Cross-tool Integration', () => {
    it('should work with concurrent tool calls', async () => {
      const mockListResult = {
        content: [{
          type: 'text',
          text: 'Found 3 running applications:\n\n1. Safari\n2. Cursor\n3. Terminal'
        }],
        application_list: []
      };
      const mockImageResult = {
        content: [{
          type: 'text',
          text: 'Captured 1 image in screen mode.'
        }],
        saved_files: []
      };
      mockListToolHandler.mockResolvedValue(mockListResult);
      mockImageToolHandler.mockResolvedValue(mockImageResult);
      // Make concurrent requests
      const [listResult, imageResult] = await Promise.all([
        mockListToolHandler({ item_type: 'running_applications' }, mockContext),
        mockImageToolHandler({ format: 'png', return_data: false, capture_focus: 'background' }, mockContext)
      ]);
      expect(listResult.content[0].text).toContain('Found 3 running applications');
      expect(imageResult.content[0].text).toContain('Captured 1 image in screen mode');
      expect(mockListToolHandler).toHaveBeenCalledTimes(1);
      expect(mockImageToolHandler).toHaveBeenCalledTimes(1);
    });
  });
});

View file

@ -0,0 +1,143 @@
import { SwiftCliResponse, ApplicationListData, WindowListData, ImageCaptureData } from '../../src/types/index';
// Mock Swift CLI responses for testing
// Canned Swift CLI responses used by the tool unit tests.
// Every method builds a fresh response object per call, so a test that
// mutates a returned fixture cannot leak state into another test.
export const mockSwiftCli = {
  /** Successful application-list response with three fixed applications. */
  listApplications(): SwiftCliResponse {
    const applications = [
      { app_name: 'Safari', bundle_id: 'com.apple.Safari', pid: 1234, is_active: true, window_count: 2 },
      { app_name: 'Cursor', bundle_id: 'com.todesktop.230313mzl4w4u92', pid: 5678, is_active: false, window_count: 1 },
      { app_name: 'Terminal', bundle_id: 'com.apple.Terminal', pid: 9012, is_active: false, window_count: 3 }
    ];
    return {
      success: true,
      data: { applications } as ApplicationListData,
      messages: []
    };
  },

  /** Successful window-list response: two windows named after `appName`. */
  listWindows(appName: string): SwiftCliResponse {
    // Small builder so both window entries share one shape definition.
    const window = (
      title: string,
      id: number,
      index: number,
      bounds: { x: number; y: number; width: number; height: number }
    ) => ({
      window_title: title,
      window_id: id,
      window_index: index,
      bounds,
      is_on_screen: true
    });
    return {
      success: true,
      data: {
        target_application_info: {
          app_name: appName,
          bundle_id: `com.apple.${appName}`,
          pid: 1234
        },
        windows: [
          window(`${appName} - Main Window`, 1, 0, { x: 100, y: 100, width: 800, height: 600 }),
          window(`${appName} - Secondary Window`, 2, 1, { x: 200, y: 200, width: 600, height: 400 })
        ]
      } as WindowListData,
      messages: []
    };
  },

  /**
   * Successful image-capture response. With `app` set the saved file is a
   * window capture for that app; otherwise it is a full-screen capture.
   * (`mode` is accepted for call-site symmetry but does not change the fixture.)
   */
  captureImage(mode: string, app?: string): SwiftCliResponse {
    const fileName = app ? `${app.toLowerCase()}_window.png` : 'screen_capture.png';
    const savedFile = {
      path: `/tmp/${fileName}`,
      item_label: app ? `${app} Window` : 'Screen Capture',
      window_title: app ? `${app} - Main Window` : undefined,
      window_id: app ? 1 : undefined,
      mime_type: 'image/png'
    };
    return {
      success: true,
      data: { saved_files: [savedFile] } as ImageCaptureData,
      messages: []
    };
  },

  /** Failure response for a missing screen-recording permission. */
  permissionDenied(): SwiftCliResponse {
    return {
      success: false,
      error: {
        message: 'Permission denied. Screen recording permission required.',
        code: 'PERMISSION_DENIED'
      }
    };
  },

  /** Failure response for an application that is not running. */
  appNotFound(appName: string): SwiftCliResponse {
    return {
      success: false,
      error: {
        message: `Application '${appName}' not found or not running.`,
        code: 'APP_NOT_FOUND'
      }
    };
  },

  /** Successful server-status response with fixed version strings. */
  serverStatus(): SwiftCliResponse {
    return {
      success: true,
      data: {
        server_version: '1.1.1',
        swift_cli_version: '1.0.0',
        status: 'running'
      },
      messages: []
    };
  }
};
// Mock child_process.spawn for Swift CLI execution
// Fake child_process handle mimicking one Swift CLI run: emits the canned
// application list on stdout, nothing on stderr, then "closes" with exit code 0.
// Note the listeners fire synchronously at registration time, which is fine
// for the promise-based consumers under test.
const makeFakeSwiftProcess = () => ({
  stdout: {
    on: jest.fn((event, callback) => {
      if (event === 'data') {
        // Serialize the fixture exactly as the real CLI would print it.
        callback(Buffer.from(JSON.stringify(mockSwiftCli.listApplications())));
      }
    })
  },
  stderr: {
    on: jest.fn()
  },
  on: jest.fn((event, callback) => {
    if (event === 'close') {
      callback(0);
    }
  })
});

// Mock child_process.spawn for Swift CLI execution
export const mockChildProcess = {
  spawn: jest.fn().mockImplementation(makeFakeSwiftProcess)
};

37
tests/setup.ts Normal file
View file

@ -0,0 +1,37 @@
// Jest setup file
// Configure global test environment

// Mock console methods to reduce noise during testing
const originalConsole = global.console;

beforeEach(() => {
  // Reset console mocks before each test
  global.console = {
    ...originalConsole,
    log: jest.fn(),
    error: jest.fn(),
    warn: jest.fn(),
    info: jest.fn(),
    debug: jest.fn(),
  };
});

afterEach(() => {
  // Restore original console after each test
  global.console = originalConsole;
  jest.clearAllMocks();
});

// Global test timeout
jest.setTimeout(10000);

// Mock environment variables for testing
process.env.NODE_ENV = 'test';
// NOTE(review): this writes AI_PROVIDERS as a JSON array, but the analyze/
// integration tests exercise the comma-separated 'provider/model' form
// (e.g. 'ollama/llava,openai/gpt-4o'). Confirm parseAIProviders accepts the
// JSON form too, otherwise this default is never parseable and tests rely
// solely on their own per-suite overrides.
process.env.AI_PROVIDERS = JSON.stringify([
  {
    type: 'ollama',
    baseUrl: 'http://localhost:11434',
    model: 'llava',
    enabled: true
  }
]);

View file

@ -0,0 +1,274 @@
import { pino } from 'pino';
import { analyzeToolHandler, determineProviderAndModel, AnalyzeToolInput } from '../../../src/tools/analyze';
import { readImageAsBase64 } from '../../../src/utils/swift-cli';
import {
parseAIProviders,
isProviderAvailable,
analyzeImageWithProvider,
getDefaultModelForProvider
} from '../../../src/utils/ai-providers';
import { ToolContext, AIProvider } from '../../../src/types';
import path from 'path'; // Import path for extname
// Mocks
// Auto-mock the Swift CLI and AI-provider utility modules; the typed
// jest.MockedFunction casts below give per-test control with full inference.
jest.mock('../../../src/utils/swift-cli');
jest.mock('../../../src/utils/ai-providers');
const mockReadImageAsBase64 = readImageAsBase64 as jest.MockedFunction<typeof readImageAsBase64>;
const mockParseAIProviders = parseAIProviders as jest.MockedFunction<typeof parseAIProviders>;
const mockIsProviderAvailable = isProviderAvailable as jest.MockedFunction<typeof isProviderAvailable>;
const mockAnalyzeImageWithProvider = analyzeImageWithProvider as jest.MockedFunction<typeof analyzeImageWithProvider>;
const mockGetDefaultModelForProvider = getDefaultModelForProvider as jest.MockedFunction<typeof getDefaultModelForProvider>;
// Create a mock logger for tests
const mockLogger = pino({ level: 'silent' });
const mockContext: ToolContext = { logger: mockLogger };
// Canned base64 payload returned by the mocked image reader.
const MOCK_IMAGE_BASE64 = 'base64imagedata';
// Unit tests for the analyze tool: provider/model resolution
// (determineProviderAndModel) and the full handler (analyzeToolHandler),
// with the Swift CLI and AI-provider utilities mocked out.
describe('Analyze Tool', () => {
  beforeEach(() => {
    jest.clearAllMocks();
    // Reset environment variables
    delete process.env.AI_PROVIDERS;
    mockReadImageAsBase64.mockResolvedValue(MOCK_IMAGE_BASE64); // Default mock for successful read
  });

  describe('determineProviderAndModel', () => {
    // Server-side provider configuration shared by most cases below.
    const configured: AIProvider[] = [
      { provider: 'ollama', model: 'llava:server' },
      { provider: 'openai', model: 'gpt-4o:server' },
    ];

    it('should use auto: first available configured provider if no input config', async () => {
      // ollama unavailable, openai available — order of Once-mocks matters here.
      mockIsProviderAvailable.mockResolvedValueOnce(false).mockResolvedValueOnce(true);
      mockGetDefaultModelForProvider.mockReturnValue('default-model');
      const result = await determineProviderAndModel(undefined, configured, mockLogger);
      expect(result).toEqual({ provider: 'openai', model: 'gpt-4o:server' });
      expect(mockIsProviderAvailable).toHaveBeenCalledTimes(2);
      expect(mockIsProviderAvailable).toHaveBeenNthCalledWith(1, configured[0], mockLogger);
      expect(mockIsProviderAvailable).toHaveBeenNthCalledWith(2, configured[1], mockLogger);
      expect(mockGetDefaultModelForProvider).not.toHaveBeenCalled(); // Model was in configuredProviders
    });

    it('should use auto: first available and use default model if configured has no model', async () => {
      const configuredNoModel: AIProvider[] = [{ provider: 'ollama', model: '' }];
      mockIsProviderAvailable.mockResolvedValueOnce(true);
      mockGetDefaultModelForProvider.mockReturnValueOnce('llava:default');
      const result = await determineProviderAndModel(undefined, configuredNoModel, mockLogger);
      expect(result).toEqual({ provider: 'ollama', model: 'llava:default' });
      expect(mockGetDefaultModelForProvider).toHaveBeenCalledWith('ollama');
    });

    it('should use auto: input model overrides configured provider model', async () => {
      mockIsProviderAvailable.mockResolvedValueOnce(true);
      const result = await determineProviderAndModel(
        { type: 'auto', model: 'custom-llava' },
        configured,
        mockLogger
      );
      expect(result).toEqual({ provider: 'ollama', model: 'custom-llava' });
    });

    it('should use specific provider if available', async () => {
      mockIsProviderAvailable.mockResolvedValue(true);
      const result = await determineProviderAndModel(
        { type: 'openai', model: 'gpt-custom' },
        configured,
        mockLogger
      );
      expect(result).toEqual({ provider: 'openai', model: 'gpt-custom' });
      expect(mockIsProviderAvailable).toHaveBeenCalledWith(configured[1], mockLogger);
    });

    it('should use specific provider with its configured model if no input model', async () => {
      mockIsProviderAvailable.mockResolvedValue(true);
      const result = await determineProviderAndModel(
        { type: 'openai' },
        configured,
        mockLogger
      );
      expect(result).toEqual({ provider: 'openai', model: 'gpt-4o:server' });
    });

    it('should use specific provider with default model if no input model and no configured model', async () => {
      const configuredNoModel: AIProvider[] = [{ provider: 'openai', model: ''}];
      mockIsProviderAvailable.mockResolvedValue(true);
      mockGetDefaultModelForProvider.mockReturnValueOnce('gpt-default');
      const result = await determineProviderAndModel(
        { type: 'openai' },
        configuredNoModel,
        mockLogger
      );
      expect(result).toEqual({ provider: 'openai', model: 'gpt-default' });
      expect(mockGetDefaultModelForProvider).toHaveBeenCalledWith('openai');
    });

    it('should throw if specific provider is not in server config', async () => {
      const serverConfigWithoutOpenAI: AIProvider[] = [
        { provider: 'ollama', model: 'llava:server' }
      ];
      await expect(determineProviderAndModel(
        { type: 'openai' }, // Type is valid enum, but openai is not in serverConfigWithoutOpenAI
        serverConfigWithoutOpenAI,
        mockLogger
      )).rejects.toThrow("Provider 'openai' is not enabled in server's AI_PROVIDERS configuration.");
    });

    it('should throw if specific provider is configured but not available', async () => {
      mockIsProviderAvailable.mockResolvedValue(false);
      await expect(determineProviderAndModel(
        { type: 'ollama' },
        configured,
        mockLogger
      )).rejects.toThrow("Provider 'ollama' is configured but not currently available.");
    });

    it('should return null provider if auto and no providers are available', async () => {
      mockIsProviderAvailable.mockResolvedValue(false);
      const result = await determineProviderAndModel(undefined, configured, mockLogger);
      expect(result).toEqual({ provider: null, model: '' });
      expect(mockIsProviderAvailable).toHaveBeenCalledTimes(configured.length);
    });
  });

  describe('analyzeToolHandler', () => {
    const validInput: AnalyzeToolInput = {
      image_path: '/path/to/image.png',
      question: 'What is this?'
    };

    it('should analyze image successfully with auto provider selection', async () => {
      process.env.AI_PROVIDERS = 'ollama/llava,openai/gpt-4o';
      const parsedProviders: AIProvider[] = [{ provider: 'ollama', model: 'llava' }, { provider: 'openai', model: 'gpt-4o' }];
      mockParseAIProviders.mockReturnValue(parsedProviders);
      mockIsProviderAvailable.mockResolvedValueOnce(false).mockResolvedValueOnce(true); // openai is available
      mockAnalyzeImageWithProvider.mockResolvedValue('AI says: It is an apple.');
      const result = await analyzeToolHandler(validInput, mockContext);
      expect(mockReadImageAsBase64).toHaveBeenCalledWith(validInput.image_path);
      expect(mockParseAIProviders).toHaveBeenCalledWith(process.env.AI_PROVIDERS);
      expect(mockIsProviderAvailable).toHaveBeenCalledWith(parsedProviders[1], mockLogger);
      expect(mockAnalyzeImageWithProvider).toHaveBeenCalledWith(
        { provider: 'openai', model: 'gpt-4o' }, // Determined provider/model
        validInput.image_path,
        MOCK_IMAGE_BASE64,
        validInput.question,
        mockLogger
      );
      expect(result.content[0].text).toBe('AI says: It is an apple.');
      expect(result.analysis_text).toBe('AI says: It is an apple.');
      expect((result as any).model_used).toBe('openai/gpt-4o');
      expect(result.isError).toBeUndefined();
    });

    it('should use specific provider and model if provided and available', async () => {
      process.env.AI_PROVIDERS = 'openai/gpt-4-turbo';
      const parsedProviders: AIProvider[] = [{ provider: 'openai', model: 'gpt-4-turbo' }];
      mockParseAIProviders.mockReturnValue(parsedProviders);
      mockIsProviderAvailable.mockResolvedValue(true);
      mockAnalyzeImageWithProvider.mockResolvedValue('GPT-Turbo says hi.');
      const inputWithProvider: AnalyzeToolInput = {
        ...validInput,
        provider_config: { type: 'openai', model: 'gpt-custom-model' }
      };
      const result = await analyzeToolHandler(inputWithProvider, mockContext);
      expect(mockAnalyzeImageWithProvider).toHaveBeenCalledWith(
        { provider: 'openai', model: 'gpt-custom-model' },
        validInput.image_path,
        MOCK_IMAGE_BASE64,
        validInput.question,
        mockLogger
      );
      expect(result.content[0].text).toBe('GPT-Turbo says hi.');
      expect((result as any).model_used).toBe('openai/gpt-custom-model');
      expect(result.isError).toBeUndefined();
    });

    it('should return error for unsupported image format', async () => {
      const result = await analyzeToolHandler({ ...validInput, image_path: '/path/image.gif' }, mockContext) as any;
      expect(result.content[0].text).toContain('Unsupported image format: .gif');
      expect(result.isError).toBe(true);
    });

    it('should return error if AI_PROVIDERS env is not set', async () => {
      // beforeEach deleted AI_PROVIDERS, so the handler sees no configuration.
      const result = await analyzeToolHandler(validInput, mockContext) as any;
      expect(result.content[0].text).toContain('AI analysis not configured on this server');
      expect(result.isError).toBe(true);
    });

    it('should return error if AI_PROVIDERS env has no valid providers', async () => {
      process.env.AI_PROVIDERS = 'invalid/';
      mockParseAIProviders.mockReturnValue([]);
      const result = await analyzeToolHandler(validInput, mockContext) as any;
      expect(result.content[0].text).toContain('No valid AI providers found');
      expect(result.isError).toBe(true);
    });

    it('should return error if no configured providers are operational (auto mode)', async () => {
      process.env.AI_PROVIDERS = 'ollama/llava';
      mockParseAIProviders.mockReturnValue([{ provider: 'ollama', model: 'llava' }]);
      mockIsProviderAvailable.mockResolvedValue(false); // All configured are unavailable
      const result = await analyzeToolHandler(validInput, mockContext) as any;
      expect(result.content[0].text).toContain('No configured AI providers are currently operational');
      expect(result.isError).toBe(true);
    });

    it('should return error if specific provider in config is not enabled on server', async () => {
      process.env.AI_PROVIDERS = 'ollama/llava'; // Server only has ollama
      mockParseAIProviders.mockReturnValue([{ provider: 'ollama', model: 'llava' }]);
      // User requests openai
      const inputWithProvider: AnalyzeToolInput = { ...validInput, provider_config: { type: 'openai' } };
      const result = await analyzeToolHandler(inputWithProvider, mockContext) as any;
      // This error is now caught by determineProviderAndModel and then re-thrown, so analyzeToolHandler catches it
      expect(result.content[0].text).toContain("Provider 'openai' is not enabled in server's AI_PROVIDERS configuration");
      expect(result.isError).toBe(true);
    });

    it('should return error if specific provider is configured but not available', async () => {
      process.env.AI_PROVIDERS = 'ollama/llava';
      mockParseAIProviders.mockReturnValue([{ provider: 'ollama', model: 'llava' }]);
      mockIsProviderAvailable.mockResolvedValue(false); // ollama is configured but not available
      const inputWithProvider: AnalyzeToolInput = { ...validInput, provider_config: { type: 'ollama' } };
      const result = await analyzeToolHandler(inputWithProvider, mockContext) as any;
      expect(result.content[0].text).toContain("Provider 'ollama' is configured but not currently available");
      expect(result.isError).toBe(true);
    });

    it('should return error if readImageAsBase64 fails', async () => {
      process.env.AI_PROVIDERS = 'ollama/llava';
      mockParseAIProviders.mockReturnValue([{ provider: 'ollama', model: 'llava' }]);
      mockIsProviderAvailable.mockResolvedValue(true);
      mockReadImageAsBase64.mockRejectedValue(new Error('Cannot access file'));
      const result = await analyzeToolHandler(validInput, mockContext) as any;
      expect(result.content[0].text).toContain('Failed to read image file: Cannot access file');
      expect(result.isError).toBe(true);
    });

    it('should return error if analyzeImageWithProvider fails', async () => {
      process.env.AI_PROVIDERS = 'ollama/llava';
      mockParseAIProviders.mockReturnValue([{ provider: 'ollama', model: 'llava' }]);
      mockIsProviderAvailable.mockResolvedValue(true);
      mockAnalyzeImageWithProvider.mockRejectedValue(new Error('AI exploded'));
      const result = await analyzeToolHandler(validInput, mockContext) as any;
      expect(result.content[0].text).toContain('AI analysis failed: AI exploded');
      expect(result.isError).toBe(true);
      expect(result._meta.backend_error_code).toBe('AI_PROVIDER_ERROR');
    });

    it('should handle unexpected errors gracefully', async () => {
      process.env.AI_PROVIDERS = 'ollama/llava';
      mockParseAIProviders.mockImplementation(() => { throw new Error('Unexpected parse error'); }); // Force an error
      const result = await analyzeToolHandler(validInput, mockContext) as any;
      expect(result.content[0].text).toContain('Unexpected error: Unexpected parse error');
      expect(result.isError).toBe(true);
    });
  });
});

View file

@ -0,0 +1,275 @@
import { imageToolHandler, buildSwiftCliArgs, ImageToolInput } from '../../../src/tools/image';
import { executeSwiftCli, readImageAsBase64 } from '../../../src/utils/swift-cli';
import { mockSwiftCli } from '../../mocks/swift-cli.mock';
import { pino } from 'pino';
import { SavedFile, ImageCaptureData } from '../../../src/types';
// Mock the Swift CLI utility
// Factory mock so only these two functions are stubbed; the typed casts
// below give per-test control with full type inference.
jest.mock('../../../src/utils/swift-cli', () => ({
  executeSwiftCli: jest.fn(),
  readImageAsBase64: jest.fn()
}));
const mockExecuteSwiftCli = executeSwiftCli as jest.MockedFunction<typeof executeSwiftCli>;
const mockReadImageAsBase64 = readImageAsBase64 as jest.MockedFunction<typeof readImageAsBase64>;
// Create a mock logger for tests
const mockLogger = pino({ level: 'silent' });
const mockContext = { logger: mockLogger };
// Unit tests for the image tool: the handler's response assembly
// (imageToolHandler) and the Swift CLI argument builder (buildSwiftCliArgs).
describe('Image Tool', () => {
  beforeEach(() => {
    jest.clearAllMocks();
  });

  describe('imageToolHandler', () => {
    it('should capture screen with minimal parameters', async () => {
      const mockResponse = mockSwiftCli.captureImage('screen');
      mockExecuteSwiftCli.mockResolvedValue(mockResponse);
      const result = await imageToolHandler({
        format: 'png',
        return_data: false,
        capture_focus: 'background'
      }, mockContext);
      expect(result.content[0].type).toBe('text');
      expect(result.content[0].text).toContain('Captured');
      expect(mockExecuteSwiftCli).toHaveBeenCalledWith(
        expect.arrayContaining(['image', '--mode', 'screen']),
        mockLogger
      );
    });

    it('should capture window with app parameter', async () => {
      const mockResponse = mockSwiftCli.captureImage('window');
      mockExecuteSwiftCli.mockResolvedValue(mockResponse);
      const result = await imageToolHandler({
        app: 'Safari',
        mode: 'window',
        format: 'png',
        return_data: false,
        capture_focus: 'background'
      }, mockContext);
      expect(result.content[0].type).toBe('text');
      expect(result.content[0].text).toContain('Captured');
      expect(mockExecuteSwiftCli).toHaveBeenCalledWith(
        expect.arrayContaining(['image', '--app', 'Safari', '--mode', 'window']),
        mockLogger
      );
    });

    it('should handle specific format and options', async () => {
      const mockResponse = mockSwiftCli.captureImage('screen');
      mockExecuteSwiftCli.mockResolvedValue(mockResponse);
      const result = await imageToolHandler({
        format: 'jpg',
        return_data: true,
        capture_focus: 'foreground',
        path: '/tmp/custom'
      }, mockContext);
      expect(result.content[0].type).toBe('text');
      expect(result.content[0].text).toContain('Captured');
      expect(mockExecuteSwiftCli).toHaveBeenCalledWith(
        expect.arrayContaining(['image', '--path', '/tmp/custom', '--mode', 'screen', '--format', 'jpg', '--capture-focus', 'foreground']),
        mockLogger
      );
    });

    it('should handle Swift CLI errors', async () => {
      const mockResponse = {
        success: false,
        error: {
          message: 'Permission denied',
          code: 'PERMISSION_DENIED'
        }
      };
      mockExecuteSwiftCli.mockResolvedValue(mockResponse);
      const result = await imageToolHandler({
        format: 'png',
        return_data: false,
        capture_focus: 'background'
      }, mockContext);
      expect(result.content[0].text).toContain('Image capture failed');
      expect(result.isError).toBe(true);
    });

    it('should handle unexpected errors', async () => {
      // Rejection (not an error payload) must still produce an error response.
      mockExecuteSwiftCli.mockRejectedValue(new Error('Unexpected error'));
      const result = await imageToolHandler({
        format: 'png',
        return_data: false,
        capture_focus: 'background'
      }, mockContext);
      expect(result.content[0].text).toContain('Unexpected error');
      expect(result.isError).toBe(true);
    });

    it('should return image data when return_data is true and readImageAsBase64 succeeds', async () => {
      const mockSavedFile: SavedFile = { path: '/tmp/test.png', mime_type: 'image/png', item_label: 'Screen 1' };
      const mockCaptureData: ImageCaptureData = { saved_files: [mockSavedFile] };
      const mockCliResponse = { success: true, data: mockCaptureData, messages: ['Captured one file'] };
      mockExecuteSwiftCli.mockResolvedValue(mockCliResponse);
      mockReadImageAsBase64.mockResolvedValue('base64imagedata');
      const result = await imageToolHandler({
        format: 'png',
        return_data: true,
        capture_focus: 'background'
      }, mockContext);
      expect(result.isError).toBeUndefined(); // Should not be an error response
      expect(result.content).toEqual(expect.arrayContaining([
        expect.objectContaining({ type: 'text', text: expect.stringContaining('Captured 1 image') }),
        expect.objectContaining({ type: 'text', text: 'Messages: Captured one file' }),
        expect.objectContaining({
          type: 'image',
          data: 'base64imagedata',
          mimeType: 'image/png',
          metadata: expect.objectContaining({ source_path: '/tmp/test.png' })
        })
      ]));
      expect(mockReadImageAsBase64).toHaveBeenCalledWith('/tmp/test.png');
      expect(result.saved_files).toEqual([mockSavedFile]);
    });

    it('should include messages from Swift CLI in the output', async () => {
      const mockCliResponse = {
        success: true,
        data: { saved_files: [{ path: '/tmp/msg.png', mime_type: 'image/png' }] },
        messages: ['Test message 1', 'Another message']
      };
      mockExecuteSwiftCli.mockResolvedValue(mockCliResponse);
      const result = await imageToolHandler({
        format: 'png',
        return_data: false,
        capture_focus: 'background'
      }, mockContext);
      expect(result.content).toEqual(expect.arrayContaining([
        expect.objectContaining({ type: 'text', text: expect.stringContaining('Messages: Test message 1; Another message') })
      ]));
    });

    it('should handle error from readImageAsBase64 and still return summary', async () => {
      // A failed read degrades to a warning; the capture itself still succeeds.
      const mockSavedFile: SavedFile = { path: '/tmp/fail.png', mime_type: 'image/png' };
      const mockCaptureData: ImageCaptureData = { saved_files: [mockSavedFile] };
      const mockCliResponse = { success: true, data: mockCaptureData };
      mockExecuteSwiftCli.mockResolvedValue(mockCliResponse);
      mockReadImageAsBase64.mockRejectedValue(new Error('Read failed'));
      const result = await imageToolHandler({
        format: 'png',
        return_data: true,
        capture_focus: 'background'
      }, mockContext);
      expect(result.isError).toBeUndefined();
      expect(result.content).toEqual(expect.arrayContaining([
        expect.objectContaining({ type: 'text', text: expect.stringContaining('Captured 1 image') }),
        expect.objectContaining({ type: 'text', text: 'Warning: Could not read image data from /tmp/fail.png' })
      ]));
      expect(result.saved_files).toEqual([mockSavedFile]);
    });
  });

  describe('buildSwiftCliArgs', () => {
    // Required fields shared by every case; spread and override per test.
    const defaults = { format: 'png' as const, return_data: false, capture_focus: 'background' as const };

    it('should default to screen mode if no app provided and no mode specified', () => {
      const args = buildSwiftCliArgs({ ...defaults });
      expect(args).toEqual(['image', '--mode', 'screen', '--format', 'png', '--capture-focus', 'background']);
    });

    it('should default to window mode if app is provided and no mode specified', () => {
      const args = buildSwiftCliArgs({ ...defaults, app: 'Safari' });
      expect(args).toEqual(['image', '--app', 'Safari', '--mode', 'window', '--format', 'png', '--capture-focus', 'background']);
    });

    it('should use specified mode: screen', () => {
      const args = buildSwiftCliArgs({ ...defaults, mode: 'screen' });
      expect(args).toEqual(expect.arrayContaining(['--mode', 'screen']));
    });

    it('should use specified mode: window with app', () => {
      const args = buildSwiftCliArgs({ ...defaults, app: 'Terminal', mode: 'window' });
      expect(args).toEqual(expect.arrayContaining(['--app', 'Terminal', '--mode', 'window']));
    });

    it('should use specified mode: multi with app', () => {
      const args = buildSwiftCliArgs({ ...defaults, app: 'Finder', mode: 'multi' });
      expect(args).toEqual(expect.arrayContaining(['--app', 'Finder', '--mode', 'multi']));
    });

    it('should include app', () => {
      const args = buildSwiftCliArgs({ ...defaults, app: 'Notes' });
      expect(args).toEqual(expect.arrayContaining(['--app', 'Notes']));
    });

    it('should include path', () => {
      const args = buildSwiftCliArgs({ ...defaults, path: '/tmp/image.jpg' });
      expect(args).toEqual(expect.arrayContaining(['--path', '/tmp/image.jpg']));
    });

    it('should include window_specifier by title', () => {
      const args = buildSwiftCliArgs({ ...defaults, app: 'Safari', window_specifier: { title: 'Apple' } });
      expect(args).toEqual(expect.arrayContaining(['--window-title', 'Apple']));
    });

    it('should include window_specifier by index', () => {
      const args = buildSwiftCliArgs({ ...defaults, app: 'Safari', window_specifier: { index: 0 } });
      expect(args).toEqual(expect.arrayContaining(['--window-index', '0']));
    });

    it('should include format (default png)', () => {
      const args = buildSwiftCliArgs({ ...defaults });
      expect(args).toEqual(expect.arrayContaining(['--format', 'png']));
    });

    it('should include specified format jpg', () => {
      const args = buildSwiftCliArgs({ ...defaults, format: 'jpg' });
      expect(args).toEqual(expect.arrayContaining(['--format', 'jpg']));
    });

    it('should include capture_focus (default background)', () => {
      const args = buildSwiftCliArgs({ ...defaults });
      expect(args).toEqual(expect.arrayContaining(['--capture-focus', 'background']));
    });

    it('should include specified capture_focus foreground', () => {
      const args = buildSwiftCliArgs({ ...defaults, capture_focus: 'foreground' });
      expect(args).toEqual(expect.arrayContaining(['--capture-focus', 'foreground']));
    });

    it('should handle all options together', () => {
      const input: ImageToolInput = {
        ...defaults, // Ensure all required fields are present
        app: 'Preview',
        path: '/users/test/file.tiff',
        mode: 'window',
        window_specifier: { index: 1 },
        format: 'png',
        capture_focus: 'foreground'
      };
      const args = buildSwiftCliArgs(input);
      // Exact-order assertion: documents the canonical argument layout.
      expect(args).toEqual([
        'image',
        '--app', 'Preview',
        '--path', '/users/test/file.tiff',
        '--mode', 'window',
        '--window-index', '1',
        '--format', 'png',
        '--capture-focus', 'foreground'
      ]);
    });
  });
});

View file

@ -0,0 +1,232 @@
// Test preamble for the list tool: mock out every external effect (Swift CLI
// subprocess, server-status formatting, filesystem reads) so the handler runs
// hermetically. Previously this header carried ~20 lines of commented-out
// `url`/`path` mock scaffolding left over from an import.meta workaround;
// that dead code is removed here.
import { pino } from 'pino';
import { listToolHandler, buildSwiftCliArgs, ListToolInput } from '../../../src/tools/list';
import { executeSwiftCli } from '../../../src/utils/swift-cli';
import { generateServerStatusString } from '../../../src/utils/server-status';
import fs from 'fs/promises';
import { ToolContext, ApplicationListData, WindowListData } from '../../../src/types/index.js';

// Mocks: no real subprocess, status string, or disk I/O in these tests.
jest.mock('../../../src/utils/swift-cli');
jest.mock('../../../src/utils/server-status');
jest.mock('fs/promises');

// Typed handles onto the auto-mocked functions.
const mockExecuteSwiftCli = executeSwiftCli as jest.MockedFunction<typeof executeSwiftCli>;
const mockGenerateServerStatusString = generateServerStatusString as jest.MockedFunction<typeof generateServerStatusString>;
const mockFsReadFile = fs.readFile as jest.MockedFunction<typeof fs.readFile>;

// Create a silent logger so test output stays clean.
const mockLogger = pino({ level: 'silent' });
const mockContext: ToolContext = { logger: mockLogger };
describe('List Tool', () => {
beforeEach(() => {
jest.clearAllMocks();
});
describe('buildSwiftCliArgs', () => {
  // Verifies the mapping from ListToolInput onto the Swift CLI argv.
  it('should return default args for running_applications', () => {
    const appsInput: ListToolInput = { item_type: 'running_applications' };
    expect(buildSwiftCliArgs(appsInput)).toEqual(['list', 'apps']);
  });
  it('should return args for application_windows with app only', () => {
    const windowsInput: ListToolInput = { item_type: 'application_windows', app: 'Safari' };
    expect(buildSwiftCliArgs(windowsInput)).toEqual(['list', 'windows', '--app', 'Safari']);
  });
  it('should return args for application_windows with app and details', () => {
    const detailedInput: ListToolInput = {
      item_type: 'application_windows',
      app: 'Chrome',
      include_window_details: ['bounds', 'ids'],
    };
    const argv = buildSwiftCliArgs(detailedInput);
    expect(argv).toEqual(['list', 'windows', '--app', 'Chrome', '--include-details', 'bounds,ids']);
  });
  it('should return args for application_windows with app and empty details', () => {
    // An empty details array must not emit the --include-details flag at all.
    const emptyDetailsInput: ListToolInput = {
      item_type: 'application_windows',
      app: 'Finder',
      include_window_details: [],
    };
    expect(buildSwiftCliArgs(emptyDetailsInput)).toEqual(['list', 'windows', '--app', 'Finder']);
  });
  it('should ignore app and include_window_details if item_type is not application_windows', () => {
    const mismatchedInput: ListToolInput = {
      item_type: 'running_applications',
      app: 'ShouldBeIgnored',
      include_window_details: ['bounds'],
    };
    expect(buildSwiftCliArgs(mismatchedInput)).toEqual(['list', 'apps']);
  });
});
describe('listToolHandler', () => {
// Happy-path cases: the mocked Swift CLI responds with structured data and
// the handler is expected to format a human-readable summary while echoing
// the raw payload on extra result fields.
it('should list running applications', async () => {
  const mockSwiftResponse: ApplicationListData = {
    applications: [
      { app_name: 'Safari', bundle_id: 'com.apple.Safari', pid: 1234, is_active: true, window_count: 2 },
      { app_name: 'Cursor', bundle_id: 'com.todesktop.230313mzl4w4u92', pid: 5678, is_active: false, window_count: 1 },
    ]
  };
  mockExecuteSwiftCli.mockResolvedValue({ success: true, data: mockSwiftResponse, messages: [] });
  const result = await listToolHandler({
    item_type: 'running_applications'
  }, mockContext);
  expect(mockExecuteSwiftCli).toHaveBeenCalledWith(['list', 'apps'], mockLogger);
  expect(result.content[0].text).toContain('Found 2 running applications');
  // [ACTIVE] tag appears only for the is_active app.
  expect(result.content[0].text).toContain('Safari (com.apple.Safari) - PID: 1234 [ACTIVE] - Windows: 2');
  expect(result.content[0].text).toContain('Cursor (com.todesktop.230313mzl4w4u92) - PID: 5678 - Windows: 1');
  expect((result as any).application_list).toEqual(mockSwiftResponse.applications);
});
it('should list application windows', async () => {
  const mockSwiftResponse: WindowListData = {
    target_application_info: { app_name: 'Safari', bundle_id: 'com.apple.Safari', pid: 1234 },
    windows: [
      { window_title: 'Main Window', window_id: 12345, is_on_screen: true, bounds: {x:0,y:0,width:800,height:600} },
      { window_title: 'Secondary Window', window_id: 12346, is_on_screen: false },
    ]
  };
  mockExecuteSwiftCli.mockResolvedValue({ success: true, data: mockSwiftResponse, messages: [] });
  const result = await listToolHandler({
    item_type: 'application_windows',
    app: 'Safari',
    include_window_details: ['ids', 'bounds', 'off_screen']
  }, mockContext);
  expect(mockExecuteSwiftCli).toHaveBeenCalledWith(['list', 'windows', '--app', 'Safari', '--include-details', 'ids,bounds,off_screen'], mockLogger);
  expect(result.content[0].text).toContain('Found 2 windows for application: Safari (com.apple.Safari) - PID: 1234');
  // Bounds are rendered as "x,y width×height"; missing bounds are omitted.
  expect(result.content[0].text).toContain('1. "Main Window" [ID: 12345] [ON-SCREEN] [0,0 800×600]');
  expect(result.content[0].text).toContain('2. "Secondary Window" [ID: 12346] [OFF-SCREEN]');
  expect((result as any).window_list).toEqual(mockSwiftResponse.windows);
  expect((result as any).target_application_info).toEqual(mockSwiftResponse.target_application_info);
});
it('should handle server status', async () => {
  // process.cwd() will be the project root during tests
  const expectedPackageJsonPath = require('path').join(process.cwd(), 'package.json');
  mockFsReadFile.mockResolvedValue(JSON.stringify({ version: '1.2.3' }));
  mockGenerateServerStatusString.mockReturnValue('Peekaboo MCP Server v1.2.3\nStatus: Test Status');
  const result = await listToolHandler({
    item_type: 'server_status'
  }, mockContext);
  // Version is read from package.json and forwarded to the status formatter;
  // the Swift CLI must never be invoked for this item_type.
  expect(mockFsReadFile).toHaveBeenCalledWith(expectedPackageJsonPath, 'utf-8');
  expect(mockGenerateServerStatusString).toHaveBeenCalledWith('1.2.3');
  expect(result.content[0].text).toBe('Peekaboo MCP Server v1.2.3\nStatus: Test Status');
  expect(mockExecuteSwiftCli).not.toHaveBeenCalled();
});
it('should handle Swift CLI errors', async () => {
  // Structured failure from the CLI maps to isError + backend_error_code meta.
  mockExecuteSwiftCli.mockResolvedValue({
    success: false,
    error: { message: 'Application not found', code: 'APP_NOT_FOUND' }
  });
  const result = await listToolHandler({
    item_type: 'running_applications'
  }, mockContext) as { content: any[], isError?: boolean, _meta?: any };
  expect(result.content[0].text).toBe('List operation failed: Application not found');
  expect(result.isError).toBe(true);
  expect((result as any)._meta.backend_error_code).toBe('APP_NOT_FOUND');
});
it('should handle Swift CLI errors with no message or code', async () => {
  // The handler is expected to substitute these defaults when the CLI error
  // payload lacks a message/code; this test pins the default-substituted
  // values. (The mock supplies the defaults directly because the error type
  // requires both fields.)
  mockExecuteSwiftCli.mockResolvedValue({
    success: false,
    error: { message: 'Unknown error', code: 'UNKNOWN_SWIFT_ERROR' }
  });
  const result = await listToolHandler({
    item_type: 'running_applications'
  }, mockContext) as { content: any[], isError?: boolean, _meta?: any };
  expect(result.content[0].text).toBe('List operation failed: Unknown error');
  expect(result.isError).toBe(true);
  // _meta may be absent on some error paths; when present it must carry the
  // default code. (Previously this was an if/else with an empty else branch.)
  if (result._meta) {
    expect(result._meta.backend_error_code).toBe('UNKNOWN_SWIFT_ERROR');
  }
});
// Unexpected (thrown) errors and pass-through of informational CLI messages.
it('should handle unexpected errors during Swift CLI execution', async () => {
  // A rejected promise (as opposed to a structured failure payload) must be
  // caught and surfaced as "Unexpected error: ...".
  mockExecuteSwiftCli.mockRejectedValue(new Error('Unexpected Swift execution error'));
  const result = await listToolHandler({
    item_type: 'running_applications'
  }, mockContext) as { content: any[], isError?: boolean };
  expect(result.content[0].text).toBe('Unexpected error: Unexpected Swift execution error');
  expect(result.isError).toBe(true);
});
it('should handle unexpected errors during server status (fs.readFile fails)', async () => {
  mockFsReadFile.mockRejectedValue(new Error('Cannot read package.json'));
  const result = await listToolHandler({
    item_type: 'server_status'
  }, mockContext) as { content: any[], isError?: boolean };
  expect(result.content[0].text).toBe('Unexpected error: Cannot read package.json');
  expect(result.isError).toBe(true);
});
it('should include Swift CLI messages in the output for applications list', async () => {
  const mockSwiftResponse: ApplicationListData = {
    applications: [{ app_name: 'TestApp', bundle_id: 'com.test.app', pid: 111, is_active: false, window_count: 0 }]
  };
  mockExecuteSwiftCli.mockResolvedValue({
    success: true,
    data: mockSwiftResponse,
    messages: ['Warning: One app hidden.', 'Info: Low memory.']
  });
  const result = await listToolHandler({ item_type: 'running_applications' }, mockContext);
  // Messages are joined with "; " and appended to the summary text.
  expect(result.content[0].text).toContain('Messages: Warning: One app hidden.; Info: Low memory.');
});
it('should include Swift CLI messages in the output for windows list', async () => {
  const mockSwiftResponse: WindowListData = {
    target_application_info: { app_name: 'TestApp', pid: 111 },
    windows: [{ window_title: 'TestWindow', window_id: 222 }]
  };
  mockExecuteSwiftCli.mockResolvedValue({
    success: true,
    data: mockSwiftResponse,
    messages: ['Note: Some windows might be minimized.']
  });
  const result = await listToolHandler({ item_type: 'application_windows', app: 'TestApp' }, mockContext);
  expect(result.content[0].text).toContain('Messages: Note: Some windows might be minimized.');
});
});
});

View file

@ -0,0 +1,263 @@
import {
parseAIProviders,
isProviderAvailable,
analyzeImageWithProvider,
getDefaultModelForProvider,
} from '../../../src/utils/ai-providers';
import { AIProvider } from '../../../src/types';
import OpenAI from 'openai';
// Minimal logger stub; tests assert on debug/warn/error calls directly.
const mockLogger = {
  info: jest.fn(),
  error: jest.fn(),
  debug: jest.fn(),
  warn: jest.fn(),
} as any;
// Ollama availability checks and generation calls go through global fetch.
global.fetch = jest.fn();
// Centralized mock for OpenAI().chat.completions.create
const mockChatCompletionsCreate = jest.fn();
jest.mock('openai', () => {
  // This is the mock constructor for OpenAI
  return jest.fn().mockImplementation(() => {
    return {
      chat: {
        completions: {
          create: mockChatCompletionsCreate, // All instances use this mock
        },
      },
    };
  });
});
// No need for `let mockOpenAICreate` outside, use mockChatCompletionsCreate directly.
describe('AI Providers Utility', () => {
beforeEach(() => {
  // Fresh state per test: clear call history and strip provider-related env
  // vars so availability checks start from "nothing configured".
  jest.clearAllMocks();
  delete process.env.OLLAMA_BASE_URL;
  delete process.env.OPENAI_API_KEY;
  delete process.env.ANTHROPIC_API_KEY;
  (global.fetch as jest.Mock).mockReset();
  mockChatCompletionsCreate.mockReset(); // Reset the shared mock function
});
describe('parseAIProviders', () => {
  // Parsing of the comma-separated "provider/model" list from AI_PROVIDERS.
  it('should return empty array for empty or whitespace string', () => {
    expect(parseAIProviders('')).toEqual([]);
    expect(parseAIProviders(' ')).toEqual([]);
  });
  it('should parse a single provider string', () => {
    const single = parseAIProviders('ollama/llava');
    expect(single).toEqual([{ provider: 'ollama', model: 'llava' }]);
  });
  it('should parse multiple comma-separated providers', () => {
    const expectedProviders: AIProvider[] = [
      { provider: 'ollama', model: 'llava' },
      { provider: 'openai', model: 'gpt-4o' },
    ];
    const parsed = parseAIProviders('ollama/llava, openai/gpt-4o');
    expect(parsed).toEqual(expectedProviders);
  });
  it('should handle extra whitespace', () => {
    const parsed = parseAIProviders(' ollama/llava , openai/gpt-4o ');
    expect(parsed).toEqual([
      { provider: 'ollama', model: 'llava' },
      { provider: 'openai', model: 'gpt-4o' },
    ]);
  });
  it('should filter out entries without a model or provider name', () => {
    const parsed = parseAIProviders('ollama/, /gpt-4o, openai/llama3, incomplete');
    expect(parsed).toEqual([{ provider: 'openai', model: 'llama3' }]);
  });
  it('should filter out entries with only provider or only model or no slash or empty parts', () => {
    // Each malformed shape is rejected individually...
    expect(parseAIProviders('ollama/')).toEqual([]);
    expect(parseAIProviders('/gpt-4o')).toEqual([]);
    expect(parseAIProviders('ollama')).toEqual([]);
    // ...and mixed with a valid entry only the valid one survives.
    expect(parseAIProviders('ollama/,,openai/gpt4')).toEqual([{ provider: 'openai', model: 'gpt4' }]);
  });
});
describe('isProviderAvailable', () => {
// Availability semantics: ollama => GET <base>/api/tags must succeed;
// openai/anthropic => their API-key env var must be set; anything else is
// unknown and unavailable.
it('should return true for available Ollama (fetch ok)', async () => {
  (global.fetch as jest.Mock).mockResolvedValue({ ok: true });
  const result = await isProviderAvailable({ provider: 'ollama', model: 'llava' }, mockLogger);
  expect(result).toBe(true);
  expect(global.fetch).toHaveBeenCalledWith('http://localhost:11434/api/tags');
});
it('should use OLLAMA_BASE_URL for Ollama check', async () => {
  process.env.OLLAMA_BASE_URL = 'http://custom-ollama:11434';
  (global.fetch as jest.Mock).mockResolvedValue({ ok: true });
  await isProviderAvailable({ provider: 'ollama', model: 'llava' }, mockLogger);
  expect(global.fetch).toHaveBeenCalledWith('http://custom-ollama:11434/api/tags');
});
it('should return false for unavailable Ollama (fetch fails)', async () => {
  (global.fetch as jest.Mock).mockRejectedValue(new Error('Network Error'));
  const result = await isProviderAvailable({ provider: 'ollama', model: 'llava' }, mockLogger);
  expect(result).toBe(false);
  // Failure is logged at debug level, not error.
  expect(mockLogger.debug).toHaveBeenCalledWith({ error: new Error('Network Error') }, 'Ollama not available');
});
it('should return false for unavailable Ollama (response not ok)', async () => {
  (global.fetch as jest.Mock).mockResolvedValue({ ok: false });
  const result = await isProviderAvailable({ provider: 'ollama', model: 'llava' }, mockLogger);
  expect(result).toBe(false);
});
it('should return true for available OpenAI (API key set)', async () => {
  process.env.OPENAI_API_KEY = 'test-key';
  const result = await isProviderAvailable({ provider: 'openai', model: 'gpt-4o' }, mockLogger);
  expect(result).toBe(true);
});
it('should return false for unavailable OpenAI (API key not set)', async () => {
  const result = await isProviderAvailable({ provider: 'openai', model: 'gpt-4o' }, mockLogger);
  expect(result).toBe(false);
});
it('should return true for available Anthropic (API key set)', async () => {
  process.env.ANTHROPIC_API_KEY = 'test-key';
  const result = await isProviderAvailable({ provider: 'anthropic', model: 'claude-3' }, mockLogger);
  expect(result).toBe(true);
});
it('should return false for unavailable Anthropic (API key not set)', async () => {
  const result = await isProviderAvailable({ provider: 'anthropic', model: 'claude-3' }, mockLogger);
  expect(result).toBe(false);
});
it('should return false and log warning for unknown provider', async () => {
  const result = await isProviderAvailable({ provider: 'unknown', model: 'test' }, mockLogger);
  expect(result).toBe(false);
  expect(mockLogger.warn).toHaveBeenCalledWith({ provider: 'unknown' }, 'Unknown AI provider');
});
it('should handle errors during ollama availability check gracefully (fetch throws)', async () => {
  // A synchronous throw from fetch must be swallowed, reported via
  // logger.debug, and surface as "not available" — never as logger.error.
  // (Previously the mock implementation inspected its own mock.calls to
  // decide whether to throw; Jest records the call args before invoking the
  // implementation, so that guard was always true — a plain throw is
  // equivalent and far clearer.)
  const fetchError = new Error("Unexpected fetch error");
  (global.fetch as jest.Mock).mockImplementationOnce(() => {
    throw fetchError;
  });
  const result = await isProviderAvailable({ provider: 'ollama', model: 'llava' }, mockLogger);
  expect(result).toBe(false);
  expect(mockLogger.debug).toHaveBeenCalledWith({ error: fetchError }, 'Ollama not available');
  expect(mockLogger.error).not.toHaveBeenCalledWith(
    expect.objectContaining({ error: fetchError, provider: 'ollama' }),
    'Error checking provider availability'
  );
});
});
describe('analyzeImageWithProvider', () => {
  // Shared fixture: a fake base64 payload and question routed to each backend.
  const imageBase64 = 'test-base64-image';
  const question = 'What is this?';
  it('should call analyzeWithOllama for ollama provider', async () => {
    (global.fetch as jest.Mock).mockResolvedValueOnce({
      ok: true,
      json: async () => ({ response: 'Ollama says hello' })
    });
    const result = await analyzeImageWithProvider({ provider: 'ollama', model: 'llava' }, 'path/img.png', imageBase64, question, mockLogger);
    expect(result).toBe('Ollama says hello');
    expect(global.fetch).toHaveBeenCalledWith('http://localhost:11434/api/generate', expect.any(Object));
    // The POST body must carry the model, the question as prompt, and the
    // base64 image in the `images` array.
    expect(JSON.parse((global.fetch as jest.Mock).mock.calls[0][1].body)).toEqual(
      expect.objectContaining({ model: 'llava', prompt: question, images: [imageBase64] })
    );
  });
  it('should throw Ollama API error if response not ok', async () => {
    (global.fetch as jest.Mock).mockResolvedValueOnce({
      ok: false,
      status: 500,
      text: async () => "Internal Server Error"
    });
    await expect(
      analyzeImageWithProvider({ provider: 'ollama', model: 'llava' }, 'path/img.png', imageBase64, question, mockLogger)
    ).rejects.toThrow('Ollama API error: 500 - Internal Server Error');
  });
  it('should call analyzeWithOpenAI for openai provider', async () => {
    process.env.OPENAI_API_KEY = 'test-key';
    mockChatCompletionsCreate.mockResolvedValueOnce({ choices: [{ message: { content: 'OpenAI says hello' } }] });
    const result = await analyzeImageWithProvider({ provider: 'openai', model: 'gpt-4o' }, 'path/img.png', imageBase64, question, mockLogger);
    expect(result).toBe('OpenAI says hello');
    // Image is sent as a data URL inside the multimodal user message.
    expect(mockChatCompletionsCreate).toHaveBeenCalledWith(expect.objectContaining({
      model: 'gpt-4o',
      messages: expect.arrayContaining([
        expect.objectContaining({
          role: 'user',
          content: expect.arrayContaining([
            { type: 'text', text: question },
            { type: 'image_url', image_url: { url: `data:image/jpeg;base64,${imageBase64}` } }
          ])
        })
      ])
    }));
  });
  it('should throw error if OpenAI API key is missing for openai provider', async () => {
    await expect(
      analyzeImageWithProvider({ provider: 'openai', model: 'gpt-4o' }, 'path/img.png', imageBase64, question, mockLogger)
    ).rejects.toThrow('OpenAI API key not configured');
  });
  it('should return default message if OpenAI provides no response content', async () => {
    process.env.OPENAI_API_KEY = 'test-key';
    mockChatCompletionsCreate.mockResolvedValueOnce({ choices: [{ message: { content: null } }] });
    const result = await analyzeImageWithProvider({ provider: 'openai', model: 'gpt-4o' }, 'path/img.png', imageBase64, question, mockLogger);
    expect(result).toBe('No response from OpenAI');
  });
  it('should return default message if Ollama provides no response content', async () => {
    (global.fetch as jest.Mock).mockResolvedValueOnce({
      ok: true,
      json: async () => ({ response: null })
    });
    const result = await analyzeImageWithProvider({ provider: 'ollama', model: 'llava' }, 'path/img.png', imageBase64, question, mockLogger);
    expect(result).toBe('No response from Ollama');
  });
  it('should throw error for anthropic provider (not implemented)', async () => {
    await expect(
      analyzeImageWithProvider({ provider: 'anthropic', model: 'claude-3' }, 'path/img.png', imageBase64, question, mockLogger)
    ).rejects.toThrow('Anthropic support not yet implemented');
  });
  it('should throw error for unsupported provider', async () => {
    await expect(
      analyzeImageWithProvider({ provider: 'unknown', model: 'test' }, 'path/img.png', imageBase64, question, mockLogger)
    ).rejects.toThrow('Unsupported AI provider: unknown');
  });
});
describe('getDefaultModelForProvider', () => {
  // Per-provider default model names; unknown providers fall back to 'unknown'.
  it('should return correct default for ollama', () => {
    const expectedOllamaDefault = 'llava:latest';
    // Lookup is case-insensitive (lower and mixed case both resolve).
    expect(getDefaultModelForProvider('ollama')).toBe(expectedOllamaDefault);
    expect(getDefaultModelForProvider('Ollama')).toBe(expectedOllamaDefault);
  });
  it('should return correct default for openai', () => {
    const openaiDefault = getDefaultModelForProvider('openai');
    expect(openaiDefault).toBe('gpt-4o');
  });
  it('should return correct default for anthropic', () => {
    const anthropicDefault = getDefaultModelForProvider('anthropic');
    expect(anthropicDefault).toBe('claude-3-sonnet-20240229');
  });
  it('should return "unknown" for an unknown provider', () => {
    expect(getDefaultModelForProvider('unknown-provider')).toBe('unknown');
  });
});
});

View file

@ -0,0 +1,65 @@
import { generateServerStatusString } from '../../../src/utils/server-status';
describe('Server Status Utility - generateServerStatusString', () => {
  const version = '1.2.3';
  // Fallback text shown whenever AI_PROVIDERS is unset/blank.
  const noProvidersText =
    'Configured AI Providers (from AI_PROVIDERS ENV): None Configured. Set AI_PROVIDERS ENV.';

  beforeEach(() => {
    // Start every test without the env var so each case controls it fully.
    delete process.env.AI_PROVIDERS;
  });

  it('should return status with default providers text when AI_PROVIDERS is not set', () => {
    const status = generateServerStatusString(version);
    expect(status).toContain(`Version: ${version}`);
    expect(status).toContain(noProvidersText);
  });
  it('should return status with default providers text when AI_PROVIDERS is an empty string', () => {
    process.env.AI_PROVIDERS = '';
    const status = generateServerStatusString(version);
    expect(status).toContain(`Version: ${version}`);
    expect(status).toContain(noProvidersText);
  });
  it('should return status with default providers text when AI_PROVIDERS is whitespace', () => {
    process.env.AI_PROVIDERS = ' ';
    const status = generateServerStatusString(version);
    expect(status).toContain(`Version: ${version}`);
    expect(status).toContain(noProvidersText);
  });
  it('should list a single provider from AI_PROVIDERS', () => {
    process.env.AI_PROVIDERS = 'ollama/llava';
    const status = generateServerStatusString(version);
    expect(status).toContain(`Version: ${version}`);
    expect(status).toContain('Configured AI Providers (from AI_PROVIDERS ENV): ollama/llava');
  });
  it('should list multiple providers from AI_PROVIDERS, trimmed and joined', () => {
    process.env.AI_PROVIDERS = 'ollama/llava, openai/gpt-4o';
    const status = generateServerStatusString(version);
    expect(status).toContain(`Version: ${version}`);
    expect(status).toContain('Configured AI Providers (from AI_PROVIDERS ENV): ollama/llava, openai/gpt-4o');
  });
  it('should handle extra whitespace and empty segments in AI_PROVIDERS', () => {
    process.env.AI_PROVIDERS = ' ollama/llava , ,, openai/gpt-4o ,anthropic/claude ';
    const status = generateServerStatusString(version);
    expect(status).toContain(`Version: ${version}`);
    expect(status).toContain('Configured AI Providers (from AI_PROVIDERS ENV): ollama/llava, openai/gpt-4o, anthropic/claude');
  });
  it('should correctly include the provided version string', () => {
    const customVersion = 'z.y.x';
    expect(generateServerStatusString(customVersion)).toContain(`Version: ${customVersion}`);
  });
  it('should produce a trimmed multi-line string', () => {
    const status = generateServerStatusString('0.0.1');
    expect(status.startsWith('---')).toBe(true);
    expect(status.endsWith('---')).toBe(true);
    expect(status).not.toMatch(/^\s/); // No leading whitespace
    expect(status).not.toMatch(/\s$/); // No trailing whitespace
  });
});

View file

@ -0,0 +1,195 @@
// Preamble for the Swift CLI wrapper tests: child_process is fully mocked so
// no binary is ever spawned, and fs.existsSync is mocked to simulate whether
// a PEEKABOO_CLI_PATH override points at a real file.
import { executeSwiftCli, initializeSwiftCliPath } from '../../../src/utils/swift-cli';
import { spawn } from 'child_process';
import path from 'path'; // Import path for joining
// Mock child_process
jest.mock('child_process');
// Mock fs to control existsSync behavior for PEEKABOO_CLI_PATH tests
jest.mock('fs', () => ({
  ...jest.requireActual('fs'), // Preserve other fs functions
  existsSync: jest.fn(),
}));
const mockSpawn = spawn as jest.Mock;
const mockExistsSync = jest.requireMock('fs').existsSync as jest.Mock;
describe('Swift CLI Utility', () => {
// Shared no-op logger; individual tests assert on specific calls.
const mockLogger = {
  info: jest.fn(),
  error: jest.fn(),
  debug: jest.fn(),
  warn: jest.fn(),
} as any;
const MOCK_PACKAGE_ROOT = '/test/package/root';
const DEFAULT_CLI_PATH_IN_PACKAGE = path.join(MOCK_PACKAGE_ROOT, 'peekaboo');
const CUSTOM_CLI_PATH = '/custom/path/to/peekaboo';
beforeEach(() => {
  jest.clearAllMocks();
  // Remove the override entirely rather than assigning '' — an empty string
  // is falsy but still *set*, which would mislead any code that checks for
  // the variable's presence instead of its truthiness.
  delete process.env.PEEKABOO_CLI_PATH;
  // Reset the internal resolvedCliPath by re-importing or having a reset function (not available here)
  // For now, we will rely on initializeSwiftCliPath overwriting it or testing its logic flow.
  // This is a limitation of testing module-scoped variables without a reset mechanism.
  // We can ensure each describe block for executeSwiftCli calls initializeSwiftCliPath with its desired setup.
});
describe('executeSwiftCli with path resolution', () => {
  // Drives initializeSwiftCliPath + executeSwiftCli together and checks which
  // binary path ends up passed to spawn.
  it('should use CLI path from PEEKABOO_CLI_PATH if set and valid', async () => {
    process.env.PEEKABOO_CLI_PATH = CUSTOM_CLI_PATH;
    mockExistsSync.mockReturnValue(true); // Simulate path exists
    initializeSwiftCliPath(MOCK_PACKAGE_ROOT); // Root dir is secondary if PEEKABOO_CLI_PATH is valid
    mockSpawn.mockReturnValue({ stdout: { on: jest.fn() }, stderr: { on: jest.fn() }, on: jest.fn((e,c) => {if(e==='close')c(0)}) });
    await executeSwiftCli(['test'], mockLogger);
    expect(mockSpawn).toHaveBeenCalledWith(CUSTOM_CLI_PATH, ['test', '--json-output']);
  });
  it('should use bundled path if PEEKABOO_CLI_PATH is set but invalid', async () => {
    process.env.PEEKABOO_CLI_PATH = '/invalid/custom/path';
    mockExistsSync.mockReturnValue(false); // Simulate path does NOT exist
    initializeSwiftCliPath(MOCK_PACKAGE_ROOT);
    mockSpawn.mockReturnValue({ stdout: { on: jest.fn() }, stderr: { on: jest.fn() }, on: jest.fn((e,c) => {if(e==='close')c(0)}) });
    await executeSwiftCli(['test'], mockLogger);
    expect(mockSpawn).toHaveBeenCalledWith(DEFAULT_CLI_PATH_IN_PACKAGE, ['test', '--json-output']);
    // Check console.warn for invalid path (this is in SUT, so it's a side effect test)
    // This test is a bit brittle as it relies on console.warn in the SUT which might change.
    // expect(console.warn).toHaveBeenCalledWith(expect.stringContaining('PEEKABOO_CLI_PATH is set to '/invalid/custom/path', but this path does not exist'));
  });
  it('should use bundled path derived from packageRootDir if PEEKABOO_CLI_PATH is not set', async () => {
    // PEEKABOO_CLI_PATH is empty by default from beforeEach
    initializeSwiftCliPath(MOCK_PACKAGE_ROOT);
    mockSpawn.mockReturnValue({ stdout: { on: jest.fn() }, stderr: { on: jest.fn() }, on: jest.fn((e,c) => {if(e==='close')c(0)}) });
    await executeSwiftCli(['test'], mockLogger);
    expect(mockSpawn).toHaveBeenCalledWith(DEFAULT_CLI_PATH_IN_PACKAGE, ['test', '--json-output']);
  });
  // Test for the import.meta.url fallback is hard because it would only trigger if
  // initializeSwiftCliPath was never called or called with undefined rootDir, AND PEEKABOO_CLI_PATH is not set.
  // Such a scenario would also mean the console.warn/error for uninitialized path would trigger.
  // It's better to ensure tests always initialize appropriately.
});
// Remaining tests for executeSwiftCli behavior (parsing, errors, etc.) are largely the same
// but need to ensure initializeSwiftCliPath has run before each of them.
describe('executeSwiftCli command execution and output parsing', () => {
  beforeEach(() => {
    // Ensure a default path is initialized for these tests
    // PEEKABOO_CLI_PATH is empty, so it will use MOCK_PACKAGE_ROOT
    mockExistsSync.mockReturnValue(false); // Ensure PEEKABOO_CLI_PATH (if accidentally set) is seen as invalid
    initializeSwiftCliPath(MOCK_PACKAGE_ROOT);
  });
  it('should execute command and parse valid JSON output', async () => {
    const mockStdOutput = JSON.stringify({ success: true, data: { message: "Hello" } });
    // Fake child process: emits the JSON on stdout, then exits 0.
    const mockChildProcess = {
      stdout: { on: jest.fn((event, cb) => { if (event === 'data') cb(Buffer.from(mockStdOutput)); }) },
      stderr: { on: jest.fn() },
      on: jest.fn((event, cb) => { if (event === 'close') cb(0); }),
      kill: jest.fn(),
    };
    mockSpawn.mockReturnValue(mockChildProcess);
    const result = await executeSwiftCli(['list', 'apps'], mockLogger);
    expect(result).toEqual(JSON.parse(mockStdOutput));
    expect(mockSpawn).toHaveBeenCalledWith(DEFAULT_CLI_PATH_IN_PACKAGE, ['list', 'apps', '--json-output']);
    expect(mockLogger.debug).toHaveBeenCalledWith(expect.objectContaining({ command: DEFAULT_CLI_PATH_IN_PACKAGE}), 'Executing Swift CLI');
  });
  it('should handle Swift CLI error with JSON output from CLI', async () => {
    // Exit code 0 but the JSON payload itself reports failure — the payload
    // must be passed through unchanged.
    const errorPayload = { success: false, error: { code: 'PERMISSIONS_ERROR', message: "Permission denied" } };
    const mockChildProcess = {
      stdout: { on: jest.fn((event, cb) => { if (event === 'data') cb(Buffer.from(JSON.stringify(errorPayload))); }) },
      stderr: { on: jest.fn() },
      on: jest.fn((event, cb) => { if (event === 'close') cb(0); }), // Swift CLI itself exits 0, but payload indicates error
      kill: jest.fn(),
    };
    mockSpawn.mockReturnValue(mockChildProcess);
    const result = await executeSwiftCli(['image', '--mode', 'screen'], mockLogger);
    expect(result).toEqual(errorPayload);
  });
  it('should handle non-JSON output from Swift CLI with non-zero exit', async () => {
    // Plain-text stdout plus exit code 1 becomes a synthesized
    // SWIFT_CLI_EXECUTION_ERROR with the raw text in `details`.
    const mockChildProcess = {
      stdout: { on: jest.fn((event, cb) => { if (event === 'data') cb(Buffer.from("Plain text error")); }) },
      stderr: { on: jest.fn() },
      on: jest.fn((event, cb) => { if (event === 'close') cb(1); }),
      kill: jest.fn(),
    };
    mockSpawn.mockReturnValue(mockChildProcess);
    const result = await executeSwiftCli(['list', 'windows'], mockLogger);
    expect(result).toEqual({
      success: false,
      error: {
        code: 'SWIFT_CLI_EXECUTION_ERROR',
        message: 'Swift CLI execution failed (exit code: 1)',
        details: 'Plain text error'
      }
    });
    expect(mockLogger.error).toHaveBeenCalledWith(expect.objectContaining({ exitCode: 1}), 'Swift CLI execution failed');
  });
  it('should handle Swift CLI not found or not executable (spawn error)', async () => {
    // The 'error' event on the child (e.g. EACCES) maps to SWIFT_CLI_SPAWN_ERROR.
    const spawnError = new Error('spawn EACCES') as NodeJS.ErrnoException;
    spawnError.code = 'EACCES';
    const mockChildProcess = {
      stdout: { on: jest.fn() },
      stderr: { on: jest.fn() },
      on: jest.fn((event: string, cb: (err: Error) => void) => {
        if (event === 'error') {
          cb(spawnError);
        }
      }),
      kill: jest.fn(),
    } as any;
    mockSpawn.mockReturnValue(mockChildProcess);
    const result = await executeSwiftCli(['image'], mockLogger);
    expect(result).toEqual({
      success: false,
      error: {
        message: "Failed to execute Swift CLI: spawn EACCES",
        code: 'SWIFT_CLI_SPAWN_ERROR',
        details: spawnError.toString()
      }
    });
    expect(mockLogger.error).toHaveBeenCalledWith(expect.objectContaining({ error: spawnError }), "Failed to spawn Swift CLI process");
  });
  it('should append --json-output to args', async () => {
    const mockChildProcess = {
      stdout: { on: jest.fn((event, cb) => { if (event === 'data') cb(Buffer.from(JSON.stringify({ success: true }))); }) },
      stderr: { on: jest.fn() },
      on: jest.fn((event, cb) => { if (event === 'close') cb(0); }),
      kill: jest.fn(),
    };
    mockSpawn.mockReturnValue(mockChildProcess);
    await executeSwiftCli(['list', 'apps'], mockLogger);
    expect(mockSpawn).toHaveBeenCalledWith(expect.any(String), ['list', 'apps', '--json-output']);
  });
  it('should capture stderr output from Swift CLI for debugging', async () => {
    // stderr text is forwarded to logger.warn but does not fail the call.
    const mockChildProcess = {
      stdout: { on: jest.fn((event, cb) => { if (event === 'data') cb(Buffer.from(JSON.stringify({ success: true, data: {} }))); }) },
      stderr: { on: jest.fn((event, cb) => { if (event === 'data') cb(Buffer.from("Debug warning on stderr")); }) },
      on: jest.fn((event, cb) => { if (event === 'close') cb(0); }),
      kill: jest.fn(),
    };
    mockSpawn.mockReturnValue(mockChildProcess);
    const result = await executeSwiftCli(['list', 'apps'], mockLogger);
    expect(result.success).toBe(true);
    expect(mockLogger.warn).toHaveBeenCalledWith({ swift_stderr: "Debug warning on stderr" }, "[SwiftCLI-stderr]");
  });
});
});