=== NEW ===
- studio/ — MCPEngine Studio scaffold (Next.js monorepo, build plan)
- docs/FACTORY-V2.md — Factory v2 architecture doc
- docs/CALENDLY_MCP_BUILD_SUMMARY.md — Calendly MCP build report

=== UPDATED SERVERS ===
- fieldedge: Added jobs-tools, UI build script, main entry update
- lightspeed: Updated main + server entry points
- squarespace: Added collection-browser + page-manager apps
- toast: Added main + server entry points

=== INFRA ===
- infra/command-center/state.json — Updated pipeline state
- infra/command-center/FACTORY-V2.md — Factory v2 operator playbook
146 lines
4.8 KiB
TypeScript
// Analyzer Service — streams API spec analysis via Claude
|
|
|
|
import Anthropic from '@anthropic-ai/sdk';
|
|
import { getSkill } from '../skills/loader';
|
|
import { parseStreamEvents } from '../streaming/parser';
|
|
import type {
|
|
PipelineEvent,
|
|
AnalysisResult,
|
|
ToolDefinition,
|
|
} from '../types';
|
|
|
|
// Claude model snapshot used for spec analysis.
// NOTE(review): the id mixes the "sonnet-4-5" name with a 2025-05-14 date
// suffix — confirm this is a valid published model snapshot.
const MODEL = 'claude-sonnet-4-5-20250514';

// Upper bound on tokens Claude may generate for a single analysis response.
const MAX_TOKENS = 8192;
|
|
|
|
/**
|
|
* Analyze an API spec (OpenAPI/Swagger JSON or YAML string).
|
|
* Streams PipelineEvent objects as analysis progresses.
|
|
*/
|
|
export async function* analyzeSpec(spec: string): AsyncGenerator<PipelineEvent> {
|
|
const client = new Anthropic({ apiKey: process.env.ANTHROPIC_API_KEY });
|
|
const systemPrompt = getSkill('analyzer');
|
|
|
|
yield { type: 'analysis:progress', step: 'Starting API analysis', percent: 0 };
|
|
|
|
try {
|
|
const stream = client.messages.stream({
|
|
model: MODEL,
|
|
max_tokens: MAX_TOKENS,
|
|
system: systemPrompt,
|
|
messages: [
|
|
{
|
|
role: 'user',
|
|
content: `Analyze this API specification and produce a complete MCP tool mapping.\n\nReturn your analysis as a JSON object with this structure:\n{\n "service": "service name",\n "baseUrl": "base URL",\n "endpoints": [...],\n "authFlow": { "type": "api_key|oauth2|bearer|basic|custom", ... },\n "toolGroups": [{ "name": "...", "description": "...", "tools": [...] }],\n "appCandidates": [{ "name": "...", "pattern": "...", "description": "...", "dataSource": [...], "suggestedWidgets": [...] }],\n "rateLimits": { ... }\n}\n\nWrap the final JSON in a \`\`\`json code block.\n\nAPI Specification:\n\`\`\`\n${spec}\n\`\`\``,
|
|
},
|
|
],
|
|
});
|
|
|
|
let fullText = '';
|
|
let lastPercent = 0;
|
|
|
|
stream.on('text', (text) => {
|
|
fullText += text;
|
|
});
|
|
|
|
// Process the stream
|
|
for await (const event of stream) {
|
|
if (event.type === 'content_block_delta' && event.delta.type === 'text_delta') {
|
|
const newPercent = Math.min(90, Math.floor((fullText.length / 4000) * 90));
|
|
if (newPercent > lastPercent + 5) {
|
|
lastPercent = newPercent;
|
|
yield { type: 'analysis:progress', step: 'Analyzing endpoints and generating tools', percent: newPercent };
|
|
}
|
|
|
|
// Check for tool definitions appearing in stream
|
|
const toolEvents = parseStreamEvents(fullText, 'analysis');
|
|
for (const te of toolEvents) {
|
|
if (te.type === 'analysis:tool_found') {
|
|
yield te;
|
|
}
|
|
}
|
|
}
|
|
}
|
|
|
|
// Get final message for token usage
|
|
const finalMessage = await stream.finalMessage();
|
|
const usage = {
|
|
inputTokens: finalMessage.usage.input_tokens,
|
|
outputTokens: finalMessage.usage.output_tokens,
|
|
};
|
|
|
|
yield { type: 'analysis:progress', step: 'Parsing analysis results', percent: 95 };
|
|
|
|
// Extract JSON from the full response
|
|
const result = extractAnalysisResult(fullText);
|
|
|
|
if (result) {
|
|
// Yield individual tools as they're found
|
|
for (const group of result.toolGroups) {
|
|
for (const tool of group.tools) {
|
|
yield { type: 'analysis:tool_found', tool: tool as ToolDefinition };
|
|
}
|
|
}
|
|
|
|
yield { type: 'analysis:complete', result };
|
|
} else {
|
|
yield {
|
|
type: 'error',
|
|
message: 'Failed to parse analysis result from Claude response',
|
|
recoverable: true,
|
|
};
|
|
}
|
|
} catch (error) {
|
|
const msg = error instanceof Error ? error.message : String(error);
|
|
yield {
|
|
type: 'error',
|
|
message: `Analysis failed: ${msg}`,
|
|
recoverable: error instanceof Anthropic.RateLimitError,
|
|
};
|
|
}
|
|
}
|
|
|
|
function extractAnalysisResult(text: string): AnalysisResult | null {
|
|
// Try to find JSON in code blocks
|
|
const jsonMatch = text.match(/```json\s*\n([\s\S]*?)\n```/);
|
|
if (jsonMatch) {
|
|
try {
|
|
const parsed = JSON.parse(jsonMatch[1]);
|
|
return {
|
|
id: crypto.randomUUID(),
|
|
service: parsed.service || 'unknown',
|
|
baseUrl: parsed.baseUrl || '',
|
|
endpoints: parsed.endpoints || [],
|
|
authFlow: parsed.authFlow || { type: 'api_key' },
|
|
toolGroups: parsed.toolGroups || [],
|
|
appCandidates: parsed.appCandidates || [],
|
|
rateLimits: parsed.rateLimits || {},
|
|
};
|
|
} catch {
|
|
// fall through
|
|
}
|
|
}
|
|
|
|
// Try raw JSON parse
|
|
const braceStart = text.indexOf('{');
|
|
const braceEnd = text.lastIndexOf('}');
|
|
if (braceStart !== -1 && braceEnd > braceStart) {
|
|
try {
|
|
const parsed = JSON.parse(text.slice(braceStart, braceEnd + 1));
|
|
return {
|
|
id: crypto.randomUUID(),
|
|
service: parsed.service || 'unknown',
|
|
baseUrl: parsed.baseUrl || '',
|
|
endpoints: parsed.endpoints || [],
|
|
authFlow: parsed.authFlow || { type: 'api_key' },
|
|
toolGroups: parsed.toolGroups || [],
|
|
appCandidates: parsed.appCandidates || [],
|
|
rateLimits: parsed.rateLimits || {},
|
|
};
|
|
} catch {
|
|
return null;
|
|
}
|
|
}
|
|
|
|
return null;
|
|
}
|