Skip to content

Commit 8d64f56

Browse files
committed
feat(mpp-vscode): add Timeline-based Sketch Renderer architecture
Phase 7 - Sketch Renderer System:
- Add Timeline component mirroring mpp-ui's ComposeRenderer
- Add SketchRenderer with specialized sub-renderers:
  - CodeBlockRenderer: syntax highlighting with Copy/Insert/Apply actions
  - DiffRenderer: diff display with Accept/Reject/View actions
  - ThinkingRenderer: collapsible thinking blocks
  - TerminalRenderer: terminal command and output display
  - MarkdownRenderer: GFM markdown with react-markdown
  - ToolCallRenderer: tool call information display
- Add codeFence parser mirroring mpp-core's CodeFence.parseAll()
- Add timeline types (TimelineItem, AgentState, ToolCallInfo, etc.)

ChatViewProvider Refactoring:
- Replace LLMService with CodingAgent for agent-based interactions
- Add VSCodeRenderer implementing JsCodingAgentRenderer interface
- Forward agent events (toolCall, toolResult, terminalOutput) to webview
- Add action handlers (insert, apply, run-command, accept-diff, etc.)
- Remove API key requirement - use helpful guidance for non-DevIns input

All 63 tests passing. Refs #31
1 parent e434dfe commit 8d64f56

24 files changed

+2687
-87
lines changed

mpp-vscode/src/bridge/mpp-core.ts

Lines changed: 168 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -313,33 +313,192 @@ export interface ToolResult {
313313
metadata: Record<string, string>;
314314
}
315315

316+
/**
317+
* VSCode Renderer - Forwards agent events to webview
318+
* Implements the JsCodingAgentRenderer interface
319+
*/
320+
export class VSCodeRenderer {
321+
constructor(private chatProvider: { postMessage: (msg: any) => void }) {}
322+
323+
renderIterationHeader(current: number, max: number): void {
324+
this.chatProvider.postMessage({
325+
type: 'iterationUpdate',
326+
data: { current, max }
327+
});
328+
}
329+
330+
renderLLMResponseStart(): void {
331+
this.chatProvider.postMessage({ type: 'startResponse' });
332+
}
333+
334+
renderLLMResponseChunk(chunk: string): void {
335+
this.chatProvider.postMessage({ type: 'responseChunk', content: chunk });
336+
}
337+
338+
renderLLMResponseEnd(): void {
339+
this.chatProvider.postMessage({ type: 'endResponse' });
340+
}
341+
342+
renderToolCall(toolName: string, paramsStr: string): void {
343+
this.chatProvider.postMessage({
344+
type: 'toolCall',
345+
data: {
346+
toolName,
347+
params: paramsStr,
348+
description: `Calling ${toolName}`,
349+
success: null
350+
}
351+
});
352+
}
353+
354+
renderToolResult(toolName: string, success: boolean, output: string | null, fullOutput: string | null): void {
355+
this.chatProvider.postMessage({
356+
type: 'toolResult',
357+
data: {
358+
toolName,
359+
success,
360+
output,
361+
fullOutput,
362+
summary: success ? 'Completed' : 'Failed'
363+
}
364+
});
365+
}
366+
367+
renderTaskComplete(): void {
368+
this.chatProvider.postMessage({
369+
type: 'taskComplete',
370+
data: { success: true, message: 'Task completed' }
371+
});
372+
}
373+
374+
renderFinalResult(success: boolean, message: string, iterations: number): void {
375+
this.chatProvider.postMessage({
376+
type: 'taskComplete',
377+
data: { success, message: `${message} (${iterations} iterations)` }
378+
});
379+
}
380+
381+
renderError(message: string): void {
382+
this.chatProvider.postMessage({ type: 'error', content: message });
383+
}
384+
385+
renderRepeatWarning(toolName: string, count: number): void {
386+
this.chatProvider.postMessage({
387+
type: 'error',
388+
content: `Warning: ${toolName} called ${count} times - consider different approach`
389+
});
390+
}
391+
392+
renderRecoveryAdvice(advice: string): void {
393+
this.chatProvider.postMessage({
394+
type: 'responseChunk',
395+
content: `\n\n💡 **Suggestion**: ${advice}\n`
396+
});
397+
}
398+
399+
renderUserConfirmationRequest(toolName: string, params: Record<string, any>): void {
400+
// Auto-approve for now
401+
this.chatProvider.postMessage({
402+
type: 'toolCall',
403+
data: {
404+
toolName,
405+
params: JSON.stringify(params),
406+
description: `Tool '${toolName}' needs approval (auto-approved)`,
407+
success: null
408+
}
409+
});
410+
}
411+
412+
forceStop(): void {
413+
this.chatProvider.postMessage({
414+
type: 'taskComplete',
415+
data: { success: false, message: 'Stopped by user' }
416+
});
417+
}
418+
}
419+
316420
/**
317421
* Coding Agent - AI-powered coding assistant
422+
* Wraps mpp-core's JsCodingAgent
318423
*/
319424
export class CodingAgent {
320425
private agent: any;
426+
private renderer: VSCodeRenderer;
321427

322428
  /**
   * Build the underlying Kotlin/JS coding agent.
   *
   * @param config       provider/model settings used to construct the
   *                     Kotlin-side model config and LLM service
   * @param toolRegistry tool registry — NOTE(review): accepted but never read
   *                     in this constructor; confirm whether it should be
   *                     forwarded to JsCodingAgent (e.g. via toolConfig)
   * @param renderer     webview-forwarding renderer; wrapped by
   *                     createKotlinRenderer() before being handed to the agent
   * @param projectPath  workspace root the agent operates in
   * @param options      optional iteration cap and MCP server map
   */
  constructor(
    config: ModelConfig,
    toolRegistry: ToolRegistry,
    renderer: VSCodeRenderer,
    projectPath: string,
    options?: {
      maxIterations?: number;
      mcpServers?: Record<string, any>;
    }
  ) {
    this.renderer = renderer;

    // Create model config
    // Resolve the provider enum name; falls back to upper-casing the raw
    // string when the lowercase key is not in ProviderTypes — presumably the
    // Kotlin side accepts that form; verify against mpp-core.
    const providerName = ProviderTypes[config.provider.toLowerCase()] || config.provider.toUpperCase();
    const modelConfig = new JsModelConfig(
      providerName,
      config.model,
      config.apiKey,
      config.temperature ?? 0.7,  // default sampling temperature
      config.maxTokens ?? 8192,   // default completion budget
      config.baseUrl ?? ''        // empty string = provider default endpoint
    );

    // Create LLM service
    const llmService = new JsKoogLLMService(modelConfig);

    // Create agent with renderer
    this.agent = new JsCodingAgent(
      projectPath,
      llmService,
      options?.maxIterations ?? 100,     // iteration safety cap
      this.createKotlinRenderer(),       // plain-function wrapper over `renderer`
      options?.mcpServers ?? null,
      null // toolConfig
    );
  }
342464

465+
/**
466+
* Create a Kotlin-compatible renderer object
467+
*/
468+
private createKotlinRenderer(): any {
469+
const renderer = this.renderer;
470+
return {
471+
renderIterationHeader: (c: number, m: number) => renderer.renderIterationHeader(c, m),
472+
renderLLMResponseStart: () => renderer.renderLLMResponseStart(),
473+
renderLLMResponseChunk: (chunk: string) => renderer.renderLLMResponseChunk(chunk),
474+
renderLLMResponseEnd: () => renderer.renderLLMResponseEnd(),
475+
renderToolCall: (name: string, params: string) => renderer.renderToolCall(name, params),
476+
renderToolResult: (name: string, success: boolean, output: string | null, full: string | null) =>
477+
renderer.renderToolResult(name, success, output, full),
478+
renderTaskComplete: () => renderer.renderTaskComplete(),
479+
renderFinalResult: (success: boolean, msg: string, iters: number) =>
480+
renderer.renderFinalResult(success, msg, iters),
481+
renderError: (msg: string) => renderer.renderError(msg),
482+
renderRepeatWarning: (name: string, count: number) => renderer.renderRepeatWarning(name, count),
483+
renderRecoveryAdvice: (advice: string) => renderer.renderRecoveryAdvice(advice),
484+
renderUserConfirmationRequest: (name: string, params: any) =>
485+
renderer.renderUserConfirmationRequest(name, params),
486+
forceStop: () => renderer.forceStop()
487+
};
488+
}
489+
490+
/**
491+
* Execute a DevIns command or natural language task
492+
*/
493+
async execute(input: string): Promise<void> {
494+
try {
495+
await this.agent.execute(input);
496+
} catch (error) {
497+
const message = error instanceof Error ? error.message : String(error);
498+
this.renderer.renderError(message);
499+
}
500+
}
501+
343502
/**
344503
* Execute a coding task
345504
*/
@@ -367,17 +526,17 @@ export class CodingAgent {
367526
}
368527

369528
  /**
   * Clear conversation history
   *
   * Optional chaining makes this a no-op when the underlying agent build
   * does not expose clearHistory.
   */
  clearHistory(): void {
    this.agent.clearHistory?.();
  }
375534

376535
/**
377536
* Get conversation history
378537
*/
379538
getConversationHistory(): ChatMessage[] {
380-
const history = this.agent.getConversationHistory();
539+
const history = this.agent.getConversationHistory?.() || [];
381540
return Array.from(history).map((msg: any) => ({
382541
role: msg.role as 'user' | 'assistant' | 'system',
383542
content: msg.content

0 commit comments

Comments
 (0)