Skip to content
Merged
Show file tree
Hide file tree
Changes from 1 commit
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
64 changes: 64 additions & 0 deletions mpp-vscode/package.json
Original file line number Diff line number Diff line change
Expand Up @@ -44,6 +44,30 @@
{
"command": "autodev.runAgent",
"title": "AutoDev: Run Coding Agent"
},
{
"command": "autodev.codelens.quickChat",
"title": "AutoDev: Quick Chat"
},
{
"command": "autodev.codelens.explainCode",
"title": "AutoDev: Explain Code"
},
{
"command": "autodev.codelens.optimizeCode",
"title": "AutoDev: Optimize Code"
},
{
"command": "autodev.codelens.autoComment",
"title": "AutoDev: Auto Comment"
},
{
"command": "autodev.codelens.autoTest",
"title": "AutoDev: Auto Test"
},
{
"command": "autodev.codelens.autoMethod",
"title": "AutoDev: Auto Method"
Copy link

Copilot AI Dec 4, 2025

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

[nitpick] The command autodev.codelens.showMenu is registered in codelens-commands.ts and used in codelens-provider.ts (line 198), but it's not declared in the commands section of package.json. While VS Code allows undeclared commands for internal use, it's better practice to declare all commands for discoverability and consistency.

Add the command declaration:

{
  "command": "autodev.codelens.showMenu",
  "title": "AutoDev: Show CodeLens Menu"
}
Suggested change
"title": "AutoDev: Auto Method"
"title": "AutoDev: Auto Method"
},
{
"command": "autodev.codelens.showMenu",
"title": "AutoDev: Show CodeLens Menu"

Copilot uses AI. Check for mistakes.
}
],
"viewsContainers": {
Expand Down Expand Up @@ -99,6 +123,44 @@
"type": "number",
"default": 23120,
"description": "Port for the IDE server (MCP protocol)"
},
"autodev.codelens.enable": {
"type": "boolean",
"default": true,
"description": "Enable CodeLens to show AI actions above functions and classes"
},
"autodev.codelens.displayMode": {
"type": "string",
"default": "expand",
"enum": [
"expand",
"collapse"
],
"enumDescriptions": [
"Show all actions separately",
"Show a collapsed menu"
],
"description": "CodeLens display mode"
},
"autodev.codelens.items": {
"type": "array",
"default": [
"quickChat",
"autoTest",
"autoComment"
],
"items": {
"type": "string",
"enum": [
"quickChat",
"explainCode",
"optimizeCode",
"autoComment",
"autoTest",
"autoMethod"
]
},
"description": "CodeLens items to display"
}
}
},
Expand Down Expand Up @@ -162,8 +224,10 @@
"dependencies": {
"@autodev/mpp-core": "file:../mpp-core/build/packages/js",
"@modelcontextprotocol/sdk": "^1.0.0",
"@unit-mesh/treesitter-artifacts": "^1.7.7",
"cors": "^2.8.5",
"express": "^4.18.2",
"web-tree-sitter": "^0.25.10",
Comment on lines +258 to +261
Copy link

Copilot AI Dec 4, 2025

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

The web-tree-sitter package requires WASM files to be loaded at runtime. The WASM path resolution in code-element-parser.ts uses require.resolve() which may not work correctly in a bundled extension environment.

Consider:

  1. Ensuring WASM files from @unit-mesh/treesitter-artifacts are copied to the dist folder during build
  2. Using vscode.Uri.joinPath(context.extensionUri, 'wasm', ...) for path resolution instead of require.resolve()
  3. Testing the CodeLens feature in a packaged extension to verify WASM loading works correctly

Copilot uses AI. Check for mistakes.
"yaml": "^2.8.2",
"zod": "^3.22.4"
}
Expand Down
254 changes: 254 additions & 0 deletions mpp-vscode/src/actions/auto-actions.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,254 @@
/**
* Auto Actions - AutoComment, AutoTest, AutoMethod implementations
*
* Based on autodev-vscode's action executors, adapted for mpp-vscode.
*/

import * as vscode from 'vscode';
import { CodeElement } from '../providers/code-element-parser';
import { LLMService, ModelConfig } from '../bridge/mpp-core';
import { DiffManager } from '../services/diff-manager';
import {
generateAutoDocPrompt,
generateAutoTestPrompt,
generateAutoMethodPrompt,
parseCodeBlock,
LANGUAGE_COMMENT_MAP,
getTestFramework,
getTestFilePath,
AutoDocContext,
AutoTestContext,
AutoMethodContext
} from '../prompts/prompt-templates';

/**
 * Everything an auto-action executor needs to run against a single code
 * element: the parsed element, the document it lives in, the model
 * configuration, and a logging sink.
 */
export interface ActionContext {
  /** Document that contains the target element. */
  document: vscode.TextDocument;
  /** Parsed code element (e.g. method or structure) the action targets. */
  element: CodeElement;
  /** Model configuration passed to the LLMService constructor. */
  config: ModelConfig;
  /** Logging callback supplied by the extension host. */
  log: (message: string) => void;
}

/**
* Execute AutoComment action - generates documentation comments
*/
export async function executeAutoComment(context: ActionContext): Promise<void> {
const { document, element, config, log } = context;
const language = document.languageId;

log(`AutoComment: Generating documentation for ${element.name}`);

const commentSymbols = LANGUAGE_COMMENT_MAP[language] || { start: '/**', end: '*/' };

const promptContext: AutoDocContext = {
language,
code: element.code,
startSymbol: commentSymbols.start,
endSymbol: commentSymbols.end,
};

const prompt = generateAutoDocPrompt(promptContext);

try {
await vscode.window.withProgress({
location: vscode.ProgressLocation.Notification,
title: `Generating documentation for ${element.name}...`,
cancellable: true
}, async (progress, token) => {
const llmService = new LLMService(config);

let response = '';
await llmService.streamMessage(prompt, (chunk) => {
response += chunk;
progress.report({ message: 'Generating...' });
});

if (token.isCancellationRequested) return;
Comment on lines +56 to +65
Copy link

Copilot AI Dec 4, 2025

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

The cancellation token is not properly checked during the streaming process. The token is only checked after llmService.streamMessage completes, but the streaming operation itself is not cancellable. If a user cancels the operation during streaming, the LLM service will continue processing until completion, wasting resources.

Consider passing the cancellation token to streamMessage or adding periodic checks within the stream callback to respect cancellation requests.

Copilot uses AI. Check for mistakes.

const docComment = parseCodeBlock(response, language);
if (!docComment) {
vscode.window.showWarningMessage('Failed to generate documentation');
return;
}

const formattedDoc = formatDocComment(docComment, document, element);
const insertPosition = new vscode.Position(element.bodyRange.start.line, 0);

const diffManager = new DiffManager(log);
const originalContent = document.getText();
const newContent = insertTextAtPosition(originalContent, formattedDoc, document.offsetAt(insertPosition));

await diffManager.showDiff(document.uri.fsPath, originalContent, newContent);
log(`AutoComment: Documentation generated for ${element.name}`);
});
} catch (error) {
const message = error instanceof Error ? error.message : String(error);
log(`AutoComment error: ${message}`);
vscode.window.showErrorMessage(`Failed to generate documentation: ${message}`);
}
}

/**
* Execute AutoTest action - generates unit tests
*/
export async function executeAutoTest(context: ActionContext): Promise<void> {
const { document, element, config, log } = context;
const language = document.languageId;

log(`AutoTest: Generating tests for ${element.name}`);

const promptContext: AutoTestContext = {
language,
sourceCode: element.code,
className: element.type === 'structure' ? element.name : undefined,
methodName: element.type === 'method' ? element.name : undefined,
testFramework: getTestFramework(language),
isNewFile: true,
};

const prompt = generateAutoTestPrompt(promptContext);

try {
await vscode.window.withProgress({
location: vscode.ProgressLocation.Notification,
title: `Generating tests for ${element.name}...`,
cancellable: true
}, async (progress, token) => {
const llmService = new LLMService(config);

let response = '';
await llmService.streamMessage(prompt, (chunk) => {
response += chunk;
progress.report({ message: 'Generating...' });
});
Comment on lines +119 to +122
Copy link

Copilot AI Dec 4, 2025

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

The same cancellation token issue exists here. The token is checked after streaming completes, but the LLM service continues processing even if the user cancels. This applies to all three auto-action functions (AutoComment, AutoTest, AutoMethod).

Suggested change
await llmService.streamMessage(prompt, (chunk) => {
response += chunk;
progress.report({ message: 'Generating...' });
});
const abortController = new AbortController();
const cancellationListener = token.onCancellationRequested(() => {
abortController.abort();
});
try {
await llmService.streamMessage(
prompt,
(chunk) => {
response += chunk;
progress.report({ message: 'Generating...' });
},
{ abortSignal: abortController.signal }
);
} catch (err) {
if (abortController.signal.aborted) {
// Cancelled by user, exit early
return;
}
throw err;
} finally {
cancellationListener.dispose();
}

Copilot uses AI. Check for mistakes.

if (token.isCancellationRequested) return;

const testCode = parseCodeBlock(response, language);
if (!testCode) {
vscode.window.showWarningMessage('Failed to generate test code');
return;
}

const testFilePath = getTestFilePath(document.uri.fsPath, language);
const testFileUri = vscode.Uri.file(testFilePath);

let existingContent = '';
try {
const existingDoc = await vscode.workspace.openTextDocument(testFileUri);
existingContent = existingDoc.getText();
} catch { /* File doesn't exist */ }

const diffManager = new DiffManager(log);
if (existingContent) {
const newContent = existingContent + '\n\n' + testCode;
await diffManager.showDiff(testFilePath, existingContent, newContent);
} else {
await diffManager.showDiff(testFilePath, '', testCode);
}

log(`AutoTest: Tests generated for ${element.name}`);
});
} catch (error) {
const message = error instanceof Error ? error.message : String(error);
log(`AutoTest error: ${message}`);
vscode.window.showErrorMessage(`Failed to generate tests: ${message}`);
}
}

/**
* Execute AutoMethod action - generates method implementation
*/
export async function executeAutoMethod(context: ActionContext): Promise<void> {
const { document, element, config, log } = context;
const language = document.languageId;

log(`AutoMethod: Generating implementation for ${element.name}`);

const promptContext: AutoMethodContext = {
language,
code: element.code,
methodSignature: extractMethodSignature(element.code),
className: findContainingClass(document, element),
};

const prompt = generateAutoMethodPrompt(promptContext);

try {
await vscode.window.withProgress({
location: vscode.ProgressLocation.Notification,
title: `Generating implementation for ${element.name}...`,
cancellable: true
}, async (progress, token) => {
const llmService = new LLMService(config);

let response = '';
await llmService.streamMessage(prompt, (chunk) => {
response += chunk;
progress.report({ message: 'Generating...' });
});
Copy link

Copilot AI Dec 4, 2025

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

The same cancellation token issue exists here as well.

Suggested change
});
}, token);

Copilot uses AI. Check for mistakes.

if (token.isCancellationRequested) return;

const methodCode = parseCodeBlock(response, language);
if (!methodCode) {
vscode.window.showWarningMessage('Failed to generate method implementation');
return;
}

const diffManager = new DiffManager(log);
const originalContent = document.getText();
const newContent = replaceMethodBody(originalContent, element, methodCode, document);

await diffManager.showDiff(document.uri.fsPath, originalContent, newContent);
log(`AutoMethod: Implementation generated for ${element.name}`);
});
} catch (error) {
const message = error instanceof Error ? error.message : String(error);
log(`AutoMethod error: ${message}`);
vscode.window.showErrorMessage(`Failed to generate implementation: ${message}`);
}
}

// Helper functions

/**
 * Indent a generated doc comment so it lines up with the element it will
 * precede. Indentation is copied from the element's first line, and a
 * trailing newline is guaranteed so the comment occupies its own lines.
 */
function formatDocComment(docComment: string, document: vscode.TextDocument, element: CodeElement): string {
  const firstLine = document.lineAt(element.bodyRange.start.line);
  const indent = firstLine.text.substring(0, firstLine.firstNonWhitespaceCharacterIndex);

  let text = docComment.trim();
  if (!text.endsWith('\n')) {
    text += '\n';
  }

  // Prefix every non-empty line with the element's indentation; blank lines
  // stay blank so no trailing whitespace is introduced.
  return text
    .split('\n')
    .map((line) => (line.length > 0 ? indent + line : line))
    .join('\n');
}

/** Splice `text` into `content` at the given character offset. */
function insertTextAtPosition(content: string, text: string, position: number): string {
  const head = content.slice(0, position);
  const tail = content.slice(position);
  return head + text + tail;
}

/**
 * Take the leading lines of a code snippet up to and including the first
 * line that opens a body (`{`) or contains a `:` (e.g. Python-style
 * headers), and treat those lines as the method signature.
 */
function extractMethodSignature(code: string): string {
  const collected: string[] = [];
  for (const line of code.split('\n')) {
    collected.push(line);
    const opensBody = line.includes('{') || line.includes(':');
    if (opensBody) {
      break;
    }
  }
  return collected.join('\n').trim();
}

/**
 * Heuristically find the name of the class enclosing `element` by scanning
 * all text above the element for the last `class <Name>` occurrence.
 *
 * NOTE(review): this is a regex heuristic — it also matches `class` inside
 * strings or comments, and the last match before the element is not
 * guaranteed to actually enclose it. Returns undefined when nothing matches.
 */
function findContainingClass(document: vscode.TextDocument, element: CodeElement): string | undefined {
  const precedingRange = new vscode.Range(new vscode.Position(0, 0), element.bodyRange.start);
  const preceding = document.getText(precedingRange);

  // Keep overwriting so the final value is the last (nearest) declaration.
  let className: string | undefined;
  for (const match of preceding.matchAll(/class\s+(\w+)/g)) {
    className = match[1];
  }
  return className;
}

/** Replace the element's body range within `content` by the generated code. */
function replaceMethodBody(content: string, element: CodeElement, newMethodCode: string, document: vscode.TextDocument): string {
  const start = document.offsetAt(element.bodyRange.start);
  const end = document.offsetAt(element.bodyRange.end);
  return content.slice(0, start) + newMethodCode + content.slice(end);
}
Loading
Loading