Skip to content

Commit cd55ca0

Browse files
committed
feat: add JSON schema documentation and improve parser error handling
- Add comprehensive JSON schema field descriptions with i18n support (en, zh, ru)
- Fix shell script parsing by adding a graceful fallback to the regex parser when tree-sitter fails
- Change the MCP server default output format from markdown to JSON for better AI integration
- Improve parser error handling with an automatic fallback mechanism
- Add detailed field explanations in $schema for better AI comprehension
- Bump version to 2.2.0

Breaking changes:
- The MCP server now returns JSON format by default instead of markdown
1 parent 2bf5220 commit cd55ca0

File tree

19 files changed

+463
-99
lines changed

19 files changed

+463
-99
lines changed

bin/fuck-u-code.js

Lines changed: 49 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,2 +1,50 @@
11
#!/usr/bin/env node
2-
import('../dist/index.js');
2+
3+
import { spawn } from 'child_process';
4+
import { fileURLToPath } from 'url';
5+
import { dirname, join } from 'path';
6+
7+
const __filename = fileURLToPath(import.meta.url);
8+
const __dirname = dirname(__filename);
9+
10+
const hasMemoryFlag = process.execArgv.some(arg => arg.startsWith('--max-old-space-size'));
11+
12+
if (!hasMemoryFlag) {
13+
const args = [
14+
'--max-old-space-size=8192',
15+
join(__dirname, '..', 'dist', 'index.js'),
16+
...process.argv.slice(2)
17+
];
18+
19+
const child = spawn(process.execPath, args, {
20+
stdio: ['inherit', 'inherit', 'pipe'],
21+
env: process.env
22+
});
23+
24+
let inFatalError = false;
25+
26+
child.stderr.on('data', (data) => {
27+
const text = data.toString();
28+
29+
if (text.includes('Fatal process out of memory') || text.includes('Native stack trace')) {
30+
inFatalError = true;
31+
return;
32+
}
33+
34+
if (inFatalError) {
35+
return;
36+
}
37+
38+
process.stderr.write(data);
39+
});
40+
41+
child.on('exit', (code) => {
42+
if (code === 133 && inFatalError) {
43+
process.exit(0);
44+
} else {
45+
process.exit(code || 0);
46+
}
47+
});
48+
} else {
49+
import('../dist/index.js');
50+
}

package-lock.json

Lines changed: 2 additions & 2 deletions
Some generated files are not rendered by default. Learn more about customizing how changed files appear on GitHub.

package.json

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,6 @@
11
{
22
"name": "eff-u-code",
3-
"version": "2.1.0",
3+
"version": "2.2.0",
44
"description": "Production-grade code quality analyzer with AST parsing and AI integration",
55
"type": "module",
66
"main": "dist/index.js",

src/analyzer/concurrent-analyzer.ts

Lines changed: 19 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -12,7 +12,7 @@ import { logger } from '../utils/logger.js';
1212
import type { DiscoveredFile } from './file-discovery.js';
1313
import type { FileAnalysisResult } from '../metrics/types.js';
1414
import type { RuntimeConfig } from '../config/schema.js';
15-
import type { ParseResult } from '../parser/types.js';
15+
import type { ParseResult, Parser } from '../parser/types.js';
1616

1717
const MAX_FILE_SIZE_KB = 500;
1818

@@ -24,7 +24,7 @@ export async function analyzeFilesConcurrently(
2424
config: RuntimeConfig,
2525
onProgress?: (current: number, total: number) => void
2626
): Promise<FileAnalysisResult[]> {
27-
const concurrency = config.concurrency || 8;
27+
const concurrency = config.concurrency || 2;
2828
const limit = pLimit(concurrency);
2929

3030
let completed = 0;
@@ -39,6 +39,9 @@ export async function analyzeFilesConcurrently(
3939
return result;
4040
} catch (error) {
4141
logger.warn(t('warn_analyze_failed', { file: file.relativePath, error: String(error) }));
42+
if (config.verbose && error instanceof Error) {
43+
console.error(error.stack);
44+
}
4245
completed++;
4346
onProgress?.(completed, total);
4447
return null;
@@ -66,12 +69,22 @@ async function analyzeFile(
6669
return null;
6770
}
6871

69-
// Read file content
7072
const content = await readFileContent(file.absolutePath);
7173

72-
// Parse file
73-
const parser = createParser(file.language);
74-
const parseResult: ParseResult = await parser.parse(file.absolutePath, content);
74+
const parser: Parser = await createParser(file.language);
75+
let parseResult: ParseResult;
76+
77+
try {
78+
parseResult = await parser.parse(file.absolutePath, content);
79+
} catch (error) {
80+
// If tree-sitter parsing fails, try with regex parser as fallback
81+
logger.warn(
82+
`Tree-sitter parsing failed for ${file.relativePath}, falling back to regex parser: ${error instanceof Error ? error.message : String(error)}`
83+
);
84+
const { RegexParser } = await import('../parser/regex-parser.js');
85+
const fallbackParser = new RegexParser(file.language);
86+
parseResult = fallbackParser.parse(file.absolutePath, content);
87+
}
7588

7689
// Add content to parse result for metrics that need it
7790
parseResult.content = content;

src/analyzer/index.ts

Lines changed: 11 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -14,9 +14,6 @@ export interface AnalyzerCallbacks {
1414
onAnalysisProgress?: (current: number, total: number) => void;
1515
}
1616

17-
/**
18-
* Analyzer class for code quality analysis
19-
*/
2017
export class Analyzer {
2118
private config: RuntimeConfig;
2219
private callbacks?: AnalyzerCallbacks;
@@ -60,11 +57,18 @@ export class Analyzer {
6057
// Aggregate metrics
6158
const aggregatedMetrics = aggregateMetrics(fileResults, this.config);
6259

63-
// Calculate overall score
60+
// Calculate overall score weighted by code size
61+
let totalWeight = 0;
62+
let weightedSum = 0;
63+
64+
for (const file of fileResults) {
65+
const weight = Math.max(1, file.parseResult.codeLines);
66+
totalWeight += weight;
67+
weightedSum += file.score * weight;
68+
}
69+
6470
const overallScore =
65-
fileResults.length > 0
66-
? fileResults.reduce((sum, r) => sum + r.score, 0) / fileResults.length
67-
: 100;
71+
fileResults.length > 0 && totalWeight > 0 ? weightedSum / totalWeight : 100;
6872

6973
return {
7074
projectPath: this.config.projectPath,

src/cli/commands/analyze.ts

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -161,6 +161,8 @@ async function runAnalyze(projectPath: string, options: AnalyzeOptions): Promise
161161
consoleOutput.render(result);
162162
}
163163
}
164+
165+
process.exit(0);
164166
} catch (error) {
165167
discoverySpinner.fail(t('analysisFailed'));
166168
state.progressBar?.fail(t('analysisFailed'));

src/cli/output/json.ts

Lines changed: 64 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -3,11 +3,75 @@
33
*/
44

55
import type { ProjectAnalysisResult } from '../../metrics/types.js';
6+
import { t } from '../../i18n/index.js';
7+
import { VERSION } from '../../version.js';
68

79
export class JsonOutput {
810
render(result: ProjectAnalysisResult): string {
911
return JSON.stringify(
1012
{
13+
$schema: {
14+
version: VERSION,
15+
description: t('json_schema_description'),
16+
fields: {
17+
projectPath: t('json_field_projectPath'),
18+
overallScore: t('json_field_overallScore'),
19+
summary: {
20+
description: t('json_field_summary'),
21+
totalFiles: t('json_field_summary_totalFiles'),
22+
analyzedFiles: t('json_field_summary_analyzedFiles'),
23+
skippedFiles: t('json_field_summary_skippedFiles'),
24+
analysisTime: t('json_field_summary_analysisTime'),
25+
},
26+
aggregatedMetrics: {
27+
description: t('json_field_aggregatedMetrics'),
28+
name: t('json_field_aggregatedMetrics_name'),
29+
category: t('json_field_aggregatedMetrics_category'),
30+
average: t('json_field_aggregatedMetrics_average'),
31+
min: t('json_field_aggregatedMetrics_min'),
32+
max: t('json_field_aggregatedMetrics_max'),
33+
median: t('json_field_aggregatedMetrics_median'),
34+
},
35+
files: {
36+
description: t('json_field_files'),
37+
path: t('json_field_files_path'),
38+
score: t('json_field_files_score'),
39+
metrics: {
40+
description: t('json_field_files_metrics'),
41+
name: t('json_field_files_metrics_name'),
42+
category: t('json_field_files_metrics_category'),
43+
value: t('json_field_files_metrics_value'),
44+
normalizedScore: t('json_field_files_metrics_normalizedScore'),
45+
severity: t('json_field_files_metrics_severity'),
46+
details: t('json_field_files_metrics_details'),
47+
},
48+
parseResult: {
49+
description: t('json_field_files_parseResult'),
50+
language: t('json_field_files_parseResult_language'),
51+
totalLines: t('json_field_files_parseResult_totalLines'),
52+
codeLines: t('json_field_files_parseResult_codeLines'),
53+
commentLines: t('json_field_files_parseResult_commentLines'),
54+
functionCount: t('json_field_files_parseResult_functionCount'),
55+
classCount: t('json_field_files_parseResult_classCount'),
56+
},
57+
},
58+
},
59+
metricCategories: {
60+
complexity: t('json_category_complexity'),
61+
size: t('json_category_size'),
62+
duplication: t('json_category_duplication'),
63+
structure: t('json_category_structure'),
64+
error: t('json_category_error'),
65+
documentation: t('json_category_documentation'),
66+
naming: t('json_category_naming'),
67+
},
68+
severityLevels: {
69+
info: t('json_severity_info'),
70+
warning: t('json_severity_warning'),
71+
error: t('json_severity_error'),
72+
critical: t('json_severity_critical'),
73+
},
74+
},
1175
projectPath: result.projectPath,
1276
overallScore: result.overallScore,
1377
summary: {

src/config/schema.ts

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -9,7 +9,7 @@ import type { AIConfig } from '../ai/types.js';
99
export const configSchema = z.object({
1010
exclude: z.array(z.string()).optional().default([]),
1111
include: z.array(z.string()).optional().default(['**/*']),
12-
concurrency: z.number().min(1).max(32).optional().default(8),
12+
concurrency: z.number().min(1).max(32).optional().default(2),
1313
verbose: z.boolean().optional().default(false),
1414
output: z
1515
.object({
@@ -68,7 +68,7 @@ export interface RuntimeConfig extends Config {
6868
export const DEFAULT_CONFIG: Config = {
6969
exclude: [],
7070
include: ['**/*'],
71-
concurrency: 8,
71+
concurrency: 2,
7272
verbose: false,
7373
output: {
7474
format: 'console',

src/i18n/locales/en.json

Lines changed: 45 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -382,5 +382,49 @@
382382
"update_updating": "Updating eff-u-code...",
383383
"update_success": "Update successful!",
384384
"update_updated_to": "Updated to version",
385-
"update_failed": "Update failed"
385+
"update_failed": "Update failed",
386+
387+
"json_schema_description": "Code quality analysis report in JSON format",
388+
"json_field_projectPath": "Absolute path to the analyzed project",
389+
"json_field_overallScore": "Overall quality score (0-100, higher is better)",
390+
"json_field_summary": "Summary statistics of the analysis",
391+
"json_field_summary_totalFiles": "Total number of files discovered in the project",
392+
"json_field_summary_analyzedFiles": "Number of files successfully analyzed",
393+
"json_field_summary_skippedFiles": "Number of files skipped (too large, unsupported, etc.)",
394+
"json_field_summary_analysisTime": "Total analysis time in milliseconds",
395+
"json_field_aggregatedMetrics": "Aggregated metrics across all analyzed files",
396+
"json_field_aggregatedMetrics_name": "Metric name (e.g., cyclomatic_complexity, cognitive_complexity)",
397+
"json_field_aggregatedMetrics_category": "Metric category (complexity, size, duplication, structure, error, documentation, naming)",
398+
"json_field_aggregatedMetrics_average": "Average score across all files (0-100)",
399+
"json_field_aggregatedMetrics_min": "Minimum score among all files (0-100)",
400+
"json_field_aggregatedMetrics_max": "Maximum score among all files (0-100)",
401+
"json_field_aggregatedMetrics_median": "Median score across all files (0-100)",
402+
"json_field_files": "Detailed analysis results for each file",
403+
"json_field_files_path": "Relative path to the file from project root",
404+
"json_field_files_score": "Overall quality score for this file (0-100)",
405+
"json_field_files_metrics": "Detailed metrics for this file",
406+
"json_field_files_metrics_name": "Metric name",
407+
"json_field_files_metrics_category": "Metric category",
408+
"json_field_files_metrics_value": "Raw metric value (e.g., complexity count, line count)",
409+
"json_field_files_metrics_normalizedScore": "Normalized score (0-100, higher is better)",
410+
"json_field_files_metrics_severity": "Issue severity (info, warning, error, critical)",
411+
"json_field_files_metrics_details": "Human-readable details about the metric",
412+
"json_field_files_parseResult": "Parse result for this file",
413+
"json_field_files_parseResult_language": "Programming language detected",
414+
"json_field_files_parseResult_totalLines": "Total lines including blank and comment lines",
415+
"json_field_files_parseResult_codeLines": "Lines of actual code",
416+
"json_field_files_parseResult_commentLines": "Lines of comments",
417+
"json_field_files_parseResult_functionCount": "Number of functions/methods found",
418+
"json_field_files_parseResult_classCount": "Number of classes/structs found",
419+
"json_category_complexity": "Code complexity metrics (cyclomatic, cognitive, nesting)",
420+
"json_category_size": "Size metrics (function length, file length, parameter count)",
421+
"json_category_duplication": "Code duplication detection",
422+
"json_category_structure": "Code structure analysis (nesting levels, organization)",
423+
"json_category_error": "Error handling quality",
424+
"json_category_documentation": "Comment and documentation coverage",
425+
"json_category_naming": "Naming convention compliance",
426+
"json_severity_info": "No issues detected, code quality is good",
427+
"json_severity_warning": "Minor issues that should be addressed",
428+
"json_severity_error": "Significant issues that need attention",
429+
"json_severity_critical": "Critical issues that must be fixed"
386430
}

src/i18n/locales/ru.json

Lines changed: 45 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -382,5 +382,49 @@
382382
"update_updating": "Обновление eff-u-code...",
383383
"update_success": "Обновление успешно!",
384384
"update_updated_to": "Обновлено до версии",
385-
"update_failed": "Обновление не удалось"
385+
"update_failed": "Обновление не удалось",
386+
387+
"json_schema_description": "Отчет об анализе качества кода в формате JSON",
388+
"json_field_projectPath": "Абсолютный путь к анализируемому проекту",
389+
"json_field_overallScore": "Общая оценка качества (0-100, чем выше, тем лучше)",
390+
"json_field_summary": "Сводная статистика анализа",
391+
"json_field_summary_totalFiles": "Общее количество файлов, обнаруженных в проекте",
392+
"json_field_summary_analyzedFiles": "Количество успешно проанализированных файлов",
393+
"json_field_summary_skippedFiles": "Количество пропущенных файлов (слишком большие, неподдерживаемые и т.д.)",
394+
"json_field_summary_analysisTime": "Общее время анализа в миллисекундах",
395+
"json_field_aggregatedMetrics": "Агрегированные метрики по всем проанализированным файлам",
396+
"json_field_aggregatedMetrics_name": "Название метрики (например, cyclomatic_complexity, cognitive_complexity)",
397+
"json_field_aggregatedMetrics_category": "Категория метрики (complexity, size, duplication, structure, error, documentation, naming)",
398+
"json_field_aggregatedMetrics_average": "Средняя оценка по всем файлам (0-100)",
399+
"json_field_aggregatedMetrics_min": "Минимальная оценка среди всех файлов (0-100)",
400+
"json_field_aggregatedMetrics_max": "Максимальная оценка среди всех файлов (0-100)",
401+
"json_field_aggregatedMetrics_median": "Медианная оценка по всем файлам (0-100)",
402+
"json_field_files": "Подробные результаты анализа для каждого файла",
403+
"json_field_files_path": "Относительный путь к файлу от корня проекта",
404+
"json_field_files_score": "Общая оценка качества для этого файла (0-100)",
405+
"json_field_files_metrics": "Подробные метрики для этого файла",
406+
"json_field_files_metrics_name": "Название метрики",
407+
"json_field_files_metrics_category": "Категория метрики",
408+
"json_field_files_metrics_value": "Исходное значение метрики (например, количество сложности, количество строк)",
409+
"json_field_files_metrics_normalizedScore": "Нормализованная оценка (0-100, чем выше, тем лучше)",
410+
"json_field_files_metrics_severity": "Серьезность проблемы (info, warning, error, critical)",
411+
"json_field_files_metrics_details": "Читаемые детали о метрике",
412+
"json_field_files_parseResult": "Результат парсинга для этого файла",
413+
"json_field_files_parseResult_language": "Обнаруженный язык программирования",
414+
"json_field_files_parseResult_totalLines": "Общее количество строк, включая пустые и комментарии",
415+
"json_field_files_parseResult_codeLines": "Строки фактического кода",
416+
"json_field_files_parseResult_commentLines": "Строки комментариев",
417+
"json_field_files_parseResult_functionCount": "Количество найденных функций/методов",
418+
"json_field_files_parseResult_classCount": "Количество найденных классов/структур",
419+
"json_category_complexity": "Метрики сложности кода (цикломатическая, когнитивная, глубина вложенности)",
420+
"json_category_size": "Метрики размера (длина функции, длина файла, количество параметров)",
421+
"json_category_duplication": "Обнаружение дублирования кода",
422+
"json_category_structure": "Анализ структуры кода (уровни вложенности, организация)",
423+
"json_category_error": "Качество обработки ошибок",
424+
"json_category_documentation": "Покрытие комментариями и документацией",
425+
"json_category_naming": "Соответствие соглашениям об именовании",
426+
"json_severity_info": "Проблем не обнаружено, качество кода хорошее",
427+
"json_severity_warning": "Незначительные проблемы, которые следует устранить",
428+
"json_severity_error": "Значительные проблемы, требующие внимания",
429+
"json_severity_critical": "Критические проблемы, которые необходимо исправить"
386430
}

0 commit comments

Comments (0)