Mirror of https://github.com/dogkeeper886/ollama37.git (synced 2025-12-18 19:56:59 +00:00)
Add GitHub Actions CI/CD pipeline and test framework
- Add .github/workflows/build-test.yml for automated testing
- Add tests/ directory with TypeScript test runner
- Add docs/CICD.md documentation
- Remove .gitlab-ci.yml (migrated to GitHub Actions)
- Update .gitignore for test artifacts

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
tests/src/cli.ts (new file, 165 lines)
@@ -0,0 +1,165 @@
#!/usr/bin/env node

import { Command } from 'commander'
import { writeFileSync } from 'fs'
import path from 'path'
import { fileURLToPath } from 'url'
import { TestLoader } from './loader.js'
import { TestExecutor } from './executor.js'
import { LLMJudge } from './judge.js'
import { Reporter, TestLinkReporter } from './reporter.js'
import { RunnerOptions } from './types.js'

const __dirname = path.dirname(fileURLToPath(import.meta.url))
const defaultTestcasesDir = path.join(__dirname, '..', 'testcases')

const program = new Command()

program
  .name('ollama37-test')
  .description('Scalable test runner with LLM-as-judge for ollama37')
  .version('1.0.0')

program
  .command('run')
  .description('Run test cases')
  .option('-s, --suite <suite>', 'Run only tests in specified suite (build, runtime, inference)')
  .option('-i, --id <id>', 'Run only specified test case by ID')
  .option('-w, --workers <n>', 'Number of parallel workers', '1')
  .option('-d, --dry-run', 'Show what would be executed without running')
  .option('-o, --output <format>', 'Output format: console, json, junit', 'console')
  .option('--report-testlink', 'Report results to TestLink')
  .option('--ollama-url <url>', 'Ollama server URL', 'http://localhost:11434')
  .option('--ollama-model <model>', 'Ollama model for judging', 'gemma3:4b')
  .option('--testlink-url <url>', 'TestLink server URL', 'http://localhost:8090')
  .option('--testlink-api-key <key>', 'TestLink API key')
  .option('--no-llm', 'Skip LLM judging, use simple exit code check')
  .option('--testcases-dir <dir>', 'Test cases directory', defaultTestcasesDir)
  .action(async (options) => {
    console.log('='.repeat(60))
    console.log('OLLAMA37 TEST RUNNER')
    console.log('='.repeat(60))

    const loader = new TestLoader(options.testcasesDir)
    const executor = new TestExecutor(path.join(__dirname, '..', '..'))
    const judge = new LLMJudge(options.ollamaUrl, options.ollamaModel)

    // Load test cases
    console.log('\nLoading test cases...')
    let testCases = await loader.loadAll()

    if (options.suite) {
      testCases = testCases.filter(tc => tc.suite === options.suite)
      console.log(`  Filtered by suite: ${options.suite}`)
    }

    if (options.id) {
      testCases = testCases.filter(tc => tc.id === options.id)
      console.log(`  Filtered by ID: ${options.id}`)
    }

    // Sort by dependencies
    testCases = loader.sortByDependencies(testCases)

    console.log(`  Found ${testCases.length} test cases`)

    if (testCases.length === 0) {
      console.log('\nNo test cases found!')
      process.exit(1)
    }

    // Dry run
    if (options.dryRun) {
      console.log('\nDRY RUN - Would execute:')
      for (const tc of testCases) {
        console.log(`  ${tc.id}: ${tc.name}`)
        for (const step of tc.steps) {
          console.log(`    - ${step.name}: ${step.command}`)
        }
      }
      process.exit(0)
    }

    // Execute tests
    console.log('\nExecuting tests...')
    const workers = parseInt(options.workers)
    const results = await executor.executeAll(testCases, workers)

    // Judge results
    console.log('\nJudging results...')
    let judgments
    if (options.llm === false) {
      console.log('  Using simple exit code check (--no-llm)')
      judgments = results.map(r => judge.simpleJudge(r))
    } else {
      try {
        judgments = await judge.judgeResults(results)
      } catch (error) {
        console.error('  LLM judging failed, falling back to simple check:', error)
        judgments = results.map(r => judge.simpleJudge(r))
      }
    }

    // Create reports
    const reports = Reporter.createReports(results, judgments)

    // Output results
    switch (options.output) {
      case 'json': {
        const json = Reporter.toJSON(reports)
        console.log(json)
        writeFileSync('test-results.json', json)
        console.log('\nResults written to test-results.json')
        break
      }

      case 'junit': {
        const junit = Reporter.toJUnit(reports)
        writeFileSync('test-results.xml', junit)
        console.log('\nResults written to test-results.xml')
        break
      }

      case 'console':
      default:
        Reporter.toConsole(reports)
        break
    }

    // Report to TestLink
    if (options.reportTestlink && options.testlinkApiKey) {
      const testlinkReporter = new TestLinkReporter(
        options.testlinkUrl,
        options.testlinkApiKey
      )
      // Would need plan ID and build ID
      // await testlinkReporter.reportResults(reports, planId, buildId)
      console.log('\nTestLink reporting not yet implemented')
    }

    // Exit with appropriate code
    const failed = reports.filter(r => !r.pass).length
    process.exit(failed > 0 ? 1 : 0)
  })

program
  .command('list')
  .description('List all test cases')
  .option('--testcases-dir <dir>', 'Test cases directory', defaultTestcasesDir)
  .action(async (options) => {
    const loader = new TestLoader(options.testcasesDir)
    const testCases = await loader.loadAll()

    const grouped = loader.groupBySuite(testCases)

    console.log('Available Test Cases:\n')
    for (const [suite, cases] of grouped) {
      console.log(`${suite.toUpperCase()}:`)
      for (const tc of cases) {
        console.log(`  ${tc.id}: ${tc.name}`)
      }
      console.log()
    }

    console.log(`Total: ${testCases.length} test cases`)
  })

program.parse()
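Usage note (not part of the diff): the CLI above registers run and list subcommands via commander. Assuming the tests/ package is compiled with tsc so that a dist/cli.js exists (the build setup is not shown in this commit), invocations might look like:

    node dist/cli.js list
    node dist/cli.js run --suite inference --output junit --no-llm
    node dist/cli.js run --id TC-XXX-001 --workers 2 --ollama-model gemma3:4b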
tests/src/executor.ts (new file, 119 lines)
@@ -0,0 +1,119 @@
import { exec } from 'child_process'
import { promisify } from 'util'
import { TestCase, TestResult, StepResult } from './types.js'

const execAsync = promisify(exec)

export class TestExecutor {
  private workingDir: string

  constructor(workingDir: string = process.cwd()) {
    this.workingDir = workingDir
  }

  async executeStep(command: string, timeout: number): Promise<StepResult> {
    const startTime = Date.now()
    let stdout = ''
    let stderr = ''
    let exitCode = 0

    try {
      const result = await execAsync(command, {
        cwd: this.workingDir,
        timeout,
        maxBuffer: 10 * 1024 * 1024, // 10MB buffer
        shell: '/bin/bash'
      })
      stdout = result.stdout
      stderr = result.stderr
    } catch (error: any) {
      stdout = error.stdout || ''
      stderr = error.stderr || error.message || 'Unknown error'
      exitCode = error.code || 1
    }

    const duration = Date.now() - startTime

    return {
      name: '',
      command,
      stdout,
      stderr,
      exitCode,
      duration
    }
  }

  async executeTestCase(testCase: TestCase): Promise<TestResult> {
    const startTime = Date.now()
    const stepResults: StepResult[] = []

    console.log(`  Executing: ${testCase.id} - ${testCase.name}`)

    for (const step of testCase.steps) {
      console.log(`    Step: ${step.name}`)

      const timeout = step.timeout || testCase.timeout
      const result = await this.executeStep(step.command, timeout)
      result.name = step.name

      stepResults.push(result)

      // Log step result
      if (result.exitCode === 0) {
        console.log(`      Exit: ${result.exitCode} (${result.duration}ms)`)
      } else {
        console.log(`      Exit: ${result.exitCode} (FAILED, ${result.duration}ms)`)
      }
    }

    const totalDuration = Date.now() - startTime

    // Combine all logs
    const logs = stepResults.map(r => {
      return `=== Step: ${r.name} ===
Command: ${r.command}
Exit Code: ${r.exitCode}
Duration: ${r.duration}ms

STDOUT:
${r.stdout || '(empty)'}

STDERR:
${r.stderr || '(empty)'}
`
    }).join('\n' + '='.repeat(50) + '\n')

    return {
      testCase,
      steps: stepResults,
      totalDuration,
      logs
    }
  }

  async executeAll(testCases: TestCase[], concurrency: number = 1): Promise<TestResult[]> {
    const results: TestResult[] = []

    if (concurrency === 1) {
      // Sequential execution
      for (const tc of testCases) {
        const result = await this.executeTestCase(tc)
        results.push(result)
      }
    } else {
      // Parallel execution with p-limit
      const pLimit = (await import('p-limit')).default
      const limit = pLimit(concurrency)

      const promises = testCases.map(tc =>
        limit(() => this.executeTestCase(tc))
      )

      const parallelResults = await Promise.all(promises)
      results.push(...parallelResults)
    }

    return results
  }
}
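A minimal usage sketch of TestExecutor (the working directory and command are hypothetical, shown only to illustrate the StepResult shape; top-level await assumes an ES module context, which this package already uses):

    import { TestExecutor } from './executor.js'

    const executor = new TestExecutor('/work/ollama37') // assumed checkout path
    const step = await executor.executeStep('echo hello', 5000)
    // step resembles: { name: '', command: 'echo hello', stdout: 'hello\n', stderr: '', exitCode: 0, duration: <ms> }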
tests/src/judge.ts (new file, 146 lines)
@@ -0,0 +1,146 @@
import axios from 'axios'
import { TestResult, Judgment } from './types.js'

export class LLMJudge {
  private ollamaUrl: string
  private model: string
  private batchSize: number

  constructor(ollamaUrl: string = 'http://localhost:11434', model: string = 'gemma3:4b') {
    this.ollamaUrl = ollamaUrl
    this.model = model
    this.batchSize = 5 // Judge 5 tests per LLM call
  }

  private buildPrompt(results: TestResult[]): string {
    const testsSection = results.map((r, i) => {
      return `
### Test ${i + 1}: ${r.testCase.id} - ${r.testCase.name}

**Criteria:**
${r.testCase.criteria}

**Execution Logs:**
\`\`\`
${r.logs.substring(0, 3000)}${r.logs.length > 3000 ? '\n... (truncated)' : ''}
\`\`\`
`
    }).join('\n---\n')

    return `You are a test evaluation judge. Analyze the following test results and determine if each test passed or failed based on the criteria provided.

For each test, examine:
1. The expected criteria
2. The actual execution logs (stdout, stderr, exit codes)
3. Whether the output meets the criteria (use fuzzy matching for AI outputs)

${testsSection}

Respond with a JSON array containing one object per test:
[
{"testId": "TC-XXX-001", "pass": true, "reason": "Brief explanation"},
{"testId": "TC-XXX-002", "pass": false, "reason": "Brief explanation"}
]

Important:
- For AI-generated text, accept reasonable variations (e.g., "4", "four", "The answer is 4" are all valid for math questions)
- For build/runtime tests, check exit codes and absence of error messages
- Be lenient with formatting differences, focus on semantic correctness

Respond ONLY with the JSON array, no other text.`
  }

  async judgeResults(results: TestResult[]): Promise<Judgment[]> {
    const allJudgments: Judgment[] = []

    // Process in batches
    for (let i = 0; i < results.length; i += this.batchSize) {
      const batch = results.slice(i, i + this.batchSize)
      console.log(`  Judging batch ${Math.floor(i / this.batchSize) + 1}/${Math.ceil(results.length / this.batchSize)}...`)

      try {
        const judgments = await this.judgeBatch(batch)
        allJudgments.push(...judgments)
      } catch (error) {
        console.error(`  Failed to judge batch:`, error)
        // Mark all tests in batch as failed
        for (const r of batch) {
          allJudgments.push({
            testId: r.testCase.id,
            pass: false,
            reason: 'LLM judgment failed: ' + String(error)
          })
        }
      }
    }

    return allJudgments
  }

  private async judgeBatch(results: TestResult[]): Promise<Judgment[]> {
    const prompt = this.buildPrompt(results)

    const response = await axios.post(`${this.ollamaUrl}/api/generate`, {
      model: this.model,
      prompt,
      stream: false,
      options: {
        temperature: 0.1, // Low temperature for consistent judging
        num_predict: 1000
      }
    }, {
      timeout: 120000 // 2 minute timeout
    })

    const responseText = response.data.response

    // Extract JSON from response
    const jsonMatch = responseText.match(/\[[\s\S]*\]/)
    if (!jsonMatch) {
      throw new Error('No JSON array found in LLM response')
    }

    try {
      const judgments = JSON.parse(jsonMatch[0]) as Judgment[]

      // Validate and fill missing
      const resultIds = results.map(r => r.testCase.id)
      const judgedIds = new Set(judgments.map(j => j.testId))

      // Add missing judgments
      for (const id of resultIds) {
        if (!judgedIds.has(id)) {
          judgments.push({
            testId: id,
            pass: false,
            reason: 'No judgment provided by LLM'
          })
        }
      }

      return judgments
    } catch (parseError) {
      throw new Error(`Failed to parse LLM response: ${responseText.substring(0, 200)}`)
    }
  }

  // Fallback: Simple rule-based judgment (no LLM)
  simpleJudge(result: TestResult): Judgment {
    const allStepsPassed = result.steps.every(s => s.exitCode === 0)

    if (allStepsPassed) {
      return {
        testId: result.testCase.id,
        pass: true,
        reason: 'All steps completed with exit code 0'
      }
    } else {
      const failedSteps = result.steps.filter(s => s.exitCode !== 0)
      return {
        testId: result.testCase.id,
        pass: false,
        reason: `Steps failed: ${failedSteps.map(s => s.name).join(', ')}`
      }
    }
  }
}
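For reference, a well-formed reply that judgeBatch can parse is any text containing a JSON array whose objects match the Judgment interface, e.g. (values illustrative):

    [
      {"testId": "TC-XXX-001", "pass": true, "reason": "Build exited 0 with no errors"},
      {"testId": "TC-XXX-002", "pass": false, "reason": "Output did not contain the expected answer"}
    ]

Any test the model omits is back-filled as a failure with the reason "No judgment provided by LLM", and a batch that fails entirely is converted into per-test failure entries rather than aborting the run.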
tests/src/loader.ts (new file, 91 lines)
@@ -0,0 +1,91 @@
import { readFileSync } from 'fs'
import { glob } from 'glob'
import yaml from 'js-yaml'
import path from 'path'
import { TestCase } from './types.js'

export class TestLoader {
  private testcasesDir: string

  constructor(testcasesDir: string = './testcases') {
    this.testcasesDir = testcasesDir
  }

  async loadAll(): Promise<TestCase[]> {
    const pattern = path.join(this.testcasesDir, '**/*.yml')
    const files = await glob(pattern)

    const testCases: TestCase[] = []

    for (const file of files) {
      try {
        const content = readFileSync(file, 'utf-8')
        const testCase = yaml.load(content) as TestCase

        // Set defaults
        testCase.timeout = testCase.timeout || 60000
        testCase.dependencies = testCase.dependencies || []
        testCase.priority = testCase.priority || 1

        testCases.push(testCase)
      } catch (error) {
        console.error(`Failed to load ${file}:`, error)
      }
    }

    return testCases
  }

  async loadBySuite(suite: string): Promise<TestCase[]> {
    const all = await this.loadAll()
    return all.filter(tc => tc.suite === suite)
  }

  async loadById(id: string): Promise<TestCase | undefined> {
    const all = await this.loadAll()
    return all.find(tc => tc.id === id)
  }

  // Sort test cases by dependencies (topological sort)
  sortByDependencies(testCases: TestCase[]): TestCase[] {
    const sorted: TestCase[] = []
    const visited = new Set<string>()
    const idMap = new Map(testCases.map(tc => [tc.id, tc]))

    const visit = (tc: TestCase) => {
      if (visited.has(tc.id)) return
      visited.add(tc.id)

      // Visit dependencies first
      for (const depId of tc.dependencies) {
        const dep = idMap.get(depId)
        if (dep) visit(dep)
      }

      sorted.push(tc)
    }

    // Sort by priority first, then by dependencies
    const byPriority = [...testCases].sort((a, b) => a.priority - b.priority)
    for (const tc of byPriority) {
      visit(tc)
    }

    return sorted
  }

  // Group test cases by suite for parallel execution
  groupBySuite(testCases: TestCase[]): Map<string, TestCase[]> {
    const groups = new Map<string, TestCase[]>()

    for (const tc of testCases) {
      const suite = tc.suite
      if (!groups.has(suite)) {
        groups.set(suite, [])
      }
      groups.get(suite)!.push(tc)
    }

    return groups
  }
}
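Since loadAll() reads every **/*.yml file under the test cases directory and casts it to TestCase, each file needs to deserialize into roughly the following shape, shown here as the parsed TypeScript value (a hypothetical example; the field values are illustrative, not taken from this commit):

    import { TestCase } from './types.js'

    const example: TestCase = {
      id: 'TC-BUILD-001',
      name: 'Go build completes',
      suite: 'build',
      priority: 1,
      timeout: 600000,
      dependencies: [],
      steps: [
        { name: 'build', command: 'go build ./...', timeout: 600000 }
      ],
      criteria: 'The build exits with code 0 and prints no compiler errors.'
    }

timeout, dependencies and priority may be omitted in the YAML; loadAll() falls back to 60000 ms, an empty dependency list and priority 1.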
tests/src/reporter.ts (new file, 138 lines)
@@ -0,0 +1,138 @@
import axios from 'axios'
import { TestReport, Judgment, TestResult } from './types.js'

export class Reporter {
  // Console reporter
  static toConsole(reports: TestReport[]): void {
    console.log('\n' + '='.repeat(60))
    console.log('TEST RESULTS')
    console.log('='.repeat(60))

    const passed = reports.filter(r => r.pass)
    const failed = reports.filter(r => !r.pass)

    for (const report of reports) {
      const status = report.pass ? '\x1b[32mPASS\x1b[0m' : '\x1b[31mFAIL\x1b[0m'
      console.log(`[${status}] ${report.testId}: ${report.name}`)
      console.log(`  Reason: ${report.reason}`)
      console.log(`  Duration: ${report.duration}ms`)
    }

    console.log('\n' + '-'.repeat(60))
    console.log(`Total: ${reports.length} | Passed: ${passed.length} | Failed: ${failed.length}`)
    console.log('='.repeat(60))
  }

  // JSON reporter
  static toJSON(reports: TestReport[]): string {
    return JSON.stringify({
      summary: {
        total: reports.length,
        passed: reports.filter(r => r.pass).length,
        failed: reports.filter(r => !r.pass).length,
        timestamp: new Date().toISOString()
      },
      results: reports
    }, null, 2)
  }

  // JUnit XML reporter (for CI/CD integration)
  static toJUnit(reports: TestReport[]): string {
    const escapeXml = (s: string) => s
      .replace(/&/g, '&amp;')
      .replace(/</g, '&lt;')
      .replace(/>/g, '&gt;')
      .replace(/"/g, '&quot;')
      .replace(/'/g, '&apos;')

    const testcases = reports.map(r => {
      if (r.pass) {
        return `  <testcase name="${escapeXml(r.testId)}: ${escapeXml(r.name)}" classname="${r.suite}" time="${r.duration / 1000}"/>`
      } else {
        return `  <testcase name="${escapeXml(r.testId)}: ${escapeXml(r.name)}" classname="${r.suite}" time="${r.duration / 1000}">
    <failure message="${escapeXml(r.reason)}">${escapeXml(r.logs.substring(0, 1000))}</failure>
  </testcase>`
      }
    }).join('\n')

    const failures = reports.filter(r => !r.pass).length
    const time = reports.reduce((sum, r) => sum + r.duration, 0) / 1000

    return `<?xml version="1.0" encoding="UTF-8"?>
<testsuite name="ollama37-tests" tests="${reports.length}" failures="${failures}" time="${time}">
${testcases}
</testsuite>`
  }

  // Combine results and judgments into reports
  static createReports(results: TestResult[], judgments: Judgment[]): TestReport[] {
    const judgmentMap = new Map(judgments.map(j => [j.testId, j]))

    return results.map(result => {
      const judgment = judgmentMap.get(result.testCase.id)

      return {
        testId: result.testCase.id,
        name: result.testCase.name,
        suite: result.testCase.suite,
        pass: judgment?.pass ?? false,
        reason: judgment?.reason ?? 'No judgment',
        duration: result.totalDuration,
        logs: result.logs
      }
    })
  }
}

// TestLink reporter
export class TestLinkReporter {
  private url: string
  private apiKey: string

  constructor(url: string, apiKey: string) {
    this.url = url
    this.apiKey = apiKey
  }

  async reportResults(
    reports: TestReport[],
    planId: string,
    buildId: string
  ): Promise<void> {
    console.log('\nReporting to TestLink...')

    for (const report of reports) {
      try {
        await this.reportTestExecution(report, planId, buildId)
        console.log(`  Reported: ${report.testId}`)
      } catch (error) {
        console.error(`  Failed to report ${report.testId}:`, error)
      }
    }
  }

  private async reportTestExecution(
    report: TestReport,
    planId: string,
    buildId: string
  ): Promise<void> {
    // Extract numeric test case ID from external ID (e.g., "ollama37-8" -> need internal ID)
    // This would need to be mapped from TestLink

    const status = report.pass ? 'p' : 'f' // p=passed, f=failed, b=blocked

    // Note: This uses the TestLink XML-RPC API
    // In practice, you'd use the testlink-mcp or direct API calls
    const payload = {
      devKey: this.apiKey,
      testcaseexternalid: report.testId,
      testplanid: planId,
      buildid: buildId,
      status,
      notes: `${report.reason}\n\nDuration: ${report.duration}ms\n\nLogs:\n${report.logs.substring(0, 4000)}`
    }

    // For now, just log - actual implementation would call TestLink API
    console.log(`  Would report: ${report.testId} = ${status}`)
  }
}
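For a failing test, toJUnit emits an entry along these lines (illustrative values), which is why escapeXml is applied to the name, reason and logs before they land in attribute values and element text:

      <testcase name="TC-INF-001: basic math prompt" classname="inference" time="1.234">
        <failure message="Expected &quot;4&quot; in the output &amp; found none">(first 1000 characters of logs)</failure>
      </testcase>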
tests/src/types.ts (new file, 66 lines)
@@ -0,0 +1,66 @@
// Test case definition
export interface TestStep {
  name: string
  command: string
  timeout?: number
}

export interface TestCase {
  id: string
  name: string
  suite: string
  priority: number
  timeout: number
  dependencies: string[]
  steps: TestStep[]
  criteria: string
}

// Execution results
export interface StepResult {
  name: string
  command: string
  stdout: string
  stderr: string
  exitCode: number
  duration: number
}

export interface TestResult {
  testCase: TestCase
  steps: StepResult[]
  totalDuration: number
  logs: string
}

// LLM judgment
export interface Judgment {
  testId: string
  pass: boolean
  reason: string
}

// Final report
export interface TestReport {
  testId: string
  name: string
  suite: string
  pass: boolean
  reason: string
  duration: number
  logs: string
}

// Runner options
export interface RunnerOptions {
  suite?: string
  id?: string
  workers: number
  dryRun: boolean
  output: 'console' | 'json' | 'junit'
  reportTestlink: boolean
  ollamaUrl: string
  ollamaModel: string
  testlinkUrl: string
  testlinkApiKey: string
}