ollama37/cmd/test-runner/config.go
Shang Chieh Tseng d59284d30a Implement Go-based test runner framework for Tesla K80 testing
Add comprehensive test orchestration framework:

Test Runner (cmd/test-runner/):
- config.go: YAML configuration loading and validation
- server.go: Ollama server lifecycle management (start/stop/health checks)
- monitor.go: Real-time log monitoring with pattern matching
- test.go: Model testing via Ollama API (pull, chat, validation)
- validate.go: Test result validation (GPU usage, response quality, log analysis)
- report.go: Structured reporting (JSON and Markdown formats)
- main.go: CLI interface with run/validate/list commands

Test Configurations (test/config/):
- models.yaml: Full test suite with quick/full/stress profiles
- quick.yaml: Fast smoke test with gemma2:2b (a schema sketch follows below)
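
A hedged sketch of what a quick.yaml-style profile could look like, derived only from the struct tags in config.go below; the model name is taken from the list above, while the prompts, log patterns, and numeric values are illustrative assumptions. Durations are written as integer nanoseconds, since plain gopkg.in/yaml.v3 decodes time.Duration fields from integers rather than from strings such as "30s" unless a custom unmarshaler is added.

profiles:
  quick:
    timeout: 600000000000          # 10 minutes, as integer nanoseconds (assumed value)
    models:
      - name: gemma2:2b
        prompts:
          - "Why is the sky blue?"  # placeholder prompt
        min_response_tokens: 10
        max_response_tokens: 500
        timeout: 300000000000       # 5 minutes (assumed value)
validation:
  gpu_required: true
  single_gpu_preferred: true
  check_patterns:
    success:
      - "offloaded .* layers to GPU"   # placeholder pattern
    failure:
      - "CUDA error"                   # placeholder pattern
server:
  host: localhost
  port: 11434
reporting:
  formats: [json, markdown]
  include_logs: true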

Updated Workflow:
- tesla-k80-tests.yml: Use test-runner instead of shell scripts
- Run quick tests first, then full tests if passing
- Generate structured JSON reports for pass/fail checking
- Upload test results as artifacts

Features:
- Multi-model testing with configurable profiles
- API-based testing (not CLI commands)
- Real-time log monitoring for GPU events and errors
- Automatic validation of GPU loading and response quality
- Structured JSON and Markdown reports
- Graceful server lifecycle management
- Interrupt handling (Ctrl+C cleanup)

Addresses limitations of shell-based testing by providing:
- Better error handling and reporting
- Programmatic test orchestration
- Reusable test framework
- Clear pass/fail criteria
- Detailed test metrics and timing
2025-10-30 11:04:48 +08:00

155 lines · 4.3 KiB · Go

package main

import (
	"fmt"
	"os"
	"time"

	"gopkg.in/yaml.v3"
)

// Config represents the complete test configuration
type Config struct {
	Profiles   map[string]Profile `yaml:"profiles"`
	Validation Validation         `yaml:"validation"`
	Server     ServerConfig       `yaml:"server"`
	Reporting  ReportingConfig    `yaml:"reporting"`
}

// Profile represents a test profile with multiple models
type Profile struct {
	Timeout time.Duration `yaml:"timeout"`
	Models  []ModelTest   `yaml:"models"`
}

// ModelTest represents a single model test configuration
type ModelTest struct {
	Name              string        `yaml:"name"`
	Prompts           []string      `yaml:"prompts"`
	MinResponseTokens int           `yaml:"min_response_tokens"`
	MaxResponseTokens int           `yaml:"max_response_tokens"`
	Timeout           time.Duration `yaml:"timeout"`
}

// Validation represents validation rules
type Validation struct {
	GPURequired        bool          `yaml:"gpu_required"`
	SingleGPUPreferred bool          `yaml:"single_gpu_preferred"`
	CheckPatterns      CheckPatterns `yaml:"check_patterns"`
}

// CheckPatterns defines log patterns to match
type CheckPatterns struct {
	Success []string `yaml:"success"`
	Failure []string `yaml:"failure"`
	Warning []string `yaml:"warning"`
}

// ServerConfig represents server configuration
type ServerConfig struct {
	Host                string        `yaml:"host"`
	Port                int           `yaml:"port"`
	StartupTimeout      time.Duration `yaml:"startup_timeout"`
	HealthCheckInterval time.Duration `yaml:"health_check_interval"`
	HealthCheckEndpoint string        `yaml:"health_check_endpoint"`
}

// ReportingConfig represents reporting configuration
type ReportingConfig struct {
	Formats         []string `yaml:"formats"`
	IncludeLogs     bool     `yaml:"include_logs"`
	LogExcerptLines int      `yaml:"log_excerpt_lines"`
}

// LoadConfig loads and validates a test configuration from a YAML file
func LoadConfig(path string) (*Config, error) {
	data, err := os.ReadFile(path)
	if err != nil {
		return nil, fmt.Errorf("failed to read config file: %w", err)
	}

	var config Config
	if err := yaml.Unmarshal(data, &config); err != nil {
		return nil, fmt.Errorf("failed to parse config YAML: %w", err)
	}

	// Set defaults
	if config.Server.Host == "" {
		config.Server.Host = "localhost"
	}
	if config.Server.Port == 0 {
		config.Server.Port = 11434
	}
	if config.Server.StartupTimeout == 0 {
		config.Server.StartupTimeout = 30 * time.Second
	}
	if config.Server.HealthCheckInterval == 0 {
		config.Server.HealthCheckInterval = 1 * time.Second
	}
	if config.Server.HealthCheckEndpoint == "" {
		config.Server.HealthCheckEndpoint = "/api/tags"
	}
	if config.Reporting.LogExcerptLines == 0 {
		config.Reporting.LogExcerptLines = 50
	}
	if len(config.Reporting.Formats) == 0 {
		config.Reporting.Formats = []string{"json"}
	}

	// Validate config
	if err := validateConfig(&config); err != nil {
		return nil, fmt.Errorf("invalid config: %w", err)
	}

	return &config, nil
}

// validateConfig validates the loaded configuration
func validateConfig(config *Config) error {
	if len(config.Profiles) == 0 {
		return fmt.Errorf("no profiles defined in config")
	}

	for profileName, profile := range config.Profiles {
		if len(profile.Models) == 0 {
			return fmt.Errorf("profile %q has no models defined", profileName)
		}
		for i, model := range profile.Models {
			if model.Name == "" {
				return fmt.Errorf("profile %q model %d has no name", profileName, i)
			}
			if len(model.Prompts) == 0 {
				return fmt.Errorf("profile %q model %q has no prompts", profileName, model.Name)
			}
			if model.Timeout == 0 {
				return fmt.Errorf("profile %q model %q has no timeout", profileName, model.Name)
			}
		}
		if profile.Timeout == 0 {
			return fmt.Errorf("profile %q has no timeout", profileName)
		}
	}

	return nil
}

// GetProfile returns a specific profile by name
func (c *Config) GetProfile(name string) (*Profile, error) {
	profile, ok := c.Profiles[name]
	if !ok {
		return nil, fmt.Errorf("profile %q not found", name)
	}
	return &profile, nil
}

// ListProfiles returns a list of all profile names
func (c *Config) ListProfiles() []string {
	profiles := make([]string, 0, len(c.Profiles))
	for name := range c.Profiles {
		profiles = append(profiles, name)
	}
	return profiles
}
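
A minimal usage sketch, assuming it lives in the same package as config.go (with fmt imported); the config path mirrors the test/config/quick.yaml file mentioned above, and the "quick" profile name is a placeholder, not confirmed by the repository.

// exampleRun is a hypothetical caller of the config API above.
func exampleRun() error {
	cfg, err := LoadConfig("test/config/quick.yaml")
	if err != nil {
		return err
	}

	// Show which profiles the file defines.
	fmt.Printf("available profiles: %v\n", cfg.ListProfiles())

	// Pick one profile and walk its model tests.
	profile, err := cfg.GetProfile("quick")
	if err != nil {
		return err
	}
	for _, m := range profile.Models {
		fmt.Printf("would test %s with %d prompt(s), timeout %s\n",
			m.Name, len(m.Prompts), m.Timeout)
	}
	return nil
}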