From 32737ee6d647e62161891c1fb2ff683e54c7f103 Mon Sep 17 00:00:00 2001 From: Mikhail Kornilovich Date: Sun, 5 Apr 2026 18:20:42 +0300 Subject: [PATCH] init: --- .gitignore | 2 + cmd/cli/main.go | 112 ++++ dsl/ast.go | 104 ++++ dsl/lexer.go | 387 +++++++++++++ dsl/parser.go | 687 +++++++++++++++++++++++ dsl/parser_test.go | 217 +++++++ example/c-sum/.gitea/workflows/judge.yml | 148 +++++ example/c-sum/README.md | 49 ++ example/c-sum/solution.c | 16 + example/c-sum/sum.jdg | 75 +++ example/lab1.jdg | 60 ++ example/solution/main.go | 34 ++ go.mod | 3 + reporter/reporter.go | 101 ++++ runner/expander.go | 119 ++++ runner/matcher.go | 134 +++++ runner/result.go | 66 +++ runner/runner.go | 405 +++++++++++++ 18 files changed, 2719 insertions(+) create mode 100644 .gitignore create mode 100644 cmd/cli/main.go create mode 100644 dsl/ast.go create mode 100644 dsl/lexer.go create mode 100644 dsl/parser.go create mode 100644 dsl/parser_test.go create mode 100644 example/c-sum/.gitea/workflows/judge.yml create mode 100644 example/c-sum/README.md create mode 100644 example/c-sum/solution.c create mode 100644 example/c-sum/sum.jdg create mode 100644 example/lab1.jdg create mode 100644 example/solution/main.go create mode 100644 go.mod create mode 100644 reporter/reporter.go create mode 100644 runner/expander.go create mode 100644 runner/matcher.go create mode 100644 runner/result.go create mode 100644 runner/runner.go diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..36cd02e --- /dev/null +++ b/.gitignore @@ -0,0 +1,2 @@ +example/c-sum/solution +example/solution/solution \ No newline at end of file diff --git a/cmd/cli/main.go b/cmd/cli/main.go new file mode 100644 index 0000000..d3ee1fd --- /dev/null +++ b/cmd/cli/main.go @@ -0,0 +1,112 @@ +package main + +import ( + "fmt" + "os" + "strings" + + "github.com/Mond1c/judge/dsl" + "github.com/Mond1c/judge/reporter" + "github.com/Mond1c/judge/runner" +) + +const usage = `judge — CI/CD testing system for student 
solutions + +Usage: + judge [flags] + +Flags: + --json output as JSON instead of text + --wrapper exec wrapper (e.g. "valgrind --error-exitcode=99") + --binary name of executable produced by build (overrides .jdg) + --help show help + +Example: + judge lab1.jdg ./student-solution + judge lab1.jdg ./student-solution --json + judge lab1.jdg ./student-solution --wrapper "valgrind --error-exitcode=99" +` + +func main() { + args := os.Args[1:] + + if len(args) == 0 || contains(args, "--help") || contains(args, "-h") { + fmt.Print(usage) + os.Exit(0) + } + + if len(args) < 2 { + fmt.Fprintf(os.Stderr, "error: need and \n\n%s", usage) + os.Exit(1) + } + + testFile := args[0] + solutionDir := args[1] + jsonOutput := contains(args, "--json") + wrapper := flagValue(args, "--wrapper") + binary := flagValue(args, "--binary") + + src, err := os.ReadFile(testFile) + if err != nil { + fatalf("cannot read %q: %v", testFile, err) + } + + f, warns, err := dsl.Parse(string(src)) + if err != nil { + fatalf("parse error in %q:\n %v", testFile, err) + } + for _, w := range warns { + fmt.Fprintf(os.Stderr, "warning: %s\n", w) + } + + if _, err := os.Stat(solutionDir); err != nil { + fatalf("solution dir %q not found: %v", solutionDir, err) + } + + r := runner.New(f, runner.Config{ + WorkDir: solutionDir, + BinaryName: binary, + Wrapper: wrapper, + }) + result := r.Run() + + if jsonOutput { + if err := reporter.JSON(os.Stdout, result); err != nil { + fatalf("json output error: %v", err) + } + } else { + reporter.Text(os.Stdout, result) + } + + if result.TotalScore < 0.9999 { + os.Exit(1) + } +} + +func fatalf(msg string, args ...any) { + fmt.Fprintf(os.Stderr, "error: "+msg+"\n", args...) + os.Exit(1) +} + +// flagValue returns the value of --name or --name=value, else "". 
// flagValue returns the value of --name or --name=value, else "".
func flagValue(args []string, name string) string {
	assigned := name + "="
	for i := 0; i < len(args); i++ {
		switch {
		case args[i] == name && i+1 < len(args):
			// Space-separated form: --name value
			return args[i+1]
		case strings.HasPrefix(args[i], assigned):
			// Inline form: --name=value
			return args[i][len(assigned):]
		}
	}
	return ""
}

// contains reports whether slice holds the string s.
func contains(slice []string, s string) bool {
	for i := range slice {
		if slice[i] == s {
			return true
		}
	}
	return false
}
+ Pattern string +} + +func (RegexMatcher) matcherNode() {} + +type NumericEpsMatcher struct { + Epsilon float64 + Value string +} + +func (NumericEpsMatcher) matcherNode() {} + +type AnyOrderMatcher struct { + Lines []string +} + +func (AnyOrderMatcher) matcherNode() {} + +type NoMatcher struct{} + +func (NoMatcher) matcherNode() {} diff --git a/dsl/lexer.go b/dsl/lexer.go new file mode 100644 index 0000000..da1b12d --- /dev/null +++ b/dsl/lexer.go @@ -0,0 +1,387 @@ +package dsl + +import ( + "fmt" + "strings" + "unicode" +) + +type TokenType int + +const ( + TOKEN_STRING TokenType = iota + TOKEN_IDENT + TOKEN_FLOAT + TOKEN_INT + TOKEN_DURATION + + TOKEN_LBRACE + TOKEN_RBRACE + TOKEN_LPAREN + TOKEN_RPAREN + TOKEN_ASSIGN + TOKEN_TILDE + + TOKEN_EOF +) + +func (t TokenType) String() string { + switch t { + case TOKEN_STRING: + return "STRING" + case TOKEN_IDENT: + return "IDENT" + case TOKEN_FLOAT: + return "FLOAT" + case TOKEN_INT: + return "INT" + case TOKEN_DURATION: + return "DURATION" + case TOKEN_LBRACE: + return "{" + case TOKEN_RBRACE: + return "}" + case TOKEN_LPAREN: + return "(" + case TOKEN_RPAREN: + return ")" + case TOKEN_ASSIGN: + return "=" + case TOKEN_TILDE: + return "~" + case TOKEN_EOF: + return "EOF" + default: + return "UNKNOWN" + } +} + +type Token struct { + Type TokenType + Value string + Line int + Col int +} + +func (t Token) String() string { + return fmt.Sprintf("Token(%s, %q, %d:%d)", t.Type, t.Value, t.Line, t.Col) +} + +type Lexer struct { + src []rune + pos int + line int + col int +} + +func NewLexer(src string) *Lexer { + return &Lexer{src: []rune(src), pos: 0, line: 1, col: 1} +} + +func (l *Lexer) peek() (rune, bool) { + if l.pos >= len(l.src) { + return 0, false + } + return l.src[l.pos], true +} + +func (l *Lexer) peekAt(offset int) (rune, bool) { + i := l.pos + offset + if i >= len(l.src) { + return 0, false + } + return l.src[i], true +} + +func (l *Lexer) advance() rune { + ch := l.src[l.pos] + l.pos++ + if ch == '\n' { + 
l.line++ + l.col = 1 + } else { + l.col++ + } + return ch +} + +func (l *Lexer) skipWhitespaceAndComments() { + for { + ch, ok := l.peek() + if !ok { + return + } + + if ch == '/' { + next, ok2 := l.peekAt(1) + if ok2 && next == '/' { + for { + c, ok := l.peek() + if !ok || c == '\n' { + break + } + l.advance() + } + continue + } + } + + if unicode.IsSpace(ch) { + l.advance() + continue + } + break + } +} + +func (l *Lexer) Tokenize() ([]Token, error) { + var tokens []Token + for { + l.skipWhitespaceAndComments() + ch, ok := l.peek() + if !ok { + tokens = append(tokens, Token{Type: TOKEN_EOF, Line: l.line, Col: l.col}) + break + } + + line, col := l.line, l.col + + switch { + case ch == '{': + l.advance() + tokens = append(tokens, Token{TOKEN_LBRACE, "{", line, col}) + case ch == '}': + l.advance() + tokens = append(tokens, Token{TOKEN_RBRACE, "}", line, col}) + case ch == '(': + l.advance() + tokens = append(tokens, Token{TOKEN_LPAREN, "(", line, col}) + case ch == ')': + l.advance() + tokens = append(tokens, Token{TOKEN_RPAREN, ")", line, col}) + case ch == '=': + l.advance() + tokens = append(tokens, Token{TOKEN_ASSIGN, "=", line, col}) + case ch == '~': + l.advance() + tokens = append(tokens, Token{TOKEN_TILDE, "~", line, col}) + + case ch == '"': + // проверяем heredoc """ + if l.isHeredocStart() { + s, err := l.readHeredoc() + if err != nil { + return nil, err + } + tokens = append(tokens, Token{TOKEN_STRING, s, line, col}) + } else { + s, err := l.readString() + if err != nil { + return nil, err + } + tokens = append(tokens, Token{TOKEN_STRING, s, line, col}) + } + + case unicode.IsDigit(ch) || (ch == '-' && l.isNumberNext()): + tok, err := l.readNumberOrDuration(line, col) + if err != nil { + return nil, err + } + tokens = append(tokens, tok) + + case unicode.IsLetter(ch) || ch == '_': + ident := l.readIdent() + tokens = append(tokens, Token{TOKEN_IDENT, ident, line, col}) + + default: + return nil, fmt.Errorf("%d:%d: unexpected character %q", line, col, 
// dedentHeredoc normalizes the raw body of a """ ... """ block: the blank
// line right after the opening quotes and the whitespace-only line before
// the closing quotes are dropped, then the smallest common indent
// (spaces/tabs, counted per byte) of the non-blank lines is stripped from
// every line.
func dedentHeredoc(s string) string {
	lines := strings.Split(s, "\n")

	// Drop the blank first line (text usually starts on the next line).
	if len(lines) > 0 && strings.TrimSpace(lines[0]) == "" {
		lines = lines[1:]
	}
	// Drop the trailing indentation line holding the closing quotes.
	if n := len(lines); n > 0 && strings.TrimSpace(lines[n-1]) == "" {
		lines = lines[:n-1]
	}

	// Smallest indent across non-blank lines; blank lines don't count.
	minIndent := -1
	for _, ln := range lines {
		if strings.TrimSpace(ln) == "" {
			continue
		}
		ind := len(ln) - len(strings.TrimLeft(ln, " \t"))
		if minIndent < 0 || ind < minIndent {
			minIndent = ind
		}
	}
	if minIndent < 0 {
		minIndent = 0
	}

	out := make([]string, len(lines))
	for i, ln := range lines {
		if len(ln) >= minIndent {
			out[i] = ln[minIndent:]
		} else {
			out[i] = ln
		}
	}
	return strings.Join(out, "\n")
}
+ case 't': + buf.WriteByte('\t') + case '\\': + buf.WriteByte('\\') + case '"': + buf.WriteByte('"') + default: + return "", fmt.Errorf("unknown escape \\%c", esc) + } + continue + } + buf.WriteRune(l.advance()) + } + return buf.String(), nil +} + +func (l *Lexer) readIdent() string { + var buf strings.Builder + for { + ch, ok := l.peek() + if !ok { + break + } + if unicode.IsLetter(ch) || unicode.IsDigit(ch) || ch == '_' { + buf.WriteRune(l.advance()) + } else { + break + } + } + return buf.String() +} + +func (l *Lexer) readNumberOrDuration(line, col int) (Token, error) { + var buf strings.Builder + isFloat := false + + if ch, _ := l.peek(); ch == '-' { + buf.WriteRune(l.advance()) + } + + for { + ch, ok := l.peek() + if !ok { + break + } + if unicode.IsDigit(ch) { + buf.WriteRune(l.advance()) + } else if ch == '.' && !isFloat { + isFloat = true + buf.WriteRune(l.advance()) + } else { + break + } + } + + suffix := l.tryReadDurationSuffix() + if suffix != "" { + return Token{TOKEN_DURATION, buf.String() + suffix, line, col}, nil + } + + if isFloat { + return Token{TOKEN_FLOAT, buf.String(), line, col}, nil + } + return Token{TOKEN_INT, buf.String(), line, col}, nil +} + +func (l *Lexer) tryReadDurationSuffix() string { + ch, ok := l.peek() + if !ok { + return "" + } + if ch == 'm' { + next, ok2 := l.peekAt(1) + if ok2 && next == 's' { + l.advance() + l.advance() + return "ms" + } + l.advance() + return "m" + } + if ch == 's' { + l.advance() + return "s" + } + return "" +} diff --git a/dsl/parser.go b/dsl/parser.go new file mode 100644 index 0000000..3e717ae --- /dev/null +++ b/dsl/parser.go @@ -0,0 +1,687 @@ +package dsl + +import ( + "fmt" + "math" + "strconv" + "time" +) + +type Parser struct { + tokens []Token + pos int + warns []string +} + +func NewParser(tokens []Token) *Parser { + return &Parser{tokens: tokens} +} + +func (p *Parser) Warnings() []string { + return p.warns +} + +func (p *Parser) warn(msg string) { + p.warns = append(p.warns, msg) +} + +func 
(p *Parser) peek() Token { + if p.pos >= len(p.tokens) { + return Token{Type: TOKEN_EOF} + } + return p.tokens[p.pos] +} + +func (p *Parser) advance() Token { + t := p.peek() + if t.Type != TOKEN_EOF { + p.pos++ + } + return t +} + +func (p *Parser) expect(tt TokenType) (Token, error) { + t := p.peek() + if t.Type != tt { + return t, fmt.Errorf("%d:%d: expected %s, got %s (%q)", t.Line, t.Col, tt, t.Type, t.Value) + } + return p.advance(), nil +} + +func (p *Parser) expectIdent(val string) error { + t := p.peek() + if t.Type != TOKEN_IDENT || t.Value != val { + return fmt.Errorf("%d:%d: expected %q, got %q", t.Line, t.Col, val, t.Value) + } + p.advance() + return nil +} + +func (p *Parser) isIdent(val string) bool { + t := p.peek() + return t.Type == TOKEN_IDENT && t.Value == val +} + +func Parse(src string) (*File, []string, error) { + tokens, err := NewLexer(src).Tokenize() + if err != nil { + return nil, nil, err + } + parser := NewParser(tokens) + file, err := parser.parseFile() + if err != nil { + return nil, parser.Warnings(), err + } + return file, parser.Warnings(), nil +} + +func (p *Parser) parseFile() (*File, error) { + f := &File{} + + for p.peek().Type != TOKEN_EOF { + t := p.peek() + if t.Type != TOKEN_IDENT { + return nil, fmt.Errorf("%d:%d: unexpected token %q", t.Line, t.Col, t.Value) + } + + switch t.Value { + case "build": + p.advance() + s, err := p.expect(TOKEN_STRING) + if err != nil { + return nil, err + } + f.Build = s.Value + + case "build_linux": + p.advance() + s, err := p.expect(TOKEN_STRING) + if err != nil { + return nil, err + } + f.BuildLinux = s.Value + + case "build_windows": + p.advance() + s, err := p.expect(TOKEN_STRING) + if err != nil { + return nil, err + } + f.BuildWindows = s.Value + + case "build_darwin": + p.advance() + s, err := p.expect(TOKEN_STRING) + if err != nil { + return nil, err + } + f.BuildDarwin = s.Value + + case "binary": + p.advance() + if _, err := p.expect(TOKEN_ASSIGN); err != nil { + return nil, err + } 
+ s, err := p.expect(TOKEN_STRING) + if err != nil { + return nil, err + } + f.Binary = s.Value + + case "normalize_crlf": + p.advance() + if _, err := p.expect(TOKEN_ASSIGN); err != nil { + return nil, err + } + b, err := p.parseBool() + if err != nil { + return nil, err + } + f.NormalizeCRLF = b + + case "trim_trailing_ws": + p.advance() + if _, err := p.expect(TOKEN_ASSIGN); err != nil { + return nil, err + } + b, err := p.parseBool() + if err != nil { + return nil, err + } + f.TrimTrailingWS = b + + case "timeout": + p.advance() + d, err := p.parseDuration() + if err != nil { + return nil, err + } + f.Timeout = d + + case "group": + g, err := p.parseGroup(f.Timeout) + if err != nil { + return nil, err + } + f.Groups = append(f.Groups, g) + + default: + return nil, fmt.Errorf("%d:%d: unexpected keyword %q", t.Line, t.Col, t.Value) + } + } + + if err := p.validateWeights(f); err != nil { + return nil, err + } + + return f, nil +} + +func (p *Parser) validateWeights(f *File) error { + if len(f.Groups) == 0 { + return nil + } + sum := 0.0 + for _, g := range f.Groups { + sum += g.Weight + } + if math.Abs(sum-1.0) > 0.001 { + p.warn(fmt.Sprintf("group weights sum to %.4f, expected 1.0", sum)) + } + return nil +} + +func (p *Parser) parseGroup(defaultTimeout time.Duration) (*Group, error) { + if err := p.expectIdent("group"); err != nil { + return nil, err + } + if _, err := p.expect(TOKEN_LPAREN); err != nil { + return nil, err + } + name, err := p.expect(TOKEN_STRING) + if err != nil { + return nil, err + } + if _, err := p.expect(TOKEN_RPAREN); err != nil { + return nil, err + } + if _, err := p.expect(TOKEN_LBRACE); err != nil { + return nil, err + } + + g := &Group{ + Name: name.Value, + Timeout: defaultTimeout, + Env: map[string]string{}, + Scoring: ScoringPartial, + } + + for !p.isRBrace() { + t := p.peek() + if t.Type != TOKEN_IDENT { + return nil, fmt.Errorf("%d:%d: unexpected token %q in group", t.Line, t.Col, t.Value) + } + + switch t.Value { + case 
"weight": + p.advance() + if _, err := p.expect(TOKEN_ASSIGN); err != nil { + return nil, err + } + w, err := p.parseFloat() + if err != nil { + return nil, err + } + g.Weight = w + + case "timeout": + p.advance() + if _, err := p.expect(TOKEN_ASSIGN); err != nil { + return nil, err + } + d, err := p.parseDuration() + if err != nil { + return nil, err + } + g.Timeout = d + + case "scoring": + p.advance() + if _, err := p.expect(TOKEN_ASSIGN); err != nil { + return nil, err + } + s, err := p.expect(TOKEN_IDENT) + if err != nil { + return nil, err + } + switch s.Value { + case "partial": + g.Scoring = ScoringPartial + case "all_or_none": + g.Scoring = ScoringAllOrNone + default: + return nil, fmt.Errorf("%d:%d: unknown scoring mode %q", s.Line, s.Col, s.Value) + } + + case "env": + p.advance() + if _, err := p.expect(TOKEN_LPAREN); err != nil { + return nil, err + } + key, err := p.expect(TOKEN_STRING) + if err != nil { + return nil, err + } + if _, err := p.expect(TOKEN_RPAREN); err != nil { + return nil, err + } + if _, err := p.expect(TOKEN_ASSIGN); err != nil { + return nil, err + } + val, err := p.expect(TOKEN_STRING) + if err != nil { + return nil, err + } + g.Env[key.Value] = val.Value + + case "wrapper": + p.advance() + if _, err := p.expect(TOKEN_ASSIGN); err != nil { + return nil, err + } + s, err := p.expect(TOKEN_STRING) + if err != nil { + return nil, err + } + g.Wrapper = s.Value + + case "test": + test, err := p.parseTest(g.Timeout) + if err != nil { + return nil, err + } + g.Tests = append(g.Tests, test) + + case "pattern": + pat, err := p.parsePattern() + if err != nil { + return nil, err + } + g.Pattern = pat + + default: + return nil, fmt.Errorf("%d:%d: unexpected keyword %q in group", t.Line, t.Col, t.Value) + } + } + + if _, err := p.expect(TOKEN_RBRACE); err != nil { + return nil, err + } + return g, nil +} + +func (p *Parser) parseTest(defaultTimeout time.Duration) (*Test, error) { + if err := p.expectIdent("test"); err != nil { + return nil, 
err + } + if _, err := p.expect(TOKEN_LPAREN); err != nil { + return nil, err + } + name, err := p.expect(TOKEN_STRING) + if err != nil { + return nil, err + } + if _, err := p.expect(TOKEN_RPAREN); err != nil { + return nil, err + } + if _, err := p.expect(TOKEN_LBRACE); err != nil { + return nil, err + } + + zero := 0 + test := &Test{ + Name: name.Value, + Timeout: defaultTimeout, + Env: map[string]string{}, + InFiles: map[string]string{}, + OutFiles: map[string]string{}, + ExitCode: &zero, + Stdout: NoMatcher{}, + Stderr: NoMatcher{}, + } + + for !p.isRBrace() { + t := p.peek() + if t.Type != TOKEN_IDENT { + return nil, fmt.Errorf("%d:%d: unexpected token in test body", t.Line, t.Col) + } + + switch t.Value { + case "stdin": + p.advance() + if _, err := p.expect(TOKEN_ASSIGN); err != nil { + return nil, err + } + s, err := p.expect(TOKEN_STRING) + if err != nil { + return nil, err + } + test.Stdin = &s.Value + + case "stdout": + p.advance() + m, err := p.parseMatcherOrAssign() + if err != nil { + return nil, err + } + test.Stdout = m + + case "stderr": + p.advance() + m, err := p.parseMatcherOrAssign() + if err != nil { + return nil, err + } + test.Stderr = m + + case "args": + p.advance() + if _, err := p.expect(TOKEN_ASSIGN); err != nil { + return nil, err + } + args, err := p.parseStringList() + if err != nil { + return nil, err + } + test.Args = args + + case "exitCode": + p.advance() + if _, err := p.expect(TOKEN_ASSIGN); err != nil { + return nil, err + } + n, err := p.parseInt() + if err != nil { + return nil, err + } + test.ExitCode = &n + + case "timeout": + p.advance() + if _, err := p.expect(TOKEN_ASSIGN); err != nil { + return nil, err + } + d, err := p.parseDuration() + if err != nil { + return nil, err + } + test.Timeout = d + + case "wrapper": + p.advance() + if _, err := p.expect(TOKEN_ASSIGN); err != nil { + return nil, err + } + s, err := p.expect(TOKEN_STRING) + if err != nil { + return nil, err + } + test.Wrapper = s.Value + + case "env": + 
p.advance() + if _, err := p.expect(TOKEN_LPAREN); err != nil { + return nil, err + } + key, err := p.expect(TOKEN_STRING) + if err != nil { + return nil, err + } + if _, err := p.expect(TOKEN_RPAREN); err != nil { + return nil, err + } + if _, err := p.expect(TOKEN_ASSIGN); err != nil { + return nil, err + } + val, err := p.expect(TOKEN_STRING) + if err != nil { + return nil, err + } + test.Env[key.Value] = val.Value + + case "file": + p.advance() + if _, err := p.expect(TOKEN_LPAREN); err != nil { + return nil, err + } + fname, err := p.expect(TOKEN_STRING) + if err != nil { + return nil, err + } + if _, err := p.expect(TOKEN_RPAREN); err != nil { + return nil, err + } + if _, err := p.expect(TOKEN_ASSIGN); err != nil { + return nil, err + } + content, err := p.expect(TOKEN_STRING) + if err != nil { + return nil, err + } + test.InFiles[fname.Value] = content.Value + + case "outFile": + p.advance() + if _, err := p.expect(TOKEN_LPAREN); err != nil { + return nil, err + } + fname, err := p.expect(TOKEN_STRING) + if err != nil { + return nil, err + } + if _, err := p.expect(TOKEN_RPAREN); err != nil { + return nil, err + } + if _, err := p.expect(TOKEN_ASSIGN); err != nil { + return nil, err + } + content, err := p.expect(TOKEN_STRING) + if err != nil { + return nil, err + } + test.OutFiles[fname.Value] = content.Value + + default: + return nil, fmt.Errorf("%d:%d: unexpected keyword %q in test", t.Line, t.Col, t.Value) + } + } + + if _, err := p.expect(TOKEN_RBRACE); err != nil { + return nil, err + } + return test, nil +} + +func (p *Parser) parseMatcherOrAssign() (Matcher, error) { + t := p.peek() + + if t.Type == TOKEN_ASSIGN { + p.advance() + s, err := p.expect(TOKEN_STRING) + if err != nil { + return nil, err + } + return ExactMatcher{Value: s.Value}, nil + } + + if t.Type == TOKEN_TILDE { + p.advance() + eps, err := p.parseFloat() + if err != nil { + return nil, err + } + if err := p.expectIdent("of"); err != nil { + return nil, err + } + s, err := 
p.expect(TOKEN_STRING) + if err != nil { + return nil, err + } + return NumericEpsMatcher{Epsilon: eps, Value: s.Value}, nil + } + + if t.Type == TOKEN_IDENT { + switch t.Value { + case "contains": + p.advance() + s, err := p.expect(TOKEN_STRING) + if err != nil { + return nil, err + } + return ContainsMatcher{Substr: s.Value}, nil + + case "matches": + p.advance() + s, err := p.expect(TOKEN_STRING) + if err != nil { + return nil, err + } + return RegexMatcher{Pattern: s.Value}, nil + + case "anyOrder": + p.advance() + if _, err := p.expect(TOKEN_LBRACE); err != nil { + return nil, err + } + var lines []string + for !p.isRBrace() { + s, err := p.expect(TOKEN_STRING) + if err != nil { + return nil, err + } + lines = append(lines, s.Value) + } + if _, err := p.expect(TOKEN_RBRACE); err != nil { + return nil, err + } + return AnyOrderMatcher{Lines: lines}, nil + } + } + + return nil, fmt.Errorf("%d:%d: expected matcher (=, ~, contains, matches, anyOrder), got %q", t.Line, t.Col, t.Value) +} + +func (p *Parser) parsePattern() (*Pattern, error) { + if err := p.expectIdent("pattern"); err != nil { + return nil, err + } + if _, err := p.expect(TOKEN_LBRACE); err != nil { + return nil, err + } + + pat := &Pattern{} + for !p.isRBrace() { + t := p.peek() + if t.Type != TOKEN_IDENT { + return nil, fmt.Errorf("%d:%d: unexpected token in pattern", t.Line, t.Col) + } + p.advance() + if _, err := p.expect(TOKEN_ASSIGN); err != nil { + return nil, err + } + val, err := p.expect(TOKEN_STRING) + if err != nil { + return nil, err + } + switch t.Value { + case "input": + if pat.DirsGlob != "" { + pat.InputFile = val.Value + } else { + pat.InputGlob = val.Value + } + case "output": + if pat.DirsGlob != "" { + pat.OutputFile = val.Value + } else { + pat.OutputGlob = val.Value + } + case "dirs": + pat.DirsGlob = val.Value + default: + return nil, fmt.Errorf("%d:%d: unknown pattern field %q", t.Line, t.Col, t.Value) + } + } + + if _, err := p.expect(TOKEN_RBRACE); err != nil { + return 
nil, err + } + return pat, nil +} + +func (p *Parser) parseStringList() ([]string, error) { + var args []string + for p.peek().Type == TOKEN_STRING { + t := p.advance() + args = append(args, t.Value) + } + if len(args) == 0 { + return nil, fmt.Errorf("%d:%d: expected at least one string", p.peek().Line, p.peek().Col) + } + return args, nil +} + +func (p *Parser) parseFloat() (float64, error) { + t := p.peek() + if t.Type == TOKEN_FLOAT || t.Type == TOKEN_INT { + p.advance() + return strconv.ParseFloat(t.Value, 64) + } + return 0, fmt.Errorf("%d:%d: expected float, got %s", t.Line, t.Col, t.Type) +} + +func (p *Parser) parseBool() (bool, error) { + t := p.peek() + if t.Type != TOKEN_IDENT { + return false, fmt.Errorf("%d:%d: expected true/false, got %s %q", t.Line, t.Col, t.Type, t.Value) + } + switch t.Value { + case "true": + p.advance() + return true, nil + case "false": + p.advance() + return false, nil + default: + return false, fmt.Errorf("%d:%d: expected true/false, got %q", t.Line, t.Col, t.Value) + } +} + +func (p *Parser) parseInt() (int, error) { + t, err := p.expect(TOKEN_INT) + if err != nil { + return 0, err + } + n, err := strconv.Atoi(t.Value) + if err != nil { + return 0, fmt.Errorf("%d:%d: invalid int %q", t.Line, t.Col, t.Value) + } + return n, nil +} + +func (p *Parser) parseDuration() (time.Duration, error) { + t := p.peek() + if t.Type != TOKEN_DURATION { + return 0, fmt.Errorf("%d:%d: expected duration (e.g. 
10s, 2m, 500ms), got %s %q", t.Line, t.Col, t.Type, t.Value) + } + p.advance() + d, err := time.ParseDuration(t.Value) + if err != nil { + return 0, fmt.Errorf("%d:%d: invalid duration %q: %w", t.Line, t.Col, t.Value, err) + } + return d, nil +} + +func (p *Parser) isRBrace() bool { + return p.peek().Type == TOKEN_RBRACE || p.peek().Type == TOKEN_EOF +} diff --git a/dsl/parser_test.go b/dsl/parser_test.go new file mode 100644 index 0000000..6be777f --- /dev/null +++ b/dsl/parser_test.go @@ -0,0 +1,217 @@ +package dsl + +import ( + "testing" + "time" +) + +func TestParseBasic(t *testing.T) { + src := ` +build "go build -o solution ." +timeout 10s + +group("basic") { + weight = 0.4 + + test("stdin stdout") { + stdin = "5\n1 3 2 5 4\n" + stdout = "1 2 3 4 5\n" + } + + test("args") { + args = "--count" "3" + exitCode = 0 + stdout contains "foo" + } +} + +group("files") { + weight = 0.6 + timeout = 5s + + pattern { + input = "testdata/*/input.txt" + output = "testdata/*/output.txt" + } +} +` + f, warns, err := Parse(src) + if err != nil { + t.Fatalf("parse error: %v", err) + } + if len(warns) > 0 { + t.Logf("warnings: %v", warns) + } + + if f.Build != "go build -o solution ." 
{ + t.Errorf("wrong build: %q", f.Build) + } + if f.Timeout != 10*time.Second { + t.Errorf("wrong timeout: %v", f.Timeout) + } + if len(f.Groups) != 2 { + t.Fatalf("expected 2 groups, got %d", len(f.Groups)) + } + + g0 := f.Groups[0] + if g0.Name != "basic" { + t.Errorf("wrong group name: %q", g0.Name) + } + if g0.Weight != 0.4 { + t.Errorf("wrong weight: %v", g0.Weight) + } + if len(g0.Tests) != 2 { + t.Fatalf("expected 2 tests, got %d", len(g0.Tests)) + } + + t0 := g0.Tests[0] + if t0.Name != "stdin stdout" { + t.Errorf("wrong test name: %q", t0.Name) + } + if t0.Stdin == nil || *t0.Stdin != "5\n1 3 2 5 4\n" { + t.Errorf("wrong stdin: %v", t0.Stdin) + } + exact, ok := t0.Stdout.(ExactMatcher) + if !ok { + t.Errorf("expected ExactMatcher, got %T", t0.Stdout) + } else if exact.Value != "1 2 3 4 5\n" { + t.Errorf("wrong stdout: %q", exact.Value) + } + + t1 := g0.Tests[1] + if len(t1.Args) != 2 || t1.Args[0] != "--count" || t1.Args[1] != "3" { + t.Errorf("wrong args: %v", t1.Args) + } + if _, ok := t1.Stdout.(ContainsMatcher); !ok { + t.Errorf("expected ContainsMatcher, got %T", t1.Stdout) + } + + g1 := f.Groups[1] + if g1.Pattern == nil { + t.Fatal("expected pattern in group files") + } + if g1.Pattern.InputGlob != "testdata/*/input.txt" { + t.Errorf("wrong input glob: %q", g1.Pattern.InputGlob) + } + if g1.Timeout != 5*time.Second { + t.Errorf("wrong group timeout: %v", g1.Timeout) + } +} + +func TestWeightWarning(t *testing.T) { + src := ` +build "go build ." + +group("a") { + weight = 0.3 + test("x") { stdin = "" stdout = "" } +} + +group("b") { + weight = 0.3 + test("y") { stdin = "" stdout = "" } +} +` + _, warns, err := Parse(src) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if len(warns) == 0 { + t.Error("expected weight warning, got none") + } + t.Logf("warn: %v", warns) +} + +func TestHeredoc(t *testing.T) { + src := ` +build "go build ." 
+ +group("g") { + weight = 1.0 + test("heredoc") { + stdin = """ + hello + world + """ + stdout = """ + HELLO + WORLD + """ + } +} +` + f, _, err := Parse(src) + if err != nil { + t.Fatalf("parse error: %v", err) + } + test := f.Groups[0].Tests[0] + if *test.Stdin != "hello\nworld" { + t.Errorf("wrong stdin: %q", *test.Stdin) + } + exact := test.Stdout.(ExactMatcher) + if exact.Value != "HELLO\nWORLD" { + t.Errorf("wrong stdout: %q", exact.Value) + } +} + +func TestMatchers(t *testing.T) { + src := ` +build "go build ." + +group("matchers") { + weight = 1.0 + + test("regex") { + stdin = "ping\n" + stdout matches "pong.*" + } + + test("numeric") { + stdin = "1 3\n" + stdout ~ 0.001 of "0.333" + } + + test("any order") { + stdin = "data\n" + stdout anyOrder { + "line1" + "line2" + "line3" + } + } + + test("stderr contains") { + args = "--invalid" + exitCode = 1 + stderr contains "invalid flag" + } +} +` + f, _, err := Parse(src) + if err != nil { + t.Fatalf("parse error: %v", err) + } + tests := f.Groups[0].Tests + + if _, ok := tests[0].Stdout.(RegexMatcher); !ok { + t.Errorf("test 0: expected RegexMatcher, got %T", tests[0].Stdout) + } + + num, ok := tests[1].Stdout.(NumericEpsMatcher) + if !ok { + t.Errorf("test 1: expected NumericEpsMatcher, got %T", tests[1].Stdout) + } else if num.Epsilon != 0.001 { + t.Errorf("test 1: wrong epsilon %v", num.Epsilon) + } + + if _, ok := tests[2].Stdout.(AnyOrderMatcher); !ok { + t.Errorf("test 2: expected AnyOrderMatcher, got %T", tests[2].Stdout) + } + + if _, ok := tests[3].Stderr.(ContainsMatcher); !ok { + t.Errorf("test 3: expected ContainsMatcher on stderr, got %T", tests[3].Stderr) + } + if *tests[3].ExitCode != 1 { + t.Errorf("test 3: expected exitCode 1, got %v", *tests[3].ExitCode) + } +} diff --git a/example/c-sum/.gitea/workflows/judge.yml b/example/c-sum/.gitea/workflows/judge.yml new file mode 100644 index 0000000..c3b85a0 --- /dev/null +++ b/example/c-sum/.gitea/workflows/judge.yml @@ -0,0 +1,148 @@ +name: judge 
+run-name: "Sum tests (${{ inputs.student_url || github.repository }})" + +on: + push: + pull_request: + workflow_dispatch: + inputs: + student_url: + description: "Student repo (owner/repo), leave empty to use current repo" + required: false + type: string + default: "" + student_ref: + description: "Ref (branch / tag / SHA)" + required: false + type: string + default: "main" + +env: + SUITE_FILE: sum.jdg + SOURCES_DIR: __sources__ + +jobs: + test: + name: "${{ matrix.toolchain.system }} / ${{ matrix.toolchain.use_compiler }} / ${{ matrix.toolchain.build_type }}${{ matrix.toolchain.wrapper != 'no' && format(' ({0})', matrix.toolchain.wrapper) || '' }}" + + strategy: + fail-fast: false + matrix: + toolchain: + - { system: Linux, use_compiler: gcc, build_type: Release, cflags: "-O2", wrapper: no, timeout_factor: 1.0 } + - { system: Linux, use_compiler: gcc, build_type: Debug, cflags: "-O0 -g", wrapper: no, timeout_factor: 2.5 } + - { system: Linux, use_compiler: gcc, build_type: Sanitized, cflags: "-O1 -g -fsanitize=address,undefined", wrapper: no, timeout_factor: 2.5 } + - { system: Linux, use_compiler: gcc, build_type: Debug, cflags: "-O0 -g", wrapper: valgrind, timeout_factor: 5.0 } + - { system: Linux, use_compiler: clang, build_type: Release, cflags: "-O2", wrapper: no, timeout_factor: 1.0 } + - { system: Linux, use_compiler: clang, build_type: Sanitized, cflags: "-O1 -g -fsanitize=address,undefined", wrapper: no, timeout_factor: 2.5 } + - { system: Windows, use_compiler: clang, build_type: Release, cflags: "-O2", wrapper: no, timeout_factor: 2.0 } + - { system: Windows, use_compiler: clang, build_type: Debug, cflags: "-O0 -g", wrapper: no, timeout_factor: 3.0 } + - { system: Windows, use_compiler: msvc, build_type: Release, cflags: "/O2", wrapper: no, timeout_factor: 2.0 } + - { system: Windows, use_compiler: msvc, build_type: Debug, cflags: "/Od /Zi", wrapper: no, timeout_factor: 5.0 } + + runs-on: ${{ matrix.toolchain.system }}-Runner + timeout-minutes: 10 + 
+ env: + REPORT_NAME: "report_${{ matrix.toolchain.system }}_${{ matrix.toolchain.use_compiler }}_${{ matrix.toolchain.build_type }}_${{ matrix.toolchain.wrapper }}" + CC: ${{ matrix.toolchain.use_compiler }} + CFLAGS: ${{ matrix.toolchain.cflags }} + + steps: + - name: Checkout judge harness + uses: actions/checkout@v4 + + - name: Checkout student sources + if: ${{ inputs.student_url != '' }} + uses: actions/checkout@v4 + with: + repository: ${{ inputs.student_url }} + ref: ${{ inputs.student_ref }} + path: ${{ env.SOURCES_DIR }} + token: ${{ secrets.VAR_TOKEN }} + + - name: Stage sources (self) + if: ${{ inputs.student_url == '' }} + shell: bash + run: | + mkdir -p "${SOURCES_DIR}" + cp solution.c "${SOURCES_DIR}/" + + - name: Set up MSVC environment + if: matrix.toolchain.use_compiler == 'msvc' + uses: ilammy/msvc-dev-cmd@v1 + + - name: Install judge + shell: bash + run: | + go install github.com/Mond1c/judge/cmd/cli@latest + echo "$HOME/go/bin" >> "$GITHUB_PATH" + + - name: Install valgrind + if: matrix.toolchain.wrapper == 'valgrind' + run: sudo apt-get update && sudo apt-get install -y valgrind + + - name: Run judge + shell: bash + working-directory: ${{ env.SOURCES_DIR }} + env: + WRAPPER: ${{ matrix.toolchain.wrapper }} + run: | + cp ../${{ env.SUITE_FILE }} . + + WRAPPER_ARG="" + case "$WRAPPER" in + valgrind) WRAPPER_ARG='--wrapper=valgrind --error-exitcode=99 --leak-check=full -q' ;; + no) WRAPPER_ARG="" ;; + *) WRAPPER_ARG="--wrapper=$WRAPPER" ;; + esac + + # For MSVC the suffixed .exe is produced; runner auto-detects it. + judge ${{ env.SUITE_FILE }} . --json $WRAPPER_ARG > "$GITHUB_WORKSPACE/${REPORT_NAME}.json" \ + || echo "judge exited non-zero (expected when tests fail)" + + judge ${{ env.SUITE_FILE }} . 
$WRAPPER_ARG || true + + - name: Upload report + if: ${{ always() }} + uses: https://github.com/christopherHX/gitea-upload-artifact@v4 + with: + name: ${{ env.REPORT_NAME }} + path: ${{ env.REPORT_NAME }}.json + retention-days: 7 + compression-level: 9 + + summary: + needs: [test] + if: ${{ always() }} + name: SUMMARY + runs-on: Linux-Runner + timeout-minutes: 5 + + steps: + - name: Download all reports + uses: https://github.com/christopherHX/gitea-download-artifact@v4 + with: + path: reports + pattern: report_* + + - name: Aggregate + shell: bash + run: | + echo "# Judge results" > SUMMARY.md + echo "" >> SUMMARY.md + echo "| Configuration | Score |" >> SUMMARY.md + echo "|---|---|" >> SUMMARY.md + for f in reports/*/*.json; do + cfg=$(basename "$(dirname "$f")" | sed 's/^report_//') + score=$(grep -o '"TotalScore":[^,}]*' "$f" | head -1 | cut -d: -f2) + echo "| $cfg | $score |" >> SUMMARY.md + done + cat SUMMARY.md + + - name: Upload summary + uses: https://github.com/christopherHX/gitea-upload-artifact@v4 + with: + name: SUMMARY + path: SUMMARY.md + retention-days: 7 diff --git a/example/c-sum/README.md b/example/c-sum/README.md new file mode 100644 index 0000000..dbc0816 --- /dev/null +++ b/example/c-sum/README.md @@ -0,0 +1,49 @@ +# c-sum — cross-platform C example + +Minimal example: a C program that reads `N` then `N` integers and prints their +sum. Tested with `judge` across **gcc / clang / MSVC** on **Linux / Windows**, +with optional **valgrind** and **sanitizer** runs. + +## Files + +- `solution.c` — student-facing solution (could be what the student submits) +- `sum.jdg` — judge test suite +- `.gitea/workflows/judge.yml` — Gitea CI matrix + +## Run locally + +```sh +# Linux / macOS +CC=gcc judge sum.jdg . +CC=clang judge sum.jdg . + +# With valgrind +judge sum.jdg . --wrapper="valgrind --error-exitcode=99 --leak-check=full -q" + +# With ASan+UBSan build +CC=clang CFLAGS="-O1 -g -fsanitize=address,undefined" judge sum.jdg . 
/*
 * Reads N followed by N integers from stdin and prints their sum.
 * Returns 1 on malformed input, 0 on success.
 *
 * Fix: the include line had lost its header name ("#include" with nothing
 * after it), so the file did not compile; restored <stdio.h> for scanf/printf.
 */
#include <stdio.h>

int main(void) {
    int n;
    if (scanf("%d", &n) != 1) return 1;

    /* 64-bit accumulator: the suite's "large sum fits in int64" test sums
     * three 2e9 values, which overflows 32-bit int. */
    long long sum = 0;
    for (int i = 0; i < n; i++) {
        int x;
        if (scanf("%d", &x) != 1) return 1;
        sum += x;
    }

    printf("%lld\n", sum);
    return 0;
}
+ +build "$CC -O2 -std=c11 -Wall -Wextra solution.c -o solution" +build_windows "cl /nologo /O2 /W3 solution.c /Fe:solution.exe" + +binary = "solution" +timeout 5s + +// Windows printf emits \r\n; normalize so tests are portable. +normalize_crlf = true +trim_trailing_ws = true + +group("basic") { + weight = 0.4 + timeout = 2s + + test("one number") { + stdin = "1\n42\n" + stdout = "42\n" + } + + test("three numbers") { + stdin = "3\n1 2 3\n" + stdout = "6\n" + } + + test("negatives") { + stdin = "4\n-1 -2 3 5\n" + stdout = "5\n" + } + + test("zero count") { + stdin = "0\n" + stdout = "0\n" + } +} + +group("edge") { + weight = 0.3 + + test("large sum fits in int64") { + stdin = "3\n2000000000 2000000000 2000000000\n" + stdout = "6000000000\n" + } + + test("multiline input") { + stdin = """ + 5 + 10 + 20 + 30 + 40 + 50 + """ + stdout = "150\n" + } +} + +group("stress") { + weight = 0.3 + timeout = 3s + scoring = all_or_none + + test("sum of 1..100") { + stdin = "100\n1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100\n" + stdout = "5050\n" + } +} diff --git a/example/lab1.jdg b/example/lab1.jdg new file mode 100644 index 0000000..775d4bc --- /dev/null +++ b/example/lab1.jdg @@ -0,0 +1,60 @@ +build "go build -o solution ." 
+timeout 10s + +group("basic") { + weight = 0.2 + timeout = 2s + + test("empty one element") { + stdin = "1\n42\n" + stdout = "42\n" + } + + test("already sorted") { + stdin = "3\n1 2 3\n" + stdout = "1 2 3\n" + } + + test("reverse order") { + stdin = "4\n4 3 2 1\n" + stdout = "1 2 3 4\n" + } +} + +group("main") { + weight = 0.5 + + test("basic") { + stdin = "5\n1 3 2 5 4\n" + stdout = "1 2 3 4 5\n" + } + + test("negative numbers") { + stdin = "5\n-3 1 -1 0 2\n" + stdout = "-3 -1 0 1 2\n" + } + + test("same numbers") { + stdin = "4\n5 5 5 5\n" + stdout = "5 5 5 5\n" + } + + test("multiline stdin") { + stdin = """ + 6 + 100 -50 0 75 -25 50 + """ + stdout = "-50 -25 0 50 75 100\n" + } +} + +group("file-pattern") { + weight = 0.3 + timeout = 5s + + pattern { + input = "testdata/*/input.txt" + output = "testdata/*/output.txt" + } +} + diff --git a/example/solution/main.go b/example/solution/main.go new file mode 100644 index 0000000..ea376fa --- /dev/null +++ b/example/solution/main.go @@ -0,0 +1,34 @@ +package main + +import ( + "bufio" + "fmt" + "os" + "sort" + "strconv" + "strings" +) + +func main() { + scanner := bufio.NewScanner(os.Stdin) + + scanner.Scan() + n, _ := strconv.Atoi(strings.TrimSpace(scanner.Text())) + + scanner.Scan() + parts := strings.Fields(scanner.Text()) + + nums := make([]int, 0, n) + for _, p := range parts { + x, _ := strconv.Atoi(p) + nums = append(nums, x) + } + + sort.Ints(nums) + + out := make([]string, len(nums)) + for i, v := range nums { + out[i] = strconv.Itoa(v) + } + fmt.Println(strings.Join(out, " ")) +} diff --git a/go.mod b/go.mod new file mode 100644 index 0000000..6008e60 --- /dev/null +++ b/go.mod @@ -0,0 +1,3 @@ +module github.com/Mond1c/judge + +go 1.26.1 diff --git a/reporter/reporter.go b/reporter/reporter.go new file mode 100644 index 0000000..a8eb517 --- /dev/null +++ b/reporter/reporter.go @@ -0,0 +1,101 @@ +package reporter + +import ( + "encoding/json" + "fmt" + "io" + "strings" + + "github.com/Mond1c/judge/runner" 
// Text writes a human-readable report to w: the build log (if any), one
// box-drawing section per group listing each test with its status and
// timing plus failure details, and finally the weighted total score.
func Text(w io.Writer, result *runner.SuiteResult) {
	if result.BuildLog != "" {
		fmt.Fprintf(w, "=== BUILD LOG ===\n%s\n", result.BuildLog)
	}

	for _, gr := range result.Groups {
		passed := gr.Passed
		total := gr.Total
		// Guard against division by zero; Total can be 0 (empty group) or -1
		// (pattern group that never expanded after a build failure).
		pct := 0.0
		if total > 0 {
			pct = float64(passed) / float64(total) * 100
		}

		fmt.Fprintf(w, "\n┌─ group %q weight=%.2f score=%.4f\n",
			gr.Name, gr.Weight, gr.Score)
		fmt.Fprintf(w, "│ %d/%d passed (%.0f%%)\n", passed, total, pct)

		for _, tr := range gr.Tests {
			icon := "✓"
			if tr.Status != runner.StatusPass {
				icon = "✗"
			}
			fmt.Fprintf(w, "│ %s [%s] %s (%dms)\n",
				icon, tr.Status, tr.Name, tr.Elapsed.Milliseconds())

			// Failure messages may be multi-line (e.g. expected/actual diffs);
			// keep each physical line inside the group's left border.
			for _, f := range tr.Failures {
				for _, line := range strings.Split(f, "\n") {
					fmt.Fprintf(w, "│ %s\n", line)
				}
			}
		}
		fmt.Fprintf(w, "└─\n")
	}

	fmt.Fprintf(w, "\n══ TOTAL SCORE: %.4f / 1.0000 ══\n", result.TotalScore)
}

// JSON writes the machine-readable report (indented JSON) to w.
// The output schema is the jsonSuiteResult tree below, decoupled from the
// runner's internal result types so the wire format stays stable.
func JSON(w io.Writer, result *runner.SuiteResult) error {
	enc := json.NewEncoder(w)
	enc.SetIndent("", "  ")
	return enc.Encode(jsonResult(result))
}

// jsonSuiteResult is the top-level JSON report shape.
type jsonSuiteResult struct {
	TotalScore float64           `json:"total_score"`
	BuildLog   string            `json:"build_log,omitempty"`
	Groups     []jsonGroupResult `json:"groups"`
}

// jsonGroupResult mirrors runner.GroupResult for serialization.
type jsonGroupResult struct {
	Name   string           `json:"name"`
	Weight float64          `json:"weight"`
	Score  float64          `json:"score"`
	Passed int              `json:"passed"`
	Total  int              `json:"total"`
	Tests  []jsonTestResult `json:"tests"`
}

// jsonTestResult mirrors runner.TestResult for serialization; elapsed time
// is flattened to integer milliseconds.
type jsonTestResult struct {
	Name      string   `json:"name"`
	Status    string   `json:"status"`
	ElapsedMs int64    `json:"elapsed_ms"`
	Failures  []string `json:"failures,omitempty"`
}
// expandGlobPattern materializes one Test per file matching the pattern's
// input glob. For each input, the wildcard portion of its path is substituted
// into the output glob to locate the corresponding expected-output file.
// Fails if the glob is invalid, matches nothing, or any paired file is
// unreadable.
func expandGlobPattern(pattern *dsl.Pattern) ([]*dsl.Test, error) {
	inputFiles, err := filepath.Glob(pattern.InputGlob)
	if err != nil {
		return nil, fmt.Errorf("invalid input glob %q: %w", pattern.InputGlob, err)
	}
	if len(inputFiles) == 0 {
		return nil, fmt.Errorf("no files matched input glob %q", pattern.InputGlob)
	}

	// NOTE(review): splitGlob honors only the first '*'; globs with multiple
	// wildcards would mis-map input -> output paths. Confirm the DSL parser
	// rejects multi-wildcard patterns.
	inputPrefix, inputSuffix := splitGlob(pattern.InputGlob)
	outputPrefix, outputSuffix := splitGlob(pattern.OutputGlob)

	var tests []*dsl.Test
	for _, inputPath := range inputFiles {
		// Recover the wildcard text from the matched input path, then build
		// the sibling output path from the output glob's prefix/suffix.
		wildcard := extractWildcard(inputPath, inputPrefix, inputSuffix)
		outputPath := outputPrefix + wildcard + outputSuffix

		inputContent, err := os.ReadFile(inputPath)
		if err != nil {
			return nil, fmt.Errorf("read input %q: %w", inputPath, err)
		}
		outputContent, err := os.ReadFile(outputPath)
		if err != nil {
			return nil, fmt.Errorf("read output %q: %w", outputPath, err)
		}

		name := fmt.Sprintf("pattern:%s", wildcard)
		stdin := string(inputContent)
		expected := string(outputContent)

		// Generated tests check stdout exactly and leave stderr unchecked;
		// maps are initialized empty so callers can merge group env/files in.
		tests = append(tests, &dsl.Test{
			Name:     name,
			Stdin:    &stdin,
			Env:      map[string]string{},
			InFiles:  map[string]string{},
			OutFiles: map[string]string{},
			Stdout:   dsl.ExactMatcher{Value: expected},
			Stderr:   dsl.NoMatcher{},
		})
	}
	return tests, nil
}
// splitGlob divides pattern at its first '*' and returns the text before and
// after it. A pattern without a wildcard comes back as (pattern, "").
func splitGlob(pattern string) (prefix, suffix string) {
	before, after, found := strings.Cut(pattern, "*")
	if !found {
		return pattern, ""
	}
	return before, after
}

// extractWildcard recovers the text the '*' matched: it strips the glob's
// literal prefix and suffix from a concrete path.
func extractWildcard(path, prefix, suffix string) string {
	return strings.TrimSuffix(strings.TrimPrefix(path, prefix), suffix)
}
[]string{fmt.Sprintf( + "%s mismatch:\n expected: %q\n actual: %q", + label, m.Value, actual, + )} + } + return nil + case dsl.ContainsMatcher: + if !strings.Contains(actual, m.Substr) { + return []string{fmt.Sprintf( + "%s: expected to contain %q, got %q", + label, m.Substr, actual, + )} + } + return nil + case dsl.RegexMatcher: + re, err := regexp.Compile(m.Pattern) + if err != nil { + return []string{fmt.Sprintf("%s: invalid regex %q: %v", label, m.Pattern, err)} + } + if !re.MatchString(actual) { + return []string{fmt.Sprintf( + "%s: %q does not match regex %q", + label, actual, m.Pattern, + )} + } + return nil + + case dsl.NumericEpsMatcher: + errs := matchNumericEps(label, m, actual) + return errs + + case dsl.AnyOrderMatcher: + return matchAnyOrder(label, m, actual) + + default: + return []string{fmt.Sprintf("unknown matcher type %T", m)} + + } +} + +func matchNumericEps(label string, m dsl.NumericEpsMatcher, actual string) []string { + expectedNums, err := parseNumbers(m.Value) + if err != nil { + return []string{fmt.Sprintf("%s: cannot parse expected numbers %q: %v", label, m.Value, err)} + } + actualNums, err := parseNumbers(actual) + if err != nil { + return []string{fmt.Sprintf("%s: cannot parse actual numbers %q: %v", label, actual, err)} + } + if len(expectedNums) != len(actualNums) { + return []string{fmt.Sprintf( + "%s: expected %d numbers, got %d (expected=%q, actual=%q)", + label, len(expectedNums), len(actualNums), m.Value, actual, + )} + } + var errs []string + for i, exp := range expectedNums { + act := actualNums[i] + if math.Abs(exp-act) > m.Epsilon { + errs = append(errs, fmt.Sprintf( + "%s: number[%d] expected %.10g ± %.10g, got %.10g", + label, i, exp, m.Epsilon, act, + )) + } + } + return errs +} + +func parseNumbers(s string) ([]float64, error) { + fields := strings.Fields(s) + nums := make([]float64, 0, len(fields)) + for _, f := range fields { + n, err := strconv.ParseFloat(f, 64) + if err != nil { + return nil, fmt.Errorf("not a 
// Status classifies the outcome of a single test run.
type Status int

const (
	StatusPass         Status = iota // all checks passed
	StatusFail                       // output / exit-code mismatch
	StatusTLE                        // time limit exceeded
	StatusBuildError                 // suite build failed; test never ran
	StatusRuntimeError               // process could not be started or failed abnormally
)

// statusNames holds the report label for each known Status.
var statusNames = map[Status]string{
	StatusPass:         "PASS",
	StatusFail:         "FAIL",
	StatusTLE:          "TLE",
	StatusBuildError:   "BUILD_ERROR",
	StatusRuntimeError: "RE",
}

// String returns the short report label for s, or "UNKNOWN" for
// out-of-range values.
func (s Status) String() string {
	if name, ok := statusNames[s]; ok {
		return name
	}
	return "UNKNOWN"
}
// New constructs a Runner for the parsed suite f.
// Binary-name precedence: cfg.BinaryName (CLI override) > f.Binary (DSL)
// > "solution".
func New(f *dsl.File, cfg Config) *Runner {
	name := cfg.BinaryName
	if name == "" {
		name = f.Binary
	}
	if name == "" {
		name = "solution"
	}
	// Make the work dir absolute up front: tests later run with cmd.Dir set
	// to a per-test temp dir, where a relative binary path would not resolve.
	// On Abs failure fall back to the raw value rather than aborting.
	absWork, err := filepath.Abs(cfg.WorkDir)
	if err != nil {
		absWork = cfg.WorkDir
	}
	cfg.WorkDir = absWork
	return &Runner{file: f, cfg: cfg, binary: resolveBinary(absWork, name)}
}
// Run builds the solution then executes every group in the suite, summing
// the (already weight-scaled) group scores into TotalScore.
// If the build fails, no tests are executed: every declared test is reported
// with StatusBuildError and the score stays 0.
func (r *Runner) Run() *SuiteResult {
	result := &SuiteResult{}

	buildLog, err := r.build()
	result.BuildLog = buildLog
	if err != nil {
		// Build failed: synthesize a zero-score result that still lists every
		// group and declared test, so the report shows what would have run.
		for _, g := range r.file.Groups {
			gr := &GroupResult{
				Name:   g.Name,
				Weight: g.Weight,
				Score:  0,
			}

			// -1 flags "count unknown": a pattern group's tests are only
			// discovered at run time, which never happened here.
			// NOTE(review): confirm every report consumer tolerates
			// Total == -1 (the text reporter guards with total > 0).
			total := len(g.Tests)
			if g.Pattern != nil {
				total = -1
			}
			gr.Total = total
			for _, t := range g.Tests {
				gr.Tests = append(gr.Tests, &TestResult{
					Name:   t.Name,
					Status: StatusBuildError,
				})
			}
			result.Groups = append(result.Groups, gr)
		}
		return result
	}

	// After build, re-resolve binary (the exe may have been produced just now).
	r.binary = resolveBinary(r.cfg.WorkDir, filepath.Base(r.binary))

	for _, g := range r.file.Groups {
		gr := r.runGroup(g)
		result.Groups = append(result.Groups, gr)
		result.TotalScore += gr.Score
	}

	return result
}
+} + +func (r *Runner) build() (string, error) { + buildCmd := r.buildCommand() + + ctx := context.Background() + if r.file.Timeout > 0 { + var cancel context.CancelFunc + ctx, cancel = context.WithTimeout(ctx, r.file.Timeout) + defer cancel() + } + + cmd := shellCommand(ctx, buildCmd) + cmd.Dir = r.cfg.WorkDir + cmd.Env = os.Environ() + + var out bytes.Buffer + cmd.Stdout = &out + cmd.Stderr = &out + + if err := cmd.Run(); err != nil { + return out.String(), fmt.Errorf("build failed: %w\n%s", err, out.String()) + } + return out.String(), nil +} + +// shellCommand runs a command string through the platform's shell so env vars +// like $CC expand naturally from the CI matrix. +func shellCommand(ctx context.Context, cmdline string) *exec.Cmd { + if runtime.GOOS == "windows" { + return exec.CommandContext(ctx, "cmd", "/C", cmdline) + } + return exec.CommandContext(ctx, "sh", "-c", cmdline) +} + +func (r *Runner) runGroup(g *dsl.Group) *GroupResult { + gr := &GroupResult{ + Name: g.Name, + Weight: g.Weight, + } + + tests := g.Tests + + if g.Pattern != nil { + expanded, err := expandPattern(g.Pattern, &zeroChecker{}) + if err != nil { + gr.Tests = append(gr.Tests, &TestResult{ + Name: "pattern_expand", + Status: StatusFail, + Failures: []string{fmt.Sprintf("pattern expand error: %v", err)}, + }) + gr.Total = 1 + gr.Score = 0 + return gr + } + for _, t := range expanded { + if t.Timeout == 0 { + t.Timeout = g.Timeout + } + for k, v := range g.Env { + if _, ok := t.Env[k]; !ok { + t.Env[k] = v + } + } + } + tests = append(tests, expanded...) 
// runTest executes a single test in an isolated temp directory and returns
// its result. Flow: stage declared input files, run the (optionally
// wrapped) binary with the test's stdin/env under a timeout, then check
// exit code, stdout/stderr matchers, and any expected output files.
// The temp dir is always removed on return.
func (r *Runner) runTest(t *dsl.Test) *TestResult {
	tr := &TestResult{Name: t.Name, Status: StatusPass}

	tmpDir, err := os.MkdirTemp("", "judge-test-*")
	if err != nil {
		tr.Status = StatusRuntimeError
		tr.Failures = append(tr.Failures, fmt.Sprintf("failed to create temp dir: %v", err))
		return tr
	}
	defer os.RemoveAll(tmpDir)

	// Stage input files declared by the test; names may contain subdirs.
	for name, content := range t.InFiles {
		path := filepath.Join(tmpDir, name)
		if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil {
			tr.fail("mkdir for file %q: %v", name, err)
			return tr
		}
		if err := os.WriteFile(path, []byte(content), 0644); err != nil {
			tr.fail("write input file %q: %v", name, err)
			return tr
		}
	}

	// A test with no explicit/group timeout gets a 10s safety net.
	timeout := t.Timeout
	if timeout == 0 {
		timeout = 10 * time.Second
	}

	ctx, cancel := context.WithTimeout(context.Background(), timeout)
	defer cancel()

	// Wrapper precedence: CLI flag > test > group (already copied into test).
	wrapper := r.cfg.Wrapper
	if wrapper == "" {
		wrapper = t.Wrapper
	}

	cmd := buildExecCmd(ctx, wrapper, r.binary, t.Args)
	cmd.Dir = tmpDir

	cmd.Env = os.Environ()
	// Force C locale so numeric formatting (decimal separator, etc.) is stable
	// across runners. Student code can still override via test env.
	cmd.Env = append(cmd.Env, "LC_ALL=C", "LANG=C")
	for k, v := range t.Env {
		cmd.Env = append(cmd.Env, fmt.Sprintf("%s=%s", k, v))
	}

	if t.Stdin != nil {
		cmd.Stdin = strings.NewReader(*t.Stdin)
	}

	// Capped capture: keeps draining the pipes (so the child never blocks)
	// but stops storing past MaxOutputBytes.
	stdout := &cappedBuffer{limit: MaxOutputBytes}
	stderr := &cappedBuffer{limit: MaxOutputBytes}
	cmd.Stdout = stdout
	cmd.Stderr = stderr

	start := time.Now()
	runErr := cmd.Run()
	tr.Elapsed = time.Since(start)

	tr.ActualStdout = normalizeOutput(stdout.String(), r.file)
	tr.ActualStderr = normalizeOutput(stderr.String(), r.file)

	if stdout.truncated || stderr.truncated {
		tr.fail("output truncated at %d bytes (possible runaway output)", MaxOutputBytes)
	}

	// TLE takes priority over any other failure recorded so far; the deadline
	// having fired means the process was killed, not that it misbehaved.
	if ctx.Err() == context.DeadlineExceeded {
		tr.Status = StatusTLE
		tr.Failures = append(tr.Failures, fmt.Sprintf("time limit exceeded (%v)", timeout))
		return tr
	}

	// A non-zero exit is a test observation (some tests expect it); any other
	// run error (binary missing, not executable, ...) is a runtime error.
	actualCode := 0
	if runErr != nil {
		if exitErr, ok := runErr.(*exec.ExitError); ok {
			actualCode = exitErr.ExitCode()
		} else {
			tr.Status = StatusRuntimeError
			tr.fail("runtime error: %v", runErr)
			return tr
		}
	}
	tr.ActualCode = actualCode

	if t.ExitCode != nil && actualCode != *t.ExitCode {
		tr.fail("exit code: expected %d, got %d", *t.ExitCode, actualCode)
	}

	for _, f := range applyMatcher("stdout", t.Stdout, tr.ActualStdout) {
		tr.fail("%s", f)
	}
	for _, f := range applyMatcher("stderr", t.Stderr, tr.ActualStderr) {
		tr.fail("%s", f)
	}

	// Expected output files: each must exist and match exactly (after the
	// suite's normalization rules are applied).
	for name, expected := range t.OutFiles {
		path := filepath.Join(tmpDir, name)
		content, err := os.ReadFile(path)
		if err != nil {
			tr.fail("output file %q not found: %v", name, err)
			continue
		}
		actual := normalizeOutput(string(content), r.file)
		for _, f := range applyMatcher(fmt.Sprintf("file(%s)", name), dsl.ExactMatcher{Value: expected}, actual) {
			tr.fail("%s", f)
		}
	}

	if len(tr.Failures) > 0 {
		tr.Status = StatusFail
	}

	return tr
}
// cappedBuffer captures at most limit bytes and silently discards the rest,
// so a runaway child process neither blocks on a full pipe nor OOMs the
// judge host. truncated records whether any byte was dropped.
type cappedBuffer struct {
	buf       bytes.Buffer
	limit     int
	truncated bool
}

// Write stores up to the remaining capacity and always reports the full
// input length as written, keeping the producing pipe drained.
func (c *cappedBuffer) Write(p []byte) (int, error) {
	remaining := c.limit - c.buf.Len()
	switch {
	case remaining <= 0:
		c.truncated = true
	case len(p) > remaining:
		c.buf.Write(p[:remaining])
		c.truncated = true
	default:
		return c.buf.Write(p)
	}
	return len(p), nil
}

// String returns everything captured so far.
func (c *cappedBuffer) String() string { return c.buf.String() }

var _ io.Writer = (*cappedBuffer)(nil)