evergreen.yml (83 additions, 0 deletions)

@@ -20,6 +20,35 @@ buildvariants:
tasks:
- name: checkrun_test

- name: attach-results-perf
display_name: "Attach Results Performance Test"
run_on: ubuntu2204-small
expansions:
num_tests: "500"
failure_rate: "0.1"
log_lines: "20"
tasks:
- attach-results-perf-test

- name: gotest-perf
display_name: "GoTest Parse Performance Test"
run_on: ubuntu2204-small
expansions:
num_files: "10"
tests_per_file: "100"
failure_rate: "0.1"
tasks:
- gotest-perf-test

- name: upload-traces-perf
display_name: "Upload Traces Performance Test"
run_on: ubuntu2204-small
expansions:
num_files: "74"
spans_per_file: "5000"
tasks:
- upload-traces-perf-test

functions:
create virtualenv:
- command: shell.exec
@@ -29,6 +58,15 @@ functions:
echo "noop"
git describe

post:
- command: attach.results
params:
file_location: src/test_results.json
- command: gotest.parse_files
params:
files:
- "src/*.suite"

tasks:

- name: checkrun_test
@@ -39,6 +77,51 @@ tasks:
script: |
echo "i am become checkrun"

# Performance test for attach.results
# Generates a large JSON file with many test results to test parallel log uploading.
- name: attach-results-perf-test
commands:
- command: subprocess.exec
params:
binary: python3
working_dir: src
args:
- "generate_results_file.py"
- "${num_tests|500}" # Number of test results
- "${failure_rate|0.1}" # Failure rate (0.0-1.0)
- "${log_lines|20}" # Log lines per test

# Performance test for gotest.parse_files
# Generates multiple .suite files with go test output to test parallel log uploading.
- name: gotest-perf-test
commands:
- command: subprocess.exec
params:
binary: python3
working_dir: src
args:
- "generate_gotest_files.py"
- "${num_files|10}" # Number of .suite files
- "${tests_per_file|100}" # Tests per file
- "${failure_rate|0.1}" # Failure rate (0.0-1.0)

# Performance test for upload-traces
# Generates OTel trace files to test parallel trace uploading.
- name: upload-traces-perf-test
commands:
- command: shell.exec
params:
script: |
echo "OTel collector endpoint: '${otel_collector_endpoint}'"
- command: subprocess.exec
params:
binary: python3
working_dir: src
args:
- "generate_trace_files.py"
- "${num_files|10}" # Number of trace files
- "${spans_per_file|100}" # Spans per file

modules:
- name: test-trigger
repo: git@github.com:evergreen-ci/commit-queue-sandbox.git
generate_gotest_files.py (116 additions, 0 deletions)

@@ -0,0 +1,116 @@
#!/usr/bin/env python3
"""
Generate go test output files for performance testing of gotest.parse_files.

Usage:
python3 generate_gotest_files.py [num_files] [tests_per_file] [failure_rate]

Arguments:
num_files Number of .suite files to generate (default: 10)
tests_per_file Number of test cases per file (default: 100)
failure_rate Fraction of tests that should fail, 0.0-1.0 (default: 0.1)

Example:
python3 generate_gotest_files.py 10 100 0.1
# Generates 10 files with 100 tests each (1000 total), 10% failures
"""

import os
import random
import sys
import time


def generate_test_output(test_num: int, module_name: str, should_fail: bool) -> tuple[str, str]:
"""Generate go test output for a single test."""
test_name = f"Test{module_name.title()}_Case{test_num:04d}"
duration = random.uniform(0.001, 0.5)

lines = []
lines.append(f"=== RUN {test_name}")

# Add some log output
for i in range(random.randint(2, 8)):
lines.append(f" {test_name}: log line {i+1}: processing step {i+1}")

if should_fail:
lines.append(f" {test_name}: assertion failed")
lines.append(f" Expected: {random.randint(1, 100)}")
lines.append(f" Actual: {random.randint(1, 100)}")
status = "FAIL"
else:
status = "PASS"

lines.append(f"--- {status}: {test_name} ({duration:.3f}s)")

return "\n".join(lines), status


def generate_suite_file(file_num: int, tests_per_file: int, failure_rate: float, output_dir: str) -> tuple[int, int]:
"""Generate a single go test .suite file."""
module_name = f"module_{file_num:04d}"

lines = []
num_failures = 0
num_passes = 0

for test_num in range(tests_per_file):
should_fail = random.random() < failure_rate
output, status = generate_test_output(test_num, module_name, should_fail)
lines.append(output)
if status == "FAIL":
num_failures += 1
else:
num_passes += 1

# Add summary line
    if num_failures > 0:
        lines.append("FAIL")
    else:
        lines.append("PASS")
lines.append(f"ok \tgithub.com/test/{module_name}\t{random.uniform(0.5, 5.0):.3f}s")

filename = os.path.join(output_dir, f"{module_name}.suite")
with open(filename, "w") as f:
f.write("\n".join(lines))

return tests_per_file, num_failures


def main():
num_files = int(sys.argv[1]) if len(sys.argv) > 1 else 10
tests_per_file = int(sys.argv[2]) if len(sys.argv) > 2 else 100
failure_rate = float(sys.argv[3]) if len(sys.argv) > 3 else 0.1

output_dir = "."

print(f"Generating {num_files} go test suite files with {tests_per_file} tests each...")
print(f"Total tests: {num_files * tests_per_file}")
print(f"Expected failures: ~{int(num_files * tests_per_file * failure_rate)}")
print()

start_time = time.time()

total_tests = 0
total_failures = 0

for file_num in range(num_files):
tests, failures = generate_suite_file(file_num, tests_per_file, failure_rate, output_dir)
total_tests += tests
total_failures += failures

if (file_num + 1) % 5 == 0:
print(f" Generated {file_num + 1}/{num_files} files...")

elapsed = time.time() - start_time

print()
print(f"Done! Generated {num_files} files in {elapsed:.2f}s")
print(f"Total tests: {total_tests}")
print(f"Total failures: {total_failures}")
print()
print("Files will be picked up by gotest.parse_files in post section.")


if __name__ == "__main__":
main()
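
A generated module_XXXX.suite file is plain "go test -v" style text, so a quick local tally of the "--- PASS:" / "--- FAIL:" result lines is enough to confirm the failure rate looks right before handing the files to gotest.parse_files. A minimal sketch, assuming the module_*.suite naming used by the script above (this checker is illustrative and not part of the change):

#!/usr/bin/env python3
"""Illustrative local check: tally per-test result lines in the generated .suite files."""

import glob

passed = 0
failed = 0
for path in glob.glob("module_*.suite"):
    with open(path) as f:
        for line in f:
            # The generator writes one "--- PASS: ..." or "--- FAIL: ..." line per test.
            if line.startswith("--- PASS:"):
                passed += 1
            elif line.startswith("--- FAIL:"):
                failed += 1

total = passed + failed
print(f"{total} tests found: {passed} passed, {failed} failed")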
generate_results_file.py (111 additions, 0 deletions)

@@ -0,0 +1,111 @@
#!/usr/bin/env python3
"""
Generate a large JSON results file for performance testing of attach.results.

Usage:
python3 generate_results_file.py [num_tests] [failure_rate] [log_lines]

Arguments:
num_tests Number of test results to generate (default: 500)
failure_rate Fraction of tests that should fail, 0.0-1.0 (default: 0.1)
log_lines Number of lines in each test's log_raw (default: 20)

Example:
python3 generate_results_file.py 500 0.1 20
# Generates 500 test results, 10% failures, 20 log lines each
"""

import json
import os
import random
import sys
import time


def generate_log_content(test_name: str, num_lines: int, failed: bool) -> str:
"""Generate realistic log content for a test."""
lines = []
lines.append(f"=== Starting test: {test_name} ===")
lines.append(f"Test configuration loaded at {time.strftime('%Y-%m-%d %H:%M:%S')}")

for i in range(num_lines - 4):
if failed and i == num_lines - 6:
lines.append(f"ERROR: Assertion failed at line {random.randint(50, 200)}")
lines.append(f" Expected: {random.randint(1, 100)}")
lines.append(f" Actual: {random.randint(1, 100)}")
else:
log_type = random.choice(["INFO", "DEBUG", "TRACE"])
lines.append(f"[{log_type}] Processing step {i+1}: operation completed successfully")

status = "FAILED" if failed else "PASSED"
lines.append(f"=== Test {test_name} {status} ===")

return "\n".join(lines)


def generate_test_result(test_num: int, failure_rate: float, log_lines: int) -> dict:
"""Generate a single test result."""
module_name = f"module_{test_num // 100:03d}"
test_name = f"test_{module_name}.Test{module_name.title()}Suite.test_case_{test_num:05d}"

should_fail = random.random() < failure_rate
status = "fail" if should_fail else "pass"

# Generate timestamps (Python time format - seconds since epoch)
start_time = 1700000000.0 + (test_num * 0.5) # Stagger start times
duration = random.uniform(0.1, 2.0)
end_time = start_time + duration

log_content = generate_log_content(test_name, log_lines, should_fail)

return {
"test_file": test_name,
"status": status,
"start": start_time,
"end": end_time,
"log_raw": log_content
}


def main():
num_tests = int(sys.argv[1]) if len(sys.argv) > 1 else 500
failure_rate = float(sys.argv[2]) if len(sys.argv) > 2 else 0.1
log_lines = int(sys.argv[3]) if len(sys.argv) > 3 else 20

print(f"Generating results file with {num_tests} tests...")
print(f"Expected failures: ~{int(num_tests * failure_rate)}")
print(f"Log lines per test: {log_lines}")
print()

start_time = time.time()

results = []
num_failures = 0

for test_num in range(num_tests):
result = generate_test_result(test_num, failure_rate, log_lines)
results.append(result)
if result["status"] == "fail":
num_failures += 1

if (test_num + 1) % 100 == 0:
print(f" Generated {test_num + 1}/{num_tests} results...")

output = {"results": results}

output_file = "test_results.json"
with open(output_file, "w") as f:
json.dump(output, f, indent=2)

elapsed = time.time() - start_time
    file_size = os.path.getsize(output_file) / 1024 / 1024  # size of the written file on disk, in MB

print()
print(f"Done! Generated {num_tests} test results in {elapsed:.2f}s")
print(f"Total failures: {num_failures}")
print(f"Output file: {output_file} ({file_size:.2f} MB)")
print()
print("File will be picked up by attach.results in post section.")


if __name__ == "__main__":
main()
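
Likewise, the shape of test_results.json can be verified locally before attach.results reads it. A minimal sketch, assuming the key names the generator above writes (hypothetical helper, not part of this change):

import json

with open("test_results.json") as f:
    data = json.load(f)

results = data["results"]
required_keys = {"test_file", "status", "start", "end", "log_raw"}

# Flag any entries missing the keys the generator writes.
incomplete = [r for r in results if not required_keys <= r.keys()]
failures = [r for r in results if r["status"] == "fail"]

print(f"{len(results)} results, {len(failures)} failures, {len(incomplete)} incomplete entries")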