
Commit 34f0932

[scoreboard] Add basic tests (#548)
1 parent 1f21a66 commit 34f0932

File tree

9 files changed: +714 −10 lines changed


scoreboard/README.md

Lines changed: 34 additions & 0 deletions

# Scoreboard

HTML scoreboard generator for parallel programming tasks.

## Usage

```bash
# Install dependencies
pip install -r requirements.txt

# Generate scoreboard
python main.py -o output_directory
```

Generates `output_directory/index.html` with the scoreboard.

## Configuration

- `data/threads-config.yml` - Task points, deadlines, penalties
- `data/plagiarism.yml` - Flagged submissions

## Testing

```bash
# Install test dependencies
pip install -r tests/requirements.txt

# Run tests
python -m pytest tests/ -v
```

## Output

HTML table with columns: S (solution), A (acceleration), E (efficiency), D (deadline), P (plagiarism), Total.
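The A and E columns are computed in `calculate_performance_metrics` (see the `main.py` diff below): each performance value appears to be a run time normalized to the sequential run, so speedup is its reciprocal and efficiency divides speedup by the configured processor count. A quick sketch of the arithmetic, using values from the new tests:

```python
# Assumed semantics: perf_val is parallel time relative to SEQ time
# (1.0 == as slow as sequential). Formulas mirror calculate_performance_metrics.
perf_val, num_proc = 0.5, 4

speedup = 1.0 / perf_val               # A column: 2.00
efficiency = speedup / num_proc * 100  # E column: 50.00%

assert f"{speedup:.2f}" == "2.00"
assert f"{efficiency:.2f}%" == "50.00%"
```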

scoreboard/main.py

Lines changed: 15 additions & 10 deletions

```diff
@@ -48,14 +48,16 @@ def load_performance_data(perf_stat_file_path):
         with open(perf_stat_file_path, "r", newline="") as csvfile:
             reader = csv.DictReader(csvfile)
             for row in reader:
-                perf_stats[row["Task"]] = {
-                    "seq": row["SEQ"],
-                    "omp": row["OMP"],
-                    "tbb": row["TBB"],
-                    "stl": row["STL"],
-                    "all": row["ALL"],
-                    "mpi": "N/A",
-                }
+                task_name = row.get("Task")
+                if task_name:
+                    perf_stats[task_name] = {
+                        "seq": row.get("SEQ", "?"),
+                        "omp": row.get("OMP", "?"),
+                        "tbb": row.get("TBB", "?"),
+                        "stl": row.get("STL", "?"),
+                        "all": row.get("ALL", "?"),
+                        "mpi": "N/A",
+                    }
     else:
         logger.warning("Performance stats CSV not found at %s", perf_stat_file_path)
     return perf_stats
@@ -67,7 +69,9 @@ def calculate_performance_metrics(perf_val, eff_num_proc):
     efficiency = "?"
     try:
         perf_float = float(perf_val)
-        if perf_float > 0:
+        if perf_float > 0 and not (
+            perf_float == float("inf") or perf_float != perf_float
+        ):
             speedup = 1.0 / perf_float
             acceleration = f"{speedup:.2f}"
             efficiency = f"{speedup / eff_num_proc * 100:.2f}%"
@@ -92,9 +96,10 @@ def check_plagiarism_and_calculate_penalty(
     dir, task_type, sol_points, plagiarism_cfg, cfg
 ):
     """Check if task is plagiarized and calculate penalty points."""
+    clean_dir = dir[: -len("_disabled")] if dir.endswith("_disabled") else dir
     is_cheated = (
         dir in plagiarism_cfg["plagiarism"][task_type]
-        or dir.rstrip("_disabled") in plagiarism_cfg["plagiarism"][task_type]
+        or clean_dir in plagiarism_cfg["plagiarism"][task_type]
     )
     plagiarism_points = 0
     if is_cheated:
```
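Two of these fixes rely on Python subtleties worth noting. `str.rstrip` takes a set of characters, not a suffix, so `dir.rstrip("_disabled")` can mangle names that merely end in those letters; and `x != x` is true only for NaN, so the new guard rejects both `inf` and `nan` explicitly. A minimal sketch of both behaviors (the `task_bald` name is a made-up example):

```python
import math

# str.rstrip strips a *character set*, not a suffix: every trailing
# character in {_, d, i, s, a, b, l, e} is removed.
assert "task_bald".rstrip("_disabled") == "task"            # mangled: no suffix here
assert "my_task_disabled".rstrip("_disabled") == "my_task"  # happens to look right

# Explicit suffix removal, as in the fixed code:
name = "my_task_disabled"
clean = name[: -len("_disabled")] if name.endswith("_disabled") else name
assert clean == "my_task"

# NaN is the only float not equal to itself, so x != x detects it
# without math.isnan; the inf check is spelled out separately.
for bad in (float("inf"), float("nan")):
    assert bad == float("inf") or bad != bad
    assert not math.isfinite(bad)  # equivalent, more idiomatic guard
```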

scoreboard/tests/conftest.py

Lines changed: 144 additions & 0 deletions

```python
"""
Pytest configuration and shared fixtures for scoreboard tests.
"""

import pytest
import tempfile
import shutil
from pathlib import Path
import yaml
import csv


@pytest.fixture
def temp_dir():
    """Create a temporary directory for tests."""
    temp_path = Path(tempfile.mkdtemp())
    yield temp_path
    shutil.rmtree(temp_path)


@pytest.fixture
def sample_config():
    """Sample configuration dictionary."""
    return {
        "scoreboard": {
            "task": {
                "seq": {"solution": {"max": 4}},
                "omp": {"solution": {"max": 6}},
                "stl": {"solution": {"max": 8}},
                "tbb": {"solution": {"max": 6}},
                "all": {"solution": {"max": 10}},
                "mpi": {"solution": {"max": 0}},
            },
            "plagiarism": {"coefficient": 0.5},
            "efficiency": {"num_proc": 4},
            "deadlines": {
                "seq": "2025-01-15",
                "omp": "2025-02-15",
                "stl": "2025-03-15",
                "tbb": "2025-04-15",
                "all": "2025-05-15",
                "mpi": "2025-06-15",
            },
        }
    }


@pytest.fixture
def sample_plagiarism_config():
    """Sample plagiarism configuration dictionary."""
    return {
        "plagiarism": {
            "seq": ["broken_example"],
            "omp": [],
            "stl": ["cheater_task"],
            "tbb": [],
            "all": [],
            "mpi": [],
        }
    }


@pytest.fixture
def sample_task_structure(temp_dir):
    """Create a sample task directory structure."""
    tasks_dir = temp_dir / "tasks"

    # Create task directories
    task_dirs = [
        "example_task/seq",
        "example_task/omp",
        "example_task/stl",
        "disabled_task_disabled/seq",
        "disabled_task_disabled/omp",
        "partial_task/seq",
    ]

    for task_dir in task_dirs:
        (tasks_dir / task_dir).mkdir(parents=True)
        # Create a dummy source file
        (tasks_dir / task_dir / "main.cpp").touch()

    return tasks_dir


@pytest.fixture
def sample_performance_csv(temp_dir):
    """Create a sample performance CSV file."""
    csv_file = temp_dir / "performance.csv"

    data = [
        {
            "Task": "example_task",
            "SEQ": "1.0",
            "OMP": "0.5",
            "STL": "0.3",
            "TBB": "0.4",
            "ALL": "0.2",
        },
        {
            "Task": "disabled_task",
            "SEQ": "2.0",
            "OMP": "1.0",
            "STL": "0.8",
            "TBB": "0.9",
            "ALL": "0.7",
        },
        {
            "Task": "partial_task",
            "SEQ": "1.5",
            "OMP": "N/A",
            "STL": "N/A",
            "TBB": "N/A",
            "ALL": "N/A",
        },
    ]

    with open(csv_file, "w", newline="") as f:
        writer = csv.DictWriter(
            f, fieldnames=["Task", "SEQ", "OMP", "STL", "TBB", "ALL"]
        )
        writer.writeheader()
        writer.writerows(data)

    return csv_file


@pytest.fixture
def sample_config_files(temp_dir, sample_config, sample_plagiarism_config):
    """Create sample configuration files."""
    data_dir = temp_dir / "data"
    data_dir.mkdir()

    # Create threads-config.yml
    config_file = data_dir / "threads-config.yml"
    with open(config_file, "w") as f:
        yaml.dump(sample_config, f)

    # Create plagiarism.yml
    plagiarism_file = data_dir / "plagiarism.yml"
    with open(plagiarism_file, "w") as f:
        yaml.dump(sample_plagiarism_config, f)

    return data_dir
```
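Pytest injects these fixtures by parameter name. A hypothetical test (not part of this commit) showing how `sample_performance_csv` could drive `load_performance_data` from `main.py`:

```python
# Hypothetical usage sketch: pairs the sample_performance_csv fixture
# with load_performance_data; the test name and asserts are illustrative.
from main import load_performance_data


def test_load_performance_data_reads_all_rows(sample_performance_csv):
    perf_stats = load_performance_data(sample_performance_csv)

    # The fixture writes three tasks; each CSV row becomes one entry.
    assert set(perf_stats) == {"example_task", "disabled_task", "partial_task"}
    assert perf_stats["example_task"]["omp"] == "0.5"
    # MPI has no CSV column, so it is always reported as "N/A".
    assert perf_stats["partial_task"]["mpi"] == "N/A"
```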

scoreboard/tests/requirements.txt

Lines changed: 1 addition & 0 deletions

```
pytest>=7.0
```
Lines changed: 95 additions & 0 deletions

```python
from main import calculate_performance_metrics


class TestCalculatePerformanceMetrics:
    def test_calculate_performance_metrics_valid_values(self):
        acceleration, efficiency = calculate_performance_metrics("0.5", 4)
        assert acceleration == "2.00"
        assert efficiency == "50.00%"

        acceleration, efficiency = calculate_performance_metrics("0.25", 4)
        assert acceleration == "4.00"
        assert efficiency == "100.00%"

        acceleration, efficiency = calculate_performance_metrics("0.5", 2)
        assert acceleration == "2.00"
        assert efficiency == "100.00%"

    def test_calculate_performance_metrics_edge_cases(self):
        acceleration, efficiency = calculate_performance_metrics("0.1", 4)
        assert acceleration == "10.00"
        assert efficiency == "250.00%"

        acceleration, efficiency = calculate_performance_metrics("1.0", 4)
        assert acceleration == "1.00"
        assert efficiency == "25.00%"

        acceleration, efficiency = calculate_performance_metrics("2.0", 4)
        assert acceleration == "0.50"
        assert efficiency == "12.50%"

    def test_calculate_performance_metrics_invalid_values(self):
        acceleration, efficiency = calculate_performance_metrics("0.0", 4)
        assert acceleration == "?"
        assert efficiency == "?"

        acceleration, efficiency = calculate_performance_metrics("-1.0", 4)
        assert acceleration == "?"
        assert efficiency == "?"

        acceleration, efficiency = calculate_performance_metrics("invalid", 4)
        assert acceleration == "?"
        assert efficiency == "?"

        acceleration, efficiency = calculate_performance_metrics("", 4)
        assert acceleration == "?"
        assert efficiency == "?"

        acceleration, efficiency = calculate_performance_metrics("inf", 4)
        assert acceleration == "?"
        assert efficiency == "?"

        acceleration, efficiency = calculate_performance_metrics("nan", 4)
        assert acceleration == "?"
        assert efficiency == "?"

    def test_calculate_performance_metrics_special_strings(self):
        acceleration, efficiency = calculate_performance_metrics("?", 4)
        assert acceleration == "?"
        assert efficiency == "?"

        acceleration, efficiency = calculate_performance_metrics("N/A", 4)
        assert acceleration == "?"
        assert efficiency == "?"

        acceleration, efficiency = calculate_performance_metrics(None, 4)
        assert acceleration == "?"
        assert efficiency == "?"

    def test_calculate_performance_metrics_different_proc_counts(self):
        perf_val = "0.25"

        acceleration, efficiency = calculate_performance_metrics(perf_val, 1)
        assert acceleration == "4.00"
        assert efficiency == "400.00%"

        acceleration, efficiency = calculate_performance_metrics(perf_val, 2)
        assert acceleration == "4.00"
        assert efficiency == "200.00%"

        acceleration, efficiency = calculate_performance_metrics(perf_val, 8)
        assert acceleration == "4.00"
        assert efficiency == "50.00%"

        acceleration, efficiency = calculate_performance_metrics(perf_val, 16)
        assert acceleration == "4.00"
        assert efficiency == "25.00%"

    def test_calculate_performance_metrics_precision(self):
        acceleration, efficiency = calculate_performance_metrics("0.3", 3)
        assert acceleration == "3.33"
        assert efficiency == "111.11%"

        acceleration, efficiency = calculate_performance_metrics("0.7", 6)
        assert acceleration == "1.43"
        assert efficiency == "23.81%"
```
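The repeated assert pairs above could also be written table-driven; a sketch of the same invalid-input and special-string cases using `pytest.mark.parametrize` (a stylistic alternative, not part of this commit):

```python
import pytest

from main import calculate_performance_metrics


# Same invalid-input cases as above, as one parametrized test.
@pytest.mark.parametrize(
    "perf_val", ["0.0", "-1.0", "invalid", "", "inf", "nan", "?", "N/A", None]
)
def test_invalid_values_yield_placeholders(perf_val):
    acceleration, efficiency = calculate_performance_metrics(perf_val, 4)
    assert acceleration == "?"
    assert efficiency == "?"
```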
