
Commit b902279

Merge pull request rllm-org#159 from MattFisher/followup/worldsense
Followup: WorldSense
2 parents bf5a1d4 + 8c57b04 commit b902279

File tree: 3 files changed (+256, -5 lines)


src/inspect_evals/worldsense/_utils.py

Lines changed: 3 additions & 2 deletions

@@ -50,9 +50,10 @@ def preprocess_scores(
 
     # Calculate weighted values and biases
     score_df["weight"] = score_df["answer"].map(weight_mapping).astype(float)
-    score_df["bias"] = score_df["answer"].map(bias_mapping).astype(float)
+    score_df["bias"] = (
+        score_df["answer"].map(bias_mapping).astype(float) * score_df["weight"]
+    )
     score_df["value"] = score_df["value"].astype(float) * score_df["weight"]
-    score_df["bias"] *= score_df["weight"]
 
     # Group and normalize
    grouped_scores = (
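
For context, a minimal sketch of what this hunk changes: the bias column is now scaled by the weight column in the same expression that maps it, rather than in a separate in-place multiplication two statements later. The toy weight_mapping/bias_mapping values below are assumptions for illustration only; the real mappings live in _utils.py and are not shown in this diff.

    # Illustrative only: toy mappings, not the real ones from _utils.py.
    import pandas as pd

    weight_mapping = {"1": 0.25, "2": 0.25}
    bias_mapping = {"1": 1.0, "2": 1.0}

    df = pd.DataFrame({"answer": ["1", "2"]})
    df["weight"] = df["answer"].map(weight_mapping).astype(float)

    # Before: map bias, then scale it by weight in a later statement.
    bias_before = df["answer"].map(bias_mapping).astype(float)
    bias_before *= df["weight"]

    # After: fold the scaling into a single expression.
    bias_after = df["answer"].map(bias_mapping).astype(float) * df["weight"]

    assert bias_before.equals(bias_after)

Either form produces identical bias values; the change only removes the split across statements.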

src/inspect_evals/worldsense/worldsense.py

Lines changed: 2 additions & 3 deletions

@@ -52,6 +52,8 @@ def worldsense(problemnames: str | list[str] = []) -> Task:
     Returns:
         Task: A task object configured with a dataset filtered by problem names (if specified), a solver, a scoring pattern for evaluating task responses, and custom metrics.
     """
+    dataset = load_worldsense_dataset(sample_fields=record_to_sample, shuffle=True)
+
     # filter dataset if requested
     problemnames = problemnames if isinstance(problemnames, list) else [problemnames]
     if len(problemnames) > 0:
@@ -139,9 +141,6 @@ def load_worldsense_dataset(
     return dataset
 
 
-dataset = load_worldsense_dataset(sample_fields=record_to_sample, shuffle=True)
-
-
 @scorer(metrics=[accuracy(), stderr()])
 def pattern_with_metadata(
     pattern_str: str, ignore_case: bool = True, match_all: bool = False
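
The second hunk moves the module-level dataset = load_worldsense_dataset(...) call inside the worldsense() task body, so the dataset is built when the task is constructed rather than when the module is imported. A minimal sketch of that pattern, using a stub loader (the stub is illustrative, not the real implementation):

    # Sketch of the pattern adopted above: load inside the task factory, not at import time.
    def load_worldsense_dataset(sample_fields=None, shuffle=False):
        print("loading dataset ...")  # stand-in for the real (potentially slow) load
        return []

    def worldsense(problemnames=[]):
        # Nothing is loaded until the task is actually constructed.
        dataset = load_worldsense_dataset(sample_fields=None, shuffle=True)
        return dataset

    # Importing this module triggers no load; calling worldsense() does.
    if __name__ == "__main__":
        worldsense()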
Lines changed: 251 additions & 0 deletions

@@ -0,0 +1,251 @@
import pandas as pd
import pytest
from inspect_ai.scorer import Score

from inspect_evals.worldsense._utils import (
    compute_accuracy,
    compute_bias,
    preprocess_scores,
)
from inspect_evals.worldsense.worldsense import record_to_sample

# Adapted from the first records in the dataset
example_records = [
    {
        "Key": -176741083417243227,
        "problemname": "Compl.trivial",
        "tuple_ID": "shared_tuple_id_for_this_problem",
        "text": "Jeremy is enrolled in 3 courses per week: Jeremy takes philosophy before biology and finance before philosophy. \nChoose one of the following alternatives: (1) Jeremy takes philosophy in between finance and biology, (2) Jeremy takes philosophy outside of the time range between finance and biology, or (3) it is impossible to decide.\nThink carefully, and only respond with one of these possible options (1), (2), or (3).",
        "expectedresp": ["1", "2", "3"],
        "span": 2,
        "descr": "[(1, 2),(0, 1)]",
        "query": "(1, 0, 2)",
        "objects": ["finance", "philosophy", "biology"],
        "problemsize": 3,
        "skin": "courses",
        "qskin": "directQ2",
        "complexity": "Complexity_0",
        "query_len": 3,
        "goldresp_obfusc": "Mark",
    },
    {
        "Key": 1747235547611487721,
        "problemname": "Compl.trivial",
        "tuple_ID": "shared_tuple_id_for_this_problem",
        "text": "Jeremy is enrolled in 3 courses per week: Jeremy takes philosophy before biology and finance before philosophy. \nChoose one of the following alternatives: (1) Jeremy takes finance in between philosophy and biology, (2) Jeremy takes finance outside of the time range between philosophy and biology, or (3) it is impossible to decide.\nThink carefully, and only respond with one of these possible options (1), (2), or (3).",
        "expectedresp": ["1", "2", "3"],
        "span": 2,
        "descr": "[(1, 2),(0, 1)]",
        "query": "(0, 1, 2)",
        "objects": ["finance", "philosophy", "biology"],
        "problemsize": 3,
        "skin": "courses",
        "qskin": "directQ2",
        "complexity": "Complexity_0",
        "query_len": 3,
        "goldresp_obfusc": "Youssef",
    },
    {
        "Key": 1917042815647934077,
        "problemname": "Compl.trivial",
        "tuple_ID": "shared_tuple_id_for_this_problem",
        "text": "Jeremy is enrolled in 3 courses per week: Jeremy takes philosophy before biology and finance before philosophy. \nChoose one of the following alternatives: (1) Jeremy takes geography in between philosophy and biology, (2) Jeremy takes geography outside of the time range between philosophy and biology, or (3) it is impossible to decide.\nThink carefully, and only respond with one of these possible options (1), (2), or (3).",
        "expectedresp": ["1", "2", "3"],
        "span": 2,
        "descr": "[(1, 2),(0, 1)]",
        "query": "(-1, 1, 2)",
        "objects": ["finance", "philosophy", "biology"],
        "problemsize": 3,
        "skin": "courses",
        "qskin": "directQ2",
        "complexity": "Complexity_0",
        "query_len": 3,
        "goldresp_obfusc": "Yoda",
    },
]


@pytest.mark.parametrize(
    "record, expected_target",
    [
        (example_records[0], "1"),
        (example_records[1], "2"),
        (example_records[2], "3"),
    ],
)
def test_record_to_sample(record, expected_target):
    sample = record_to_sample(record)

    assert sample.input == record["text"]
    assert sample.choices == record["expectedresp"]
    assert sample.target == expected_target
    # assert sample.id == record["tuple_ID"]
    assert sample.metadata == {
        "tuple_ID": record["tuple_ID"],
        "problemname": record["problemname"],
        "problemsize": record["problemsize"],
    }


example_scores = [
    Score(
        value=0,
        answer="IMPOSSIBLE",
        explanation="IMPOSSIBLE",
        metadata={
            "tuple_ID": "fast_spec_r20_seed07_results_10-12-2023_06h28m50s_13726",
            "problemname": "Consist.normal",
            "problemsize": 4,
        },
    ),
    Score(
        value=0,
        answer="IMPOSSIBLE",
        explanation="IMPOSSIBLE",
        metadata={
            "tuple_ID": "fast_spec_r20_seed07_results_10-12-2023_06h28m50s_22690",
            "problemname": "Consist.trivial",
            "problemsize": 4,
        },
    ),
    Score(
        value=1.0,
        answer="1",
        explanation="(1)",
        metadata={
            "tuple_ID": "fast_spec_r20_seed07_results_10-12-2023_06h28m50s_25962",
            "problemname": "Compl.normal",
            "problemsize": 5,
        },
    ),
]


def test_preprocess_scores():
    scores = example_scores
    processed_scores_df = preprocess_scores(scores)
    assert isinstance(processed_scores_df, pd.DataFrame)
    assert not processed_scores_df.empty
    assert "value" in processed_scores_df.columns
    assert "bias" in processed_scores_df.columns
    assert "weight" in processed_scores_df.columns

    for col in ["value", "bias", "weight"]:
        assert processed_scores_df[col].dtype == float

    for score, processed_score in zip(scores, processed_scores_df.itertuples()):
        print(processed_score)
        assert score.metadata["tuple_ID"] == processed_score.tuple_ID
        assert score.metadata["problemname"] == processed_score.problemname
        assert score.metadata["problemsize"] == processed_score.problemsize
        assert -1 <= processed_score.bias <= 1
        assert 0 <= processed_score.weight <= 1
        assert 0 <= processed_score.value <= 1


def test_preprocess_scores_grouping():
    scores = [
        Score(
            value="1",
            answer="1",  # weight = 0.25, bias = 1
            metadata={"tuple_ID": 1, "problemname": "Infer.trivial", "problemsize": 3},
        ),
        Score(
            value="0",
            answer="1",  # weight = 0.25, bias = 1
            metadata={"tuple_ID": 1, "problemname": "Infer.trivial", "problemsize": 3},
        ),
    ]
    processed_scores_df = preprocess_scores(scores)

    # Check that the grouped scores match the expected values
    #    tuple_ID    problemname  problemsize  value  bias  weight
    # 0         1  Infer.trivial            3    0.5   1.0     0.5
    expected_data = {
        "tuple_ID": [1],
        "problemname": ["Infer.trivial"],
        "problemsize": [3],
        "value": [0.5],  # (1 * 0.25 + 0 * 0.25) / group weight 0.5
        "bias": [1.0],  # (1 * 0.25 + 1 * 0.25) / group weight 0.5
        "weight": [0.5],  # Sum of weights: 0.25 + 0.25
    }
    expected_df = pd.DataFrame(expected_data)
    assert expected_df.equals(processed_scores_df), "DataFrames are not equal"


def test_compute_accuracy():
    # NOTE: Unsure of what constitutes a valid combination of tuple_ID, problemname, and problemsize
    scores = [
        Score(
            value="1",
            answer="1",
            metadata={"tuple_ID": 1, "problemname": "Infer.trivial", "problemsize": 1},
        ),
        Score(
            value="1",
            answer="1",
            metadata={"tuple_ID": 1, "problemname": "Infer.trivial", "problemsize": 3},
        ),
        Score(
            value="0",
            answer="2",
            metadata={"tuple_ID": 1, "problemname": "Infer.trivial", "problemsize": 3},
        ),
        Score(
            value="1",
            answer="3",
            metadata={"tuple_ID": 2, "problemname": "Compl.normal", "problemsize": 2},
        ),
        Score(
            value="0",
            answer="TRUE",
            metadata={"tuple_ID": 2, "problemname": "Compl.normal", "problemsize": 2},
        ),
    ]
    expected_preprocessed_scores = pd.DataFrame(
        {
            "tuple_ID": [1, 1, 2],
            "problemname": ["Infer.trivial", "Infer.trivial", "Compl.normal"],
            "problemsize": [1, 3, 2],
            "value": [1.0, 0.5, 0.5],
            "bias": [1.0, 1.0, 0.0],
            "weight": [0.25, 0.5, 1.0],
        }
    )
    df = preprocess_scores(scores)
    assert df.equals(expected_preprocessed_scores), "DataFrames are not equal"

    accuracy = compute_accuracy(df)
    assert isinstance(accuracy, float)
    assert 0 <= accuracy <= 1
    assert accuracy == 0.625


def test_compute_bias():
    scores = [
        Score(
            value="1",
            answer="1",
            metadata={"tuple_ID": 1, "problemname": "Infer.trivial", "problemsize": 3},
        ),
        Score(
            value="0",
            answer="2",
            metadata={"tuple_ID": 1, "problemname": "Infer.trivial", "problemsize": 3},
        ),
        Score(
            value="1",
            answer="3",
            metadata={"tuple_ID": 2, "problemname": "Compl.normal", "problemsize": 2},
        ),
        Score(
            value="0",
            answer="TRUE",
            metadata={"tuple_ID": 2, "problemname": "Compl.normal", "problemsize": 2},
        ),
    ]
    df = preprocess_scores(scores)
    bias = compute_bias(df)
    assert isinstance(bias, float)
    assert -1 <= bias <= 1
    assert bias == 0.5
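
The asserted constants 0.625 and 0.5 are not derived in the test itself. One reading consistent with accuracy == 0.625, assuming compute_accuracy averages the preprocessed value column per tuple_ID (or per problemname, which gives the same result on this data) and then takes the mean of those group means; this is an assumption about compute_accuracy, whose implementation is not part of this diff:

    # Hedged illustration: one grouping that reproduces the asserted 0.625.
    import pandas as pd

    df = pd.DataFrame({"tuple_ID": [1, 1, 2], "value": [1.0, 0.5, 0.5]})
    per_group = df.groupby("tuple_ID")["value"].mean()  # tuple 1 -> 0.75, tuple 2 -> 0.5
    print(per_group.mean())  # 0.625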
