Commit 48285e4

Author: Val Brodsky (committed)

Add instrumentation for fixtures(temp)

1 parent 29f0d3f · commit 48285e4
3 files changed: +54 -3 lines

tests/integration/annotation_import/conftest.py

Lines changed: 12 additions & 1 deletion
@@ -9,6 +9,7 @@
 from typing import Type
 from labelbox.schema.labeling_frontend import LabelingFrontend
 from labelbox.schema.annotation_import import LabelImport, AnnotationImportState
+from labelbox.schema.project import Project
 from labelbox.schema.queue_mode import QueueMode
 
 DATA_ROW_PROCESSING_WAIT_TIMEOUT_SECONDS = 40
@@ -486,6 +487,7 @@ def initial_dataset(client, rand_gen):
 
 @pytest.fixture
 def configured_project(client, initial_dataset, ontology, rand_gen, image_url):
+    start_time = time.time()
     dataset = initial_dataset
     project = client.create_project(
         name=rand_gen(str),
@@ -496,14 +498,21 @@ def configured_project(client, initial_dataset, ontology, rand_gen, image_url):
         where=LabelingFrontend.name == "editor"))[0]
     project.setup(editor, ontology)
     data_row_ids = []
-
+    # print("Before creating data rows ", time.time() - start_time)
+    num_rows = 0
     for _ in range(len(ontology['tools']) + len(ontology['classifications'])):
         data_row_ids.append(dataset.create_data_row(row_data=image_url).uid)
+        num_rows += 1
+    # print("After creating data rows ", time.time() - start_time)
+
+    pytest.data_row_report['times'] += time.time() - start_time
+    pytest.data_row_report['num_rows'] += num_rows
     project.create_batch(
         rand_gen(str),
         data_row_ids,  # sample of data row objects
         5  # priority between 1(Highest) - 5(lowest)
     )
+    print("After creating batch ", time.time() - start_time)
     project.data_row_ids = data_row_ids
     yield project
     project.delete()
@@ -1006,6 +1015,7 @@ def model_run_with_training_metadata(rand_gen, model):
 @pytest.fixture
 def model_run_with_data_rows(client, configured_project, model_run_predictions,
                              model_run, wait_for_label_processing):
+    start_time = time.time()
     configured_project.enable_model_assisted_labeling()
 
     upload_task = LabelImport.create_from_objects(
@@ -1019,6 +1029,7 @@ def model_run_with_data_rows(client, configured_project, model_run_predictions,
     labels = wait_for_label_processing(configured_project)
     label_ids = [label.uid for label in labels]
     model_run.upsert_labels(label_ids)
+    print(f"model_run_with_data_rows: {time.time() - start_time}")
     yield model_run
     model_run.delete()
     # TODO: Delete resources when that is possible ..
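
For context, the counters added to configured_project feed pytest.data_row_report, which accumulates total wall-clock time and the number of data rows created across every use of the fixture. A minimal sketch of that accumulator pattern on its own (hypothetical names, no pytest or Labelbox dependencies; sleep stands in for the data-row upload):

    import time

    # Stand-in for pytest.data_row_report: total seconds spent and rows created.
    data_row_report = {'times': 0.0, 'num_rows': 0}

    def create_rows(n):
        # Simulate creating n data rows and record how long the loop took.
        start_time = time.time()
        num_rows = 0
        for _ in range(n):
            time.sleep(0.01)  # stands in for dataset.create_data_row(...)
            num_rows += 1
        data_row_report['times'] += time.time() - start_time
        data_row_report['num_rows'] += num_rows

    create_rows(5)
    create_rows(3)
    print("avg seconds per row:",
          data_row_report['times'] / data_row_report['num_rows'])

The same division gives an average cost per data row from the real report once a test session finishes.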

tests/integration/conftest.py

Lines changed: 36 additions & 0 deletions
@@ -1,3 +1,5 @@
+from collections import defaultdict
+from itertools import islice
 import json
 import os
 import re
@@ -807,3 +809,37 @@ def upload_invalid_data_rows_for_dataset(dataset: Dataset):
         },
     ] * 2)
     task.wait_till_done()
+
+
+def pytest_configure():
+    pytest.report = defaultdict(int)
+    pytest.data_row_report = {'times': 0, 'num_rows': 0}
+
+
+@pytest.hookimpl(hookwrapper=True)
+def pytest_fixture_setup(fixturedef, request):
+    start = time.time()
+    yield
+
+    end = time.time()
+
+    exec_time = end - start
+    pytest.report[fixturedef.argname] += exec_time
+
+    # print('pytest_fixture_setup'
+    #       f', request={request}'
+    #       f', create_data_row_time={end - start}')
+
+
+@pytest.fixture(scope='session', autouse=True)
+def print_perf_summary():
+    yield
+
+    sorted_dict = dict(
+        sorted(pytest.report.items(), key=lambda item: item[1], reverse=True))
+    num_of_entries = 10 if len(sorted_dict) >= 10 else len(sorted_dict)
+    slowest_fixtures = [
+        (aaa, sorted_dict[aaa]) for aaa in islice(sorted_dict, num_of_entries)
+    ]
+    print("\nTop slowest fixtures:\n", slowest_fixtures)
+    print("Data row report:\n", pytest.data_row_report)

tests/integration/test_dataset.py

Lines changed: 6 additions & 2 deletions
@@ -53,8 +53,12 @@ def dataset_for_filtering(client, rand_gen):
 
     yield name_1, d1, name_2, d2
 
-    d1.delete()
-    d2.delete()
+
+def test_dataset_filtering(client, dataset_for_filtering):
+    name_1, d1, name_2, d2 = dataset_for_filtering
+
+    assert list(client.get_datasets(where=Dataset.name == name_1)) == [d1]
+    assert list(client.get_datasets(where=Dataset.name == name_2)) == [d2]
 
 
 def test_dataset_filtering(client, dataset_for_filtering):
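
For reference, the removed d1.delete() / d2.delete() lines were the fixture's teardown: in a pytest yield fixture, everything after the yield runs during finalization. A minimal sketch of that pattern (hypothetical resource, standard pytest behaviour):

    import pytest

    @pytest.fixture
    def temp_resource():
        resource = {'name': 'example'}  # setup: create the resource
        yield resource                  # hand it to the test
        resource.clear()                # teardown: runs after the test finishes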
