|
import asyncio
import json
import os
from typing import Any, Dict, Generator

import pytest

from test_model import TEST_CONFIGS
| 7 | + |
# Subset of TEST_CONFIGS whose reference outputs have not been captured yet:
# either the greedy or the batch expected output is still marked "unknown".
UNKNOWN_CONFIGS = {
    name: cfg
    for name, cfg in TEST_CONFIGS.items()
    if "unknown" in (cfg["expected_greedy_output"], cfg["expected_batch_output"])
}
| 14 | + |
| 15 | + |
@pytest.fixture(scope="module", params=UNKNOWN_CONFIGS.keys())
def test_config(request) -> Dict[str, Any]:
    """Provide one model configuration per parametrized test run.

    Returns a shallow copy of the config with the parametrization key added
    under "test_name", so the shared module-level UNKNOWN_CONFIGS entries are
    not mutated across (or within) test sessions.
    """
    config = dict(UNKNOWN_CONFIGS[request.param])
    config["test_name"] = request.param
    return config
| 22 | + |
| 23 | + |
@pytest.fixture(scope="module")
def test_name(test_config):
    """Expose the current parametrized test's name from its configuration."""
    name = test_config["test_name"]
    yield name
| 27 | + |
| 28 | + |
@pytest.fixture(scope="module")
def tgi_service(launcher, test_config, test_name) -> Generator:
    """Launch a TGI service for the model under test; torn down on exit."""
    model_id = test_config["model_id"]
    with launcher(model_id, test_name) as service:
        yield service
| 34 | + |
| 35 | + |
@pytest.mark.asyncio
async def test_capture_expected_outputs(tgi_service, test_config, test_name):
    """Capture reference outputs for models whose expected outputs are unknown.

    Runs one greedy request plus four identical requests issued concurrently
    (so the server has a chance to actually batch them), then merges the
    results into a shared JSON file keyed by test name.
    """
    print(f"Testing {test_name} with {test_config['model_id']}")

    # Wait for service to be ready before issuing any request.
    await tgi_service.health(1000)
    client = tgi_service.client

    async def _generate() -> str:
        # One greedy generation of the configured prompt; deduplicates the
        # request code shared by the single and batch phases.
        response = await client.generate(
            test_config["input"],
            max_new_tokens=32,
        )
        return response.generated_text

    # Test single request (greedy)
    print("Testing single request...")
    greedy_output = await _generate()

    # Test multiple requests: fire them concurrently — awaiting each request
    # sequentially would never give the server more than one request at a
    # time, so nothing would ever be batched. gather preserves input order.
    print("Testing batch requests...")
    responses = list(await asyncio.gather(*(_generate() for _ in range(4))))

    # Store results in a JSON file, merging with any previously captured
    # entries so runs for different models accumulate in one file.
    output_file = "server/integration-tests/expected_outputs.json"
    results = {}
    if os.path.exists(output_file):
        with open(output_file, "r") as f:
            results = json.load(f)

    # Update results for this model.
    results[test_name] = {
        "model_id": test_config["model_id"],
        "input": test_config["input"],
        "greedy_output": greedy_output,
        "batch_outputs": responses,
        "args": test_config["args"],
    }

    # Ensure the destination directory exists before writing (a fresh
    # checkout may not have it yet).
    os.makedirs(os.path.dirname(output_file), exist_ok=True)
    with open(output_file, "w") as f:
        json.dump(results, f, indent=2)

    print(f"\nResults for {test_name} saved to {output_file}")
0 commit comments