Commit aa9eead

Add langchain standard integration tests

1 parent 9b7c35c
File tree

5 files changed, +1120 −203 lines

langchain/pyproject.toml

Lines changed: 1 addition & 0 deletions
@@ -34,6 +34,7 @@ dev = [
     "mypy>=1.13.0",
     "pytest>=8.3.3",
     "ruff>=0.9.0,<0.10",
+    "langchain-tests>=0.3.20",
 ]

 [tool.ruff.lint]

langchain/tests/conftest.py

Lines changed: 186 additions & 0 deletions
import json
import logging
import os
import time
from collections.abc import Iterator
from pathlib import Path
from typing import Literal

import pytest
import urllib3
import vectorize_client as v
from vectorize_client import ApiClient, ApiException, RetrieveDocumentsRequest


@pytest.fixture(scope="session")
def api_token() -> str:
    token = os.getenv("VECTORIZE_TOKEN")
    if not token:
        msg = "Please set the VECTORIZE_TOKEN environment variable"
        raise ValueError(msg)
    return token


@pytest.fixture(scope="session")
def org_id() -> str:
    org = os.getenv("VECTORIZE_ORG")
    if not org:
        msg = "Please set the VECTORIZE_ORG environment variable"
        raise ValueError(msg)
    return org


@pytest.fixture(scope="session")
def environment() -> Literal["prod", "dev", "local", "staging"]:
    env = os.getenv("VECTORIZE_ENV", "prod")
    if env not in ["prod", "dev", "local", "staging"]:
        msg = "Invalid VECTORIZE_ENV environment variable."
        raise ValueError(msg)
    return env


@pytest.fixture(scope="session")
def api_client(api_token: str, environment: str) -> Iterator[ApiClient]:
    header_name = None
    header_value = None
    if environment == "prod":
        host = "https://api.vectorize.io/v1"
    elif environment == "dev":
        host = "https://api-dev.vectorize.io/v1"
    elif environment == "local":
        host = "http://localhost:3000/api"
        header_name = "x-lambda-api-key"
        header_value = api_token
    else:
        host = "https://api-staging.vectorize.io/v1"

    with v.ApiClient(
        v.Configuration(host=host, access_token=api_token, debug=True),
        header_name,
        header_value,
    ) as api:
        yield api


@pytest.fixture(scope="session")
def pipeline_id(api_client: v.ApiClient, org_id: str) -> Iterator[str]:
    pipelines = v.PipelinesApi(api_client)

    # Create a file-upload source connector for the test document.
    connectors_api = v.SourceConnectorsApi(api_client)
    response = connectors_api.create_source_connector(
        org_id,
        v.CreateSourceConnectorRequest(
            v.FileUpload(name="from api", type="FILE_UPLOAD")
        ),
    )
    source_connector_id = response.connector.id
    logging.info("Created source connector %s", source_connector_id)

    # Request an upload URL and push research.pdf to the connector.
    uploads_api = v.UploadsApi(api_client)
    upload_response = uploads_api.start_file_upload_to_connector(
        org_id,
        source_connector_id,
        v.StartFileUploadToConnectorRequest(
            name="research.pdf",
            content_type="application/pdf",
            metadata=json.dumps({"created-from-api": True}),
        ),
    )

    http = urllib3.PoolManager()
    this_dir = Path(__file__).parent
    file_path = this_dir / "research.pdf"

    with file_path.open("rb") as f:
        http_response = http.request(
            "PUT",
            upload_response.upload_url,
            body=f,
            headers={
                "Content-Type": "application/pdf",
                "Content-Length": str(file_path.stat().st_size),
            },
        )
    if http_response.status != 200:
        msg = f"Upload failed with status {http_response.status}"
        raise ValueError(msg)
    logging.info("Upload successful")

    # Use the built-in Vectorize AI platform and vector database connectors.
    ai_platforms = v.AIPlatformConnectorsApi(api_client).get_ai_platform_connectors(
        org_id
    )
    builtin_ai_platform = next(
        c.id for c in ai_platforms.ai_platform_connectors if c.type == "VECTORIZE"
    )
    logging.info("Using AI platform %s", builtin_ai_platform)

    vector_databases = v.DestinationConnectorsApi(
        api_client
    ).get_destination_connectors(org_id)
    builtin_vector_db = next(
        c.id for c in vector_databases.destination_connectors if c.type == "VECTORIZE"
    )
    logging.info("Using destination connector %s", builtin_vector_db)

    # Create a manually scheduled pipeline wired to the connectors above.
    pipeline_response = pipelines.create_pipeline(
        org_id,
        v.PipelineConfigurationSchema(
            source_connectors=[
                v.PipelineSourceConnectorSchema(
                    id=source_connector_id,
                    type=v.SourceConnectorType.FILE_UPLOAD,
                    config={},
                )
            ],
            destination_connector=v.PipelineDestinationConnectorSchema(
                id=builtin_vector_db,
                type="VECTORIZE",
                config={},
            ),
            ai_platform_connector=v.PipelineAIPlatformConnectorSchema(
                id=builtin_ai_platform,
                type="VECTORIZE",
                config={},
            ),
            pipeline_name="Test pipeline",
            schedule=v.ScheduleSchema(type="manual"),
        ),
    )
    pipeline_id = pipeline_response.data.id
    logging.info("Created pipeline %s", pipeline_id)

    # Poll until the uploaded document has been processed and retrieval
    # returns the requested two results; keep retrying while the endpoint
    # responds with 503.
    request = RetrieveDocumentsRequest(
        question="query",
        num_results=2,
    )
    start = time.time()
    while True:
        try:
            response = pipelines.retrieve_documents(org_id, pipeline_id, request)
        except ApiException as e:
            if "503" not in str(e):
                raise
        else:
            docs = response.documents
            if len(docs) == 2:
                break
        if time.time() - start > 180:
            msg = "Docs not retrieved in time"
            raise RuntimeError(msg)
        time.sleep(1)

    yield pipeline_id

    # Teardown: remove the pipeline created for this test session.
    try:
        pipelines.delete_pipeline(org_id, pipeline_id)
    except Exception:
        logging.exception("Failed to delete pipeline %s", pipeline_id)
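
The session fixtures above read VECTORIZE_TOKEN and VECTORIZE_ORG (and optionally VECTORIZE_ENV) from the environment before any test runs. A minimal sketch of driving the suite programmatically with pytest.main, assuming the tests live under langchain/tests as in this commit; the token and organization values are placeholders, not part of the commit:

import os
import pytest

# Credentials consumed by the session-scoped fixtures in conftest.py.
os.environ["VECTORIZE_TOKEN"] = "<your API token>"
os.environ["VECTORIZE_ORG"] = "<your organization id>"
os.environ.setdefault("VECTORIZE_ENV", "prod")  # fixture default is "prod"

# Equivalent to running `pytest langchain/tests -v` from the repository root.
raise SystemExit(pytest.main(["langchain/tests", "-v"]))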
Lines changed: 51 additions & 0 deletions
from typing import Literal

import pytest
from langchain_tests.integration_tests import RetrieversIntegrationTests

from langchain_vectorize import VectorizeRetriever


class TestVectorizeRetrieverIntegration(RetrieversIntegrationTests):
    @classmethod
    @pytest.fixture(autouse=True, scope="class")
    def setup(
        cls,
        environment: Literal["prod", "dev", "local", "staging"],
        api_token: str,
        org_id: str,
        pipeline_id: str,
    ) -> None:
        cls.environment = environment
        cls.api_token = api_token
        cls.org_id = org_id
        cls.pipeline_id = pipeline_id

    @property
    def retriever_constructor(self) -> type[VectorizeRetriever]:
        return VectorizeRetriever

    @property
    def retriever_constructor_params(self) -> dict:
        return {
            "environment": self.environment,
            "api_token": self.api_token,
            "organization": self.org_id,
            "pipeline_id": self.pipeline_id,
        }

    @property
    def retriever_query_example(self) -> str:
        return "What are you?"

    @pytest.mark.xfail(
        reason="VectorizeRetriever does not support k parameter in constructor"
    )
    def test_k_constructor_param(self) -> None:
        raise NotImplementedError

    @pytest.mark.xfail(
        reason="VectorizeRetriever does not support k parameter in invoke"
    )
    def test_invoke_with_k_kwarg(self) -> None:
        raise NotImplementedError
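
For reference, a hand-rolled sketch of roughly what the inherited standard tests exercise: build the retriever from retriever_constructor and retriever_constructor_params, then invoke it with retriever_query_example. The values below are placeholders for what the session fixtures supply at runtime:

from langchain_vectorize import VectorizeRetriever

# Same keyword arguments as retriever_constructor_params above; placeholder values.
retriever = VectorizeRetriever(
    environment="prod",
    api_token="<VECTORIZE_TOKEN>",
    organization="<VECTORIZE_ORG>",
    pipeline_id="<pipeline id created by the conftest fixture>",
)

# LangChain retrievers are invoked like any Runnable and return Documents.
docs = retriever.invoke("What are you?")
for doc in docs:
    print(doc.page_content[:80])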

0 commit comments

Comments
 (0)