@@ -32,11 +32,26 @@ client = Openlayer(
     api_key=os.environ.get("OPENLAYER_API_KEY"),
 )
 
-project_create_response = client.projects.create(
-    name="My Project",
-    task_type="llm-base",
+data_stream_response = client.inference_pipelines.data.stream(
+    "182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+    config={
+        "input_variable_names": ["user_query"],
+        "output_column_name": "output",
+        "num_of_token_column_name": "tokens",
+        "cost_column_name": "cost",
+        "timestamp_column_name": "timestamp",
+    },
+    rows=[
+        {
+            "user_query": "what's the meaning of life?",
+            "output": "42",
+            "tokens": 7,
+            "cost": 0.02,
+            "timestamp": 1620000000,
+        }
+    ],
 )
-print(project_create_response.id)
+print(data_stream_response.success)
 ```
 
 While you can provide an `api_key` keyword argument,
@@ -60,11 +75,26 @@ client = AsyncOpenlayer(
 
 
 async def main() -> None:
-    project_create_response = await client.projects.create(
-        name="My Project",
-        task_type="llm-base",
+    data_stream_response = await client.inference_pipelines.data.stream(
+        "182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+        config={
+            "input_variable_names": ["user_query"],
+            "output_column_name": "output",
+            "num_of_token_column_name": "tokens",
+            "cost_column_name": "cost",
+            "timestamp_column_name": "timestamp",
+        },
+        rows=[
+            {
+                "user_query": "what's the meaning of life?",
+                "output": "42",
+                "tokens": 7,
+                "cost": 0.02,
+                "timestamp": 1620000000,
+            }
+        ],
     )
-    print(project_create_response.id)
+    print(data_stream_response.success)
 
 
 asyncio.run(main())
@@ -97,9 +127,24 @@ from openlayer import Openlayer
 client = Openlayer()
 
 try:
-    client.projects.create(
-        name="My Project",
-        task_type="llm-base",
+    client.inference_pipelines.data.stream(
+        "182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+        config={
+            "input_variable_names": ["user_query"],
+            "output_column_name": "output",
+            "num_of_token_column_name": "tokens",
+            "cost_column_name": "cost",
+            "timestamp_column_name": "timestamp",
+        },
+        rows=[
+            {
+                "user_query": "what's the meaning of life?",
+                "output": "42",
+                "tokens": 7,
+                "cost": 0.02,
+                "timestamp": 1620000000,
+            }
+        ],
     )
 except openlayer.APIConnectionError as e:
     print("The server could not be reached")
@@ -143,9 +188,24 @@ client = Openlayer(
 )
 
 # Or, configure per-request:
-client.with_options(max_retries=5).projects.create(
-    name="My Project",
-    task_type="llm-base",
+client.with_options(max_retries=5).inference_pipelines.data.stream(
+    "182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+    config={
+        "input_variable_names": ["user_query"],
+        "output_column_name": "output",
+        "num_of_token_column_name": "tokens",
+        "cost_column_name": "cost",
+        "timestamp_column_name": "timestamp",
+    },
+    rows=[
+        {
+            "user_query": "what's the meaning of life?",
+            "output": "42",
+            "tokens": 7,
+            "cost": 0.02,
+            "timestamp": 1620000000,
+        }
+    ],
 )
 ```
151211
@@ -169,9 +229,24 @@ client = Openlayer(
 )
 
 # Override per-request:
-client.with_options(timeout=5.0).projects.create(
-    name="My Project",
-    task_type="llm-base",
+client.with_options(timeout=5.0).inference_pipelines.data.stream(
+    "182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+    config={
+        "input_variable_names": ["user_query"],
+        "output_column_name": "output",
+        "num_of_token_column_name": "tokens",
+        "cost_column_name": "cost",
+        "timestamp_column_name": "timestamp",
+    },
+    rows=[
+        {
+            "user_query": "what's the meaning of life?",
+            "output": "42",
+            "tokens": 7,
+            "cost": 0.02,
+            "timestamp": 1620000000,
+        }
+    ],
 )
 ```
177252
@@ -211,14 +286,27 @@ The "raw" Response object can be accessed by prefixing `.with_raw_response.` to
 from openlayer import Openlayer
 
 client = Openlayer()
-response = client.projects.with_raw_response.create(
-    name="My Project",
-    task_type="llm-base",
+response = client.inference_pipelines.data.with_raw_response.stream(
+    "182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+    config={
+        "input_variable_names": ["user_query"],
+        "output_column_name": "output",
+        "num_of_token_column_name": "tokens",
+        "cost_column_name": "cost",
+        "timestamp_column_name": "timestamp",
+    },
+    rows=[{
+        "user_query": "what's the meaning of life?",
+        "output": "42",
+        "tokens": 7,
+        "cost": 0.02,
+        "timestamp": 1620000000,
+    }],
 )
 print(response.headers.get('X-My-Header'))
 
-project = response.parse()  # get the object that `projects.create()` would have returned
-print(project.id)
+data = response.parse()  # get the object that `inference_pipelines.data.stream()` would have returned
+print(data.success)
 ```
223311
These methods return an [`APIResponse`](https://github.com/openlayer-ai/openlayer-python/tree/main/src/openlayer/_response.py) object.
@@ -232,9 +320,24 @@ The above interface eagerly reads the full response body when you make the reque
232320To stream the response body, use ` .with_streaming_response ` instead, which requires a context manager and only reads the response body once you call ` .read() ` , ` .text() ` , ` .json() ` , ` .iter_bytes() ` , ` .iter_text() ` , ` .iter_lines() ` or ` .parse() ` . In the async client, these are async methods.
233321
234322``` python
235- with client.projects.with_streaming_response.create(
236- name = " My Project" ,
237- task_type = " llm-base" ,
323+ with client.inference_pipelines.data.with_streaming_response.stream(
324+ " 182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e" ,
325+ config = {
326+ " input_variable_names" : [" user_query" ],
327+ " output_column_name" : " output" ,
328+ " num_of_token_column_name" : " tokens" ,
329+ " cost_column_name" : " cost" ,
330+ " timestamp_column_name" : " timestamp" ,
331+ },
332+ rows = [
333+ {
334+ " user_query" : " what's the meaning of life?" ,
335+ " output" : " 42" ,
336+ " tokens" : 7 ,
337+ " cost" : 0.02 ,
338+ " timestamp" : 1620000000 ,
339+ }
340+ ],
238341) as response:
239342 print (response.headers.get(" X-My-Header" ))
240343
0 commit comments