|
2 | 2 |
|
3 | 3 | from openlayer import Openlayer |
4 | 4 |
|
5 | | -# Prepare the config for the data, which depends on your project's task type. In this |
6 | | -# case, we have an LLM project: |
7 | | -from openlayer.types.inference_pipelines import data_stream_params |
8 | | - |
9 | | -# Let's say we want to stream the following row, which represents a model prediction: |
10 | | -data = {"user_query": "what's the meaning of life?", "output": "42", "tokens": 7, "cost": 0.02, "timestamp": 1620000000} |
11 | | - |
12 | 5 | client = Openlayer( |
13 | 6 | # This is the default and can be omitted |
14 | 7 | api_key=os.environ.get("OPENLAYER_API_KEY"), |
15 | 8 | ) |
16 | 9 |
|
17 | | -config = data_stream_params.ConfigLlmData( |
18 | | - input_variable_names=["user_query"], |
19 | | - output_column_name="output", |
20 | | - num_of_token_column_name="tokens", |
21 | | - cost_column_name="cost", |
22 | | - timestamp_column_name="timestamp", |
23 | | - prompt=[{"role": "user", "content": "{{ user_query }}"}], |
24 | | -) |
| 10 | +# Let's say we want to stream the following row, which represents a tabular |
| 11 | +# classification model prediction, with features and a prediction: |
| 12 | +data = { |
| 13 | + "CreditScore": 600, |
| 14 | + "Geography": "France", |
| 15 | + "Gender": "Male", |
| 16 | + "Age": 42, |
| 17 | + "Tenure": 5, |
| 18 | + "Balance": 100000, |
| 19 | + "NumOfProducts": 1, |
| 20 | + "HasCrCard": 1, |
| 21 | + "IsActiveMember": 1, |
| 22 | + "EstimatedSalary": 50000, |
| 23 | + "AggregateRate": 0.5, |
| 24 | + "Year": 2020, |
| 25 | + "Prediction": 1, |
| 26 | +} |
25 | 27 |
|
| 28 | +# Prepare the config for the data, which depends on your project's task type. In this |
| 29 | +# case, we have a Tabular Classification project:
| 30 | +from openlayer.types.inference_pipelines import data_stream_params |
| 31 | + |
| 32 | +config = data_stream_params.ConfigTabularClassificationData( |
| 33 | + categorical_feature_names=["Gender", "Geography"], |
| 34 | + class_names=["Retained", "Exited"], |
| 35 | + feature_names=[ |
| 36 | + "CreditScore", |
| 37 | + "Geography", |
| 38 | + "Gender", |
| 39 | + "Age", |
| 40 | + "Tenure", |
| 41 | + "Balance", |
| 42 | + "NumOfProducts", |
| 43 | + "HasCrCard", |
| 44 | + "IsActiveMember", |
| 45 | + "EstimatedSalary", |
| 46 | + "AggregateRate", |
| 47 | + "Year", |
| 48 | + ], |
| 49 | + predictions_column_name="Prediction", |
| 50 | +) |
26 | 51 |
|
| 52 | +# Now, you can stream the data to the inference pipeline: |
27 | 53 | data_stream_response = client.inference_pipelines.data.stream( |
28 | 54 | inference_pipeline_id="YOUR_INFERENCE_PIPELINE_ID", |
29 | 55 | rows=[data], |
|
0 commit comments