docs/aws/install.md (15 additions, 15 deletions)
````diff
@@ -18,7 +18,7 @@ cortex env default aws
 ```
 
 <!-- CORTEX_VERSION_MINOR -->
-Try the [tutorial](../../examples/pytorch/text-generator/README.md) or deploy one of our [examples](https://github.com/cortexlabs/cortex/tree/master/examples).
+Try the [tutorial](../../examples/pytorch/text-generator/README.md) or deploy one of our [examples](https://github.com/cortexlabs/cortex/tree/0.23/examples).
````

Later in the same file (lines 66-69):

```diff
-# note: if using "internal", you must configure VPC Peering to connect your CLI to your cluster operator (https://docs.cortex.dev/v/master/aws/vpc-peering)
+# note: if using "internal", you must configure VPC Peering to connect your CLI to your cluster operator (https://docs.cortex.dev/v/0.23/aws/vpc-peering)
 operator_load_balancer_scheme: internet-facing
 
 # API Gateway [public (API Gateway will be used by default, can be disabled per API) | none (API Gateway will be disabled for all APIs)]
```
```diff
@@ -86,19 +86,19 @@ The docker images used by the Cortex cluster can also be overridden, although th
 The default docker images used for your Predictors are listed in the instructions for [system packages](../deployments/system-packages.md), and can be overridden in your [Realtime API configuration](../deployments/realtime-api/api-configuration.md) and in your [Batch API configuration](../deployments/batch-api/api-configuration.md).
```
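For context, this is roughly how the affected networking fields of a cluster configuration file read once the docs link is pinned to 0.23. Only the comments and `operator_load_balancer_scheme` appear in the diff above; the `api_gateway` field name and the illustrative values are assumptions.

```yaml
# cluster.yaml (illustrative excerpt; api_gateway and its value are assumptions, not part of the diff)

# note: if using "internal", you must configure VPC Peering to connect your CLI to your cluster operator (https://docs.cortex.dev/v/0.23/aws/vpc-peering)
operator_load_balancer_scheme: internet-facing

# API Gateway [public (API Gateway will be used by default, can be disabled per API) | none (API Gateway will be disabled for all APIs)]
api_gateway: public
```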
docs/deployments/batch-api/api-configuration.md (4 additions, 4 deletions)
```diff
@@ -15,7 +15,7 @@ Reference the section below which corresponds to your Predictor type: [Python](#
 path: <string> # path to a python file with a PythonPredictor class definition, relative to the Cortex root (required)
 config: <string: value> # arbitrary dictionary passed to the constructor of the Predictor (can be overridden by config passed in job submission) (optional)
 python_path: <string> # path to the root of your Python folder that will be appended to PYTHONPATH (default: folder containing cortex.yaml)
-image: <string> # docker image to use for the Predictor (default: quay.io/cortexlabs/python-predictor-cpu:master or quay.io/cortexlabs/python-predictor-gpu:master based on compute)
+image: <string> # docker image to use for the Predictor (default: quay.io/cortexlabs/python-predictor-cpu:0.23.0 or quay.io/cortexlabs/python-predictor-gpu:0.23.0 based on compute)
 env: <string: string> # dictionary of environment variables
 networking:
 endpoint: <string> # the endpoint for the API (default: <api_name>)
```
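To see these fields in context, below is a minimal, hypothetical cortex.yaml for a Python Predictor Batch API with the image pinned explicitly. Only the field names and the default image tag come from the reference above; the API name, kind, file path, and config values are illustrative assumptions.

```yaml
# cortex.yaml (hypothetical example; only the field names and image tag come from the reference above)
- name: image-classifier            # assumed API name
  kind: BatchAPI                    # assumed kind for a Batch API deployment
  predictor:
    type: python
    path: predictor.py              # hypothetical path to a PythonPredictor class definition
    config:                         # arbitrary dictionary; can be overridden by config passed in job submission
      model_dir: s3://my-bucket/models/
    image: quay.io/cortexlabs/python-predictor-cpu:0.23.0   # matches the new pinned default
  networking:
    endpoint: /image-classifier     # defaults to the API name if omitted
```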
```diff
@@ -50,8 +50,8 @@ See additional documentation for [compute](../compute.md), [networking](../../aw
 batch_interval: <duration> # the maximum amount of time to spend waiting for additional requests before running inference on the batch of requests
 config: <string: value> # arbitrary dictionary passed to the constructor of the Predictor (can be overridden by config passed in job submission) (optional)
 python_path: <string> # path to the root of your Python folder that will be appended to PYTHONPATH (default: folder containing cortex.yaml)
-image: <string> # docker image to use for the Predictor (default: quay.io/cortexlabs/tensorflow-predictor:master)
-tensorflow_serving_image: <string> # docker image to use for the TensorFlow Serving container (default: quay.io/cortexlabs/tensorflow-serving-gpu:master or quay.io/cortexlabs/tensorflow-serving-cpu:master based on compute)
+image: <string> # docker image to use for the Predictor (default: quay.io/cortexlabs/tensorflow-predictor:0.23.0)
+tensorflow_serving_image: <string> # docker image to use for the TensorFlow Serving container (default: quay.io/cortexlabs/tensorflow-serving-gpu:0.23.0 or quay.io/cortexlabs/tensorflow-serving-cpu:0.23.0 based on compute)
 env: <string: string> # dictionary of environment variables
 networking:
 endpoint: <string> # the endpoint for the API (default: <api_name>)
```
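The TensorFlow Predictor is the only variant above with two image fields, so pinning it means setting both. A sketch under that assumption, with a hypothetical path; only the field names and image tags are taken from the reference above.

```yaml
# predictor section of a hypothetical TensorFlow Batch API (cortex.yaml excerpt)
predictor:
  type: tensorflow
  path: predictor.py               # hypothetical path to a TensorFlowPredictor class definition
  image: quay.io/cortexlabs/tensorflow-predictor:0.23.0
  tensorflow_serving_image: quay.io/cortexlabs/tensorflow-serving-cpu:0.23.0   # or the -gpu variant when GPU compute is requested
```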
```diff
@@ -82,7 +82,7 @@ See additional documentation for [compute](../compute.md), [networking](../../aw
 ...
 config: <string: value> # arbitrary dictionary passed to the constructor of the Predictor (can be overridden by config passed in job submission) (optional)
 python_path: <string> # path to the root of your Python folder that will be appended to PYTHONPATH (default: folder containing cortex.yaml)
-image: <string> # docker image to use for the Predictor (default: quay.io/cortexlabs/onnx-predictor-gpu:master or quay.io/cortexlabs/onnx-predictor-cpu:master based on compute)
+image: <string> # docker image to use for the Predictor (default: quay.io/cortexlabs/onnx-predictor-gpu:0.23.0 or quay.io/cortexlabs/onnx-predictor-cpu:0.23.0 based on compute)
 env: <string: string> # dictionary of environment variables
 networking:
 endpoint: <string> # the endpoint for the API (default: <api_name>)
```
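The Python and ONNX defaults are chosen "based on compute", i.e. the -gpu image is used only when the API requests a GPU. A hypothetical excerpt illustrating that, with the compute block assumed from general Cortex usage rather than taken from this diff:

```yaml
# hypothetical ONNX Batch API excerpt: requesting a GPU selects the -gpu default image
predictor:
  type: onnx
  path: predictor.py               # hypothetical path to an ONNXPredictor class definition
  # with no explicit image, the default would be quay.io/cortexlabs/onnx-predictor-gpu:0.23.0 because a GPU is requested below
compute:
  gpu: 1
```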