docs/workloads/batch/configuration.md: 3 additions & 0 deletions
@@ -14,6 +14,7 @@
    image: <string> # docker image to use for the Predictor (default: quay.io/cortexlabs/python-predictor-cpu:master or quay.io/cortexlabs/python-predictor-gpu:master based on compute)
    env: <string: string> # dictionary of environment variables
    log_level: <string> # log level that can be "debug", "info", "warning" or "error" (default: "info")
+   shm_size: <string> # size of shared memory (/dev/shm) for sharing data between multiple processes, e.g. 64Mi or 1Gi (default: Null)
  networking:
    endpoint: <string> # the endpoint for the API (default: <api_name>)
    api_gateway: public | none # whether to create a public API Gateway endpoint for this API (if not, the API will still be accessible via the load balancer) (default: public, unless disabled cluster-wide)
@@ -50,6 +51,7 @@
    tensorflow_serving_image: <string> # docker image to use for the TensorFlow Serving container (default: quay.io/cortexlabs/tensorflow-serving-gpu:master or quay.io/cortexlabs/tensorflow-serving-cpu:master based on compute)
    env: <string: string> # dictionary of environment variables
    log_level: <string> # log level that can be "debug", "info", "warning" or "error" (default: "info")
+   shm_size: <string> # size of shared memory (/dev/shm) for sharing data between multiple processes, e.g. 64Mi or 1Gi (default: Null)
  networking:
    endpoint: <string> # the endpoint for the API (default: <api_name>)
    api_gateway: public | none # whether to create a public API Gateway endpoint for this API (if not, the API will still be accessible via the load balancer) (default: public, unless disabled cluster-wide)
@@ -80,6 +82,7 @@
    image: <string> # docker image to use for the Predictor (default: quay.io/cortexlabs/onnx-predictor-gpu:master or quay.io/cortexlabs/onnx-predictor-cpu:master based on compute)
    env: <string: string> # dictionary of environment variables
    log_level: <string> # log level that can be "debug", "info", "warning" or "error" (default: "info")
+   shm_size: <string> # size of shared memory (/dev/shm) for sharing data between multiple processes, e.g. 64Mi or 1Gi (default: Null)
  networking:
    endpoint: <string> # the endpoint for the API (default: <api_name>)
    api_gateway: public | none # whether to create a public API Gateway endpoint for this API (if not, the API will still be accessible via the load balancer) (default: public, unless disabled cluster-wide)
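For context, the snippet below is a minimal sketch of how the new `shm_size` field could be set in a batch API spec. The API name, predictor path, and the 128Mi value are illustrative assumptions, not part of this change:

```yaml
# cortex.yaml (sketch, illustrative values)
- name: image-batch        # hypothetical API name
  kind: BatchAPI
  predictor:
    type: python
    path: predictor.py     # hypothetical Predictor implementation
    shm_size: 128Mi        # new field: enlarges /dev/shm for sharing data between processes
```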
docs/workloads/realtime/configuration.md: 3 additions & 0 deletions
@@ -28,6 +28,7 @@
    image: <string> # docker image to use for the Predictor (default: quay.io/cortexlabs/python-predictor-cpu:master or quay.io/cortexlabs/python-predictor-gpu:master based on compute)
    env: <string: string> # dictionary of environment variables
    log_level: <string> # log level that can be "debug", "info", "warning" or "error" (default: "info")
+   shm_size: <string> # size of shared memory (/dev/shm) for sharing data between multiple processes, e.g. 64Mi or 1Gi (default: Null)
  networking:
    endpoint: <string> # the endpoint for the API (default: <api_name>)
    api_gateway: public | none # whether to create a public API Gateway endpoint for this API (if not, the API will still be accessible via the load balancer) (default: public, unless disabled cluster-wide) (aws only)
@@ -85,6 +86,7 @@
    tensorflow_serving_image: <string> # docker image to use for the TensorFlow Serving container (default: quay.io/cortexlabs/tensorflow-serving-gpu:master or quay.io/cortexlabs/tensorflow-serving-cpu:master based on compute)
    env: <string: string> # dictionary of environment variables
    log_level: <string> # log level that can be "debug", "info", "warning" or "error" (default: "info")
+   shm_size: <string> # size of shared memory (/dev/shm) for sharing data between multiple processes, e.g. 64Mi or 1Gi (default: Null)
  networking:
    endpoint: <string> # the endpoint for the API (default: <api_name>)
    api_gateway: public | none # whether to create a public API Gateway endpoint for this API (if not, the API will still be accessible via the load balancer) (default: public, unless disabled cluster-wide) (aws only)
@@ -136,6 +138,7 @@
    image: <string> # docker image to use for the Predictor (default: quay.io/cortexlabs/onnx-predictor-gpu:master or quay.io/cortexlabs/onnx-predictor-cpu:master based on compute)
    env: <string: string> # dictionary of environment variables
    log_level: <string> # log level that can be "debug", "info", "warning" or "error" (default: "info")
+   shm_size: <string> # size of shared memory (/dev/shm) for sharing data between multiple processes, e.g. 64Mi or 1Gi (default: Null)
  networking:
    endpoint: <string> # the endpoint for the API (default: <api_name>)
    api_gateway: public | none # whether to create a public API Gateway endpoint for this API (if not, the API will still be accessible via the load balancer) (default: public, unless disabled cluster-wide) (aws only)
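The same field applies to realtime APIs. A sketch under the same assumptions (the name, path, and sizes are placeholders); a larger /dev/shm is typically useful when multiple worker processes exchange data, e.g. PyTorch DataLoader workers:

```yaml
# cortex.yaml (sketch, illustrative values)
- name: text-generator         # hypothetical API name
  kind: RealtimeAPI
  predictor:
    type: python
    path: predictor.py         # hypothetical Predictor implementation
    processes_per_replica: 2   # multiple processes may share data via /dev/shm
    shm_size: 1Gi              # new field: size of shared memory (default: Null)
```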