From 66bc21ca6639bbcd1d76cf8ec578e84a85fda131 Mon Sep 17 00:00:00 2001 From: github-actions Date: Tue, 4 Nov 2025 16:06:04 +0000 Subject: [PATCH] chore(schema): update --- samtranslator/schema/schema.json | 52 ++-- schema_source/cloudformation-docs.json | 318 ++++++++++++++++++----- schema_source/cloudformation.schema.json | 52 ++-- 3 files changed, 312 insertions(+), 110 deletions(-) diff --git a/samtranslator/schema/schema.json b/samtranslator/schema/schema.json index 4e0bcc39d..287526eb4 100644 --- a/samtranslator/schema/schema.json +++ b/samtranslator/schema/schema.json @@ -18100,7 +18100,7 @@ "type": "string" }, "InstanceType": { - "markdownDescription": "The instance type to use when launching fleet instances. The following instance types are available for non-Elastic fleets:\n\n- stream.standard.small\n- stream.standard.medium\n- stream.standard.large\n- stream.compute.large\n- stream.compute.xlarge\n- stream.compute.2xlarge\n- stream.compute.4xlarge\n- stream.compute.8xlarge\n- stream.memory.large\n- stream.memory.xlarge\n- stream.memory.2xlarge\n- stream.memory.4xlarge\n- stream.memory.8xlarge\n- stream.memory.z1d.large\n- stream.memory.z1d.xlarge\n- stream.memory.z1d.2xlarge\n- stream.memory.z1d.3xlarge\n- stream.memory.z1d.6xlarge\n- stream.memory.z1d.12xlarge\n- stream.graphics-design.large\n- stream.graphics-design.xlarge\n- stream.graphics-design.2xlarge\n- stream.graphics-design.4xlarge\n- stream.graphics-desktop.2xlarge\n- stream.graphics.g4dn.xlarge\n- stream.graphics.g4dn.2xlarge\n- stream.graphics.g4dn.4xlarge\n- stream.graphics.g4dn.8xlarge\n- stream.graphics.g4dn.12xlarge\n- stream.graphics.g4dn.16xlarge\n- stream.graphics-pro.4xlarge\n- stream.graphics-pro.8xlarge\n- stream.graphics-pro.16xlarge\n- stream.graphics.g5.xlarge\n- stream.graphics.g5.2xlarge\n- stream.graphics.g5.4xlarge\n- stream.graphics.g5.8xlarge\n- stream.graphics.g5.16xlarge\n- stream.graphics.g5.12xlarge\n- stream.graphics.g5.24xlarge\n- stream.graphics.g6.xlarge\n- 
stream.graphics.g6.2xlarge\n- stream.graphics.g6.4xlarge\n- stream.graphics.g6.8xlarge\n- stream.graphics.g6.16xlarge\n- stream.graphics.g6.12xlarge\n- stream.graphics.g6.24xlarge\n- stream.graphics.gr6.4xlarge\n- stream.graphics.gr6.8xlarge\n- stream.graphics.g6f.large\n- stream.graphics.g6f.xlarge\n- stream.graphics.g6f.2xlarge\n- stream.graphics.g6f.4xlarge\n- stream.graphics.gr6f.4xlarge\n\nThe following instance types are available for Elastic fleets:\n\n- stream.standard.small\n- stream.standard.medium", + "markdownDescription": "The instance type to use when launching fleet instances. The following instance types are available for non-Elastic fleets:\n\n- stream.standard.small\n- stream.standard.medium\n- stream.standard.large\n- stream.compute.large\n- stream.compute.xlarge\n- stream.compute.2xlarge\n- stream.compute.4xlarge\n- stream.compute.8xlarge\n- stream.memory.large\n- stream.memory.xlarge\n- stream.memory.2xlarge\n- stream.memory.4xlarge\n- stream.memory.8xlarge\n- stream.memory.z1d.large\n- stream.memory.z1d.xlarge\n- stream.memory.z1d.2xlarge\n- stream.memory.z1d.3xlarge\n- stream.memory.z1d.6xlarge\n- stream.memory.z1d.12xlarge\n- stream.graphics-design.large\n- stream.graphics-design.xlarge\n- stream.graphics-design.2xlarge\n- stream.graphics-design.4xlarge\n- stream.graphics.g4dn.xlarge\n- stream.graphics.g4dn.2xlarge\n- stream.graphics.g4dn.4xlarge\n- stream.graphics.g4dn.8xlarge\n- stream.graphics.g4dn.12xlarge\n- stream.graphics.g4dn.16xlarge\n- stream.graphics.g5.xlarge\n- stream.graphics.g5.2xlarge\n- stream.graphics.g5.4xlarge\n- stream.graphics.g5.8xlarge\n- stream.graphics.g5.16xlarge\n- stream.graphics.g5.12xlarge\n- stream.graphics.g5.24xlarge\n- stream.graphics.g6.xlarge\n- stream.graphics.g6.2xlarge\n- stream.graphics.g6.4xlarge\n- stream.graphics.g6.8xlarge\n- stream.graphics.g6.16xlarge\n- stream.graphics.g6.12xlarge\n- stream.graphics.g6.24xlarge\n- stream.graphics.gr6.4xlarge\n- stream.graphics.gr6.8xlarge\n- 
stream.graphics.g6f.large\n- stream.graphics.g6f.xlarge\n- stream.graphics.g6f.2xlarge\n- stream.graphics.g6f.4xlarge\n- stream.graphics.gr6f.4xlarge\n\nThe following instance types are available for Elastic fleets:\n\n- stream.standard.small\n- stream.standard.medium", "title": "InstanceType", "type": "string" }, @@ -18346,7 +18346,7 @@ "type": "string" }, "InstanceType": { - "markdownDescription": "The instance type to use when launching the image builder. The following instance types are available:\n\n- stream.standard.small\n- stream.standard.medium\n- stream.standard.large\n- stream.compute.large\n- stream.compute.xlarge\n- stream.compute.2xlarge\n- stream.compute.4xlarge\n- stream.compute.8xlarge\n- stream.memory.large\n- stream.memory.xlarge\n- stream.memory.2xlarge\n- stream.memory.4xlarge\n- stream.memory.8xlarge\n- stream.memory.z1d.large\n- stream.memory.z1d.xlarge\n- stream.memory.z1d.2xlarge\n- stream.memory.z1d.3xlarge\n- stream.memory.z1d.6xlarge\n- stream.memory.z1d.12xlarge\n- stream.graphics-design.large\n- stream.graphics-design.xlarge\n- stream.graphics-design.2xlarge\n- stream.graphics-design.4xlarge\n- stream.graphics-desktop.2xlarge\n- stream.graphics.g4dn.xlarge\n- stream.graphics.g4dn.2xlarge\n- stream.graphics.g4dn.4xlarge\n- stream.graphics.g4dn.8xlarge\n- stream.graphics.g4dn.12xlarge\n- stream.graphics.g4dn.16xlarge\n- stream.graphics-pro.4xlarge\n- stream.graphics-pro.8xlarge\n- stream.graphics-pro.16xlarge\n- stream.graphics.g5.xlarge\n- stream.graphics.g5.2xlarge\n- stream.graphics.g5.4xlarge\n- stream.graphics.g5.8xlarge\n- stream.graphics.g5.16xlarge\n- stream.graphics.g5.12xlarge\n- stream.graphics.g5.24xlarge\n- stream.graphics.g6.xlarge\n- stream.graphics.g6.2xlarge\n- stream.graphics.g6.4xlarge\n- stream.graphics.g6.8xlarge\n- stream.graphics.g6.16xlarge\n- stream.graphics.g6.12xlarge\n- stream.graphics.g6.24xlarge\n- stream.graphics.gr6.4xlarge\n- stream.graphics.gr6.8xlarge\n- stream.graphics.g6f.large\n- 
stream.graphics.g6f.xlarge\n- stream.graphics.g6f.2xlarge\n- stream.graphics.g6f.4xlarge\n- stream.graphics.gr6f.4xlarge", + "markdownDescription": "The instance type to use when launching the image builder. The following instance types are available:\n\n- stream.standard.small\n- stream.standard.medium\n- stream.standard.large\n- stream.compute.large\n- stream.compute.xlarge\n- stream.compute.2xlarge\n- stream.compute.4xlarge\n- stream.compute.8xlarge\n- stream.memory.large\n- stream.memory.xlarge\n- stream.memory.2xlarge\n- stream.memory.4xlarge\n- stream.memory.8xlarge\n- stream.memory.z1d.large\n- stream.memory.z1d.xlarge\n- stream.memory.z1d.2xlarge\n- stream.memory.z1d.3xlarge\n- stream.memory.z1d.6xlarge\n- stream.memory.z1d.12xlarge\n- stream.graphics-design.large\n- stream.graphics-design.xlarge\n- stream.graphics-design.2xlarge\n- stream.graphics-design.4xlarge\n- stream.graphics.g4dn.xlarge\n- stream.graphics.g4dn.2xlarge\n- stream.graphics.g4dn.4xlarge\n- stream.graphics.g4dn.8xlarge\n- stream.graphics.g4dn.12xlarge\n- stream.graphics.g4dn.16xlarge\n- stream.graphics.g5.xlarge\n- stream.graphics.g5.2xlarge\n- stream.graphics.g5.4xlarge\n- stream.graphics.g5.8xlarge\n- stream.graphics.g5.16xlarge\n- stream.graphics.g5.12xlarge\n- stream.graphics.g5.24xlarge\n- stream.graphics.g6.xlarge\n- stream.graphics.g6.2xlarge\n- stream.graphics.g6.4xlarge\n- stream.graphics.g6.8xlarge\n- stream.graphics.g6.16xlarge\n- stream.graphics.g6.12xlarge\n- stream.graphics.g6.24xlarge\n- stream.graphics.gr6.4xlarge\n- stream.graphics.gr6.8xlarge\n- stream.graphics.g6f.large\n- stream.graphics.g6f.xlarge\n- stream.graphics.g6f.2xlarge\n- stream.graphics.g6f.4xlarge\n- stream.graphics.gr6f.4xlarge", "title": "InstanceType", "type": "string" }, @@ -33169,7 +33169,7 @@ "items": { "type": "string" }, - "markdownDescription": "The columns within the underlying AWS Glue table that can be utilized within collaborations.", + "markdownDescription": "The columns within the underlying 
AWS Glue table that can be used within collaborations.", "title": "AllowedColumns", "type": "array" }, @@ -83126,17 +83126,17 @@ "additionalProperties": false, "properties": { "Base": { - "markdownDescription": "The *base* value designates how many tasks, at a minimum, to run on the specified capacity provider for each service. Only one capacity provider in a capacity provider strategy can have a *base* defined. If no value is specified, the default value of `0` is used.\n\nBase value characteristics:\n\n- Only one capacity provider in a strategy can have a base defined\n- Default value is `0` if not specified\n- Valid range: 0 to 100,000\n- Base requirements are satisfied first before weight distribution", + "markdownDescription": "The *base* value designates how many tasks, at a minimum, to run on the specified capacity provider for each service. Only one capacity provider in a capacity provider strategy can have a *base* defined. If no value is specified, the default value of `0` is used.\n\nBase value characteristics:\n\n- Only one capacity provider in a strategy can have a base defined\n- The default value is `0` if not specified\n- The valid range is 0 to 100,000\n- Base requirements are satisfied first before weight distribution", "title": "Base", "type": "number" }, "CapacityProvider": { - "markdownDescription": "The short name of the capacity provider.", + "markdownDescription": "The short name of the capacity provider. This can be either an AWS managed capacity provider ( `FARGATE` or `FARGATE_SPOT` ) or the name of a custom capacity provider that you created.", "title": "CapacityProvider", "type": "string" }, "Weight": { - "markdownDescription": "The *weight* value designates the relative percentage of the total number of tasks launched that should use the specified capacity provider. The `weight` value is taken into consideration after the `base` value, if defined, is satisfied.\n\nIf no `weight` value is specified, the default value of `0` is used. 
When multiple capacity providers are specified within a capacity provider strategy, at least one of the capacity providers must have a weight value greater than zero and any capacity providers with a weight of `0` can't be used to place tasks. If you specify multiple capacity providers in a strategy that all have a weight of `0` , any `RunTask` or `CreateService` actions using the capacity provider strategy will fail.\n\nWeight value characteristics:\n\n- Weight is considered after the base value is satisfied\n- Default value is `0` if not specified\n- Valid range: 0 to 1,000\n- At least one capacity provider must have a weight greater than zero\n- Capacity providers with weight of `0` cannot place tasks\n\nTask distribution logic:\n\n- Base satisfaction: The minimum number of tasks specified by the base value are placed on that capacity provider\n- Weight distribution: After base requirements are met, additional tasks are distributed according to weight ratios\n\nExamples:\n\nEqual Distribution: Two capacity providers both with weight `1` will split tasks evenly after base requirements are met.\n\nWeighted Distribution: If capacityProviderA has weight `1` and capacityProviderB has weight `4` , then for every 1 task on A, 4 tasks will run on B.", + "markdownDescription": "The *weight* value designates the relative percentage of the total number of tasks launched that should use the specified capacity provider. The `weight` value is taken into consideration after the `base` value, if defined, is satisfied.\n\nIf no `weight` value is specified, the default value of `0` is used. When multiple capacity providers are specified within a capacity provider strategy, at least one of the capacity providers must have a weight value greater than zero and any capacity providers with a weight of `0` can't be used to place tasks. 
If you specify multiple capacity providers in a strategy that all have a weight of `0` , any `RunTask` or `CreateService` actions using the capacity provider strategy will fail.\n\nWeight value characteristics:\n\n- Weight is considered after the base value is satisfied\n- The default value is `0` if not specified\n- The valid range is 0 to 1,000\n- At least one capacity provider must have a weight greater than zero\n- Capacity providers with weight of `0` cannot place tasks\n\nTask distribution logic:\n\n- Base satisfaction: The minimum number of tasks specified by the base value are placed on that capacity provider\n- Weight distribution: After base requirements are met, additional tasks are distributed according to weight ratios\n\nExamples:\n\nEqual Distribution: Two capacity providers both with weight `1` will split tasks evenly after base requirements are met.\n\nWeighted Distribution: If capacityProviderA has weight `1` and capacityProviderB has weight `4` , then for every 1 task on A, 4 tasks will run on B.", "title": "Weight", "type": "number" } @@ -83322,17 +83322,17 @@ "additionalProperties": false, "properties": { "Base": { - "markdownDescription": "The *base* value designates how many tasks, at a minimum, to run on the specified capacity provider for each service. Only one capacity provider in a capacity provider strategy can have a *base* defined. If no value is specified, the default value of `0` is used.\n\nBase value characteristics:\n\n- Only one capacity provider in a strategy can have a base defined\n- Default value is `0` if not specified\n- Valid range: 0 to 100,000\n- Base requirements are satisfied first before weight distribution", + "markdownDescription": "The *base* value designates how many tasks, at a minimum, to run on the specified capacity provider for each service. Only one capacity provider in a capacity provider strategy can have a *base* defined. 
If no value is specified, the default value of `0` is used.\n\nBase value characteristics:\n\n- Only one capacity provider in a strategy can have a base defined\n- The default value is `0` if not specified\n- The valid range is 0 to 100,000\n- Base requirements are satisfied first before weight distribution", "title": "Base", "type": "number" }, "CapacityProvider": { - "markdownDescription": "The short name of the capacity provider.", + "markdownDescription": "The short name of the capacity provider. This can be either an AWS managed capacity provider ( `FARGATE` or `FARGATE_SPOT` ) or the name of a custom capacity provider that you created.", "title": "CapacityProvider", "type": "string" }, "Weight": { - "markdownDescription": "The *weight* value designates the relative percentage of the total number of tasks launched that should use the specified capacity provider. The `weight` value is taken into consideration after the `base` value, if defined, is satisfied.\n\nIf no `weight` value is specified, the default value of `0` is used. When multiple capacity providers are specified within a capacity provider strategy, at least one of the capacity providers must have a weight value greater than zero and any capacity providers with a weight of `0` can't be used to place tasks. 
If you specify multiple capacity providers in a strategy that all have a weight of `0` , any `RunTask` or `CreateService` actions using the capacity provider strategy will fail.\n\nWeight value characteristics:\n\n- Weight is considered after the base value is satisfied\n- Default value is `0` if not specified\n- Valid range: 0 to 1,000\n- At least one capacity provider must have a weight greater than zero\n- Capacity providers with weight of `0` cannot place tasks\n\nTask distribution logic:\n\n- Base satisfaction: The minimum number of tasks specified by the base value are placed on that capacity provider\n- Weight distribution: After base requirements are met, additional tasks are distributed according to weight ratios\n\nExamples:\n\nEqual Distribution: Two capacity providers both with weight `1` will split tasks evenly after base requirements are met.\n\nWeighted Distribution: If capacityProviderA has weight `1` and capacityProviderB has weight `4` , then for every 1 task on A, 4 tasks will run on B.", + "markdownDescription": "The *weight* value designates the relative percentage of the total number of tasks launched that should use the specified capacity provider. The `weight` value is taken into consideration after the `base` value, if defined, is satisfied.\n\nIf no `weight` value is specified, the default value of `0` is used. When multiple capacity providers are specified within a capacity provider strategy, at least one of the capacity providers must have a weight value greater than zero and any capacity providers with a weight of `0` can't be used to place tasks. 
If you specify multiple capacity providers in a strategy that all have a weight of `0` , any `RunTask` or `CreateService` actions using the capacity provider strategy will fail.\n\nWeight value characteristics:\n\n- Weight is considered after the base value is satisfied\n- The default value is `0` if not specified\n- The valid range is 0 to 1,000\n- At least one capacity provider must have a weight greater than zero\n- Capacity providers with weight of `0` cannot place tasks\n\nTask distribution logic:\n\n- Base satisfaction: The minimum number of tasks specified by the base value are placed on that capacity provider\n- Weight distribution: After base requirements are met, additional tasks are distributed according to weight ratios\n\nExamples:\n\nEqual Distribution: Two capacity providers both with weight `1` will split tasks evenly after base requirements are met.\n\nWeighted Distribution: If capacityProviderA has weight `1` and capacityProviderB has weight `4` , then for every 1 task on A, 4 tasks will run on B.", "title": "Weight", "type": "number" } @@ -83646,17 +83646,17 @@ "additionalProperties": false, "properties": { "Base": { - "markdownDescription": "The *base* value designates how many tasks, at a minimum, to run on the specified capacity provider for each service. Only one capacity provider in a capacity provider strategy can have a *base* defined. If no value is specified, the default value of `0` is used.\n\nBase value characteristics:\n\n- Only one capacity provider in a strategy can have a base defined\n- Default value is `0` if not specified\n- Valid range: 0 to 100,000\n- Base requirements are satisfied first before weight distribution", + "markdownDescription": "The *base* value designates how many tasks, at a minimum, to run on the specified capacity provider for each service. Only one capacity provider in a capacity provider strategy can have a *base* defined. 
If no value is specified, the default value of `0` is used.\n\nBase value characteristics:\n\n- Only one capacity provider in a strategy can have a base defined\n- The default value is `0` if not specified\n- The valid range is 0 to 100,000\n- Base requirements are satisfied first before weight distribution", "title": "Base", "type": "number" }, "CapacityProvider": { - "markdownDescription": "The short name of the capacity provider.", + "markdownDescription": "The short name of the capacity provider. This can be either an AWS managed capacity provider ( `FARGATE` or `FARGATE_SPOT` ) or the name of a custom capacity provider that you created.", "title": "CapacityProvider", "type": "string" }, "Weight": { - "markdownDescription": "The *weight* value designates the relative percentage of the total number of tasks launched that should use the specified capacity provider. The `weight` value is taken into consideration after the `base` value, if defined, is satisfied.\n\nIf no `weight` value is specified, the default value of `0` is used. When multiple capacity providers are specified within a capacity provider strategy, at least one of the capacity providers must have a weight value greater than zero and any capacity providers with a weight of `0` can't be used to place tasks. 
If you specify multiple capacity providers in a strategy that all have a weight of `0` , any `RunTask` or `CreateService` actions using the capacity provider strategy will fail.\n\nWeight value characteristics:\n\n- Weight is considered after the base value is satisfied\n- Default value is `0` if not specified\n- Valid range: 0 to 1,000\n- At least one capacity provider must have a weight greater than zero\n- Capacity providers with weight of `0` cannot place tasks\n\nTask distribution logic:\n\n- Base satisfaction: The minimum number of tasks specified by the base value are placed on that capacity provider\n- Weight distribution: After base requirements are met, additional tasks are distributed according to weight ratios\n\nExamples:\n\nEqual Distribution: Two capacity providers both with weight `1` will split tasks evenly after base requirements are met.\n\nWeighted Distribution: If capacityProviderA has weight `1` and capacityProviderB has weight `4` , then for every 1 task on A, 4 tasks will run on B.", + "markdownDescription": "The *weight* value designates the relative percentage of the total number of tasks launched that should use the specified capacity provider. The `weight` value is taken into consideration after the `base` value, if defined, is satisfied.\n\nIf no `weight` value is specified, the default value of `0` is used. When multiple capacity providers are specified within a capacity provider strategy, at least one of the capacity providers must have a weight value greater than zero and any capacity providers with a weight of `0` can't be used to place tasks. 
If you specify multiple capacity providers in a strategy that all have a weight of `0` , any `RunTask` or `CreateService` actions using the capacity provider strategy will fail.\n\nWeight value characteristics:\n\n- Weight is considered after the base value is satisfied\n- The default value is `0` if not specified\n- The valid range is 0 to 1,000\n- At least one capacity provider must have a weight greater than zero\n- Capacity providers with weight of `0` cannot place tasks\n\nTask distribution logic:\n\n- Base satisfaction: The minimum number of tasks specified by the base value are placed on that capacity provider\n- Weight distribution: After base requirements are met, additional tasks are distributed according to weight ratios\n\nExamples:\n\nEqual Distribution: Two capacity providers both with weight `1` will split tasks evenly after base requirements are met.\n\nWeighted Distribution: If capacityProviderA has weight `1` and capacityProviderB has weight `4` , then for every 1 task on A, 4 tasks will run on B.", "title": "Weight", "type": "number" } @@ -84237,7 +84237,7 @@ "type": "string" }, "PidMode": { - "markdownDescription": "The process namespace to use for the containers in the task. The valid values are `host` or `task` . On Fargate for Linux containers, the only valid value is `task` . For example, monitoring sidecars might need `pidMode` to access information about other containers running in the same task.\n\nIf `host` is specified, all containers within the tasks that specified the `host` PID mode on the same container instance share the same process namespace with the host Amazon EC2 instance.\n\nIf `task` is specified, all containers within the specified task share the same process namespace.\n\nIf no value is specified, the default is a private namespace for each container.\n\nIf the `host` PID mode is used, there's a heightened risk of undesired process namespace exposure.\n\n> This parameter is not supported for Windows containers. 
> This parameter is only supported for tasks that are hosted on AWS Fargate if the tasks are using platform version `1.4.0` or later (Linux). This isn't supported for Windows containers on Fargate.", + "markdownDescription": "The process namespace to use for the containers in the task. The valid values are `host` or `task` . On Fargate for Linux containers, the only valid value is `task` . For example, monitoring sidecars might need `pidMode` to access information about other containers running in the same task.\n\nIf `host` is specified, all containers within the tasks that specified the `host` PID mode on the same container instance share the same process namespace with the host Amazon EC2 instance.\n\nIf `task` is specified, all containers within the specified task share the same process namespace.\n\nIf no value is specified, the default is a private namespace for each container.\n\nIf the `host` PID mode is used, there's a heightened risk of undesired process namespace exposure.\n\n> This parameter is not supported for Windows containers. > This parameter is only supported for tasks that are hosted on AWS Fargate if the tasks are using platform version `1.4.0` or later (Linux). This isn't supported for Windows containers on Fargate.", "title": "PidMode", "type": "string" }, @@ -84264,7 +84264,7 @@ }, "RuntimePlatform": { "$ref": "#/definitions/AWS::ECS::TaskDefinition.RuntimePlatform", - "markdownDescription": "The operating system that your tasks definitions run on. A platform family is specified only for tasks using the Fargate launch type.", + "markdownDescription": "The operating system that your tasks definitions run on.", "title": "RuntimePlatform" }, "Tags": { @@ -84339,7 +84339,7 @@ "type": "array" }, "Cpu": { - "markdownDescription": "The number of `cpu` units reserved for the container. 
This parameter maps to `CpuShares` in the docker container create commandand the `--cpu-shares` option to docker run.\n\nThis field is optional for tasks using the Fargate launch type, and the only requirement is that the total amount of CPU reserved for all containers within a task be lower than the task-level `cpu` value.\n\n> You can determine the number of CPU units that are available per EC2 instance type by multiplying the vCPUs listed for that instance type on the [Amazon EC2 Instances](https://docs.aws.amazon.com/ec2/instance-types/) detail page by 1,024. \n\nLinux containers share unallocated CPU units with other containers on the container instance with the same ratio as their allocated amount. For example, if you run a single-container task on a single-core instance type with 512 CPU units specified for that container, and that's the only task running on the container instance, that container could use the full 1,024 CPU unit share at any given time. However, if you launched another copy of the same task on that container instance, each task is guaranteed a minimum of 512 CPU units when needed. Moreover, each container could float to higher CPU usage if the other container was not using it. If both tasks were 100% active all of the time, they would be limited to 512 CPU units.\n\nOn Linux container instances, the Docker daemon on the container instance uses the CPU value to calculate the relative CPU share ratios for running containers. The minimum valid CPU share value that the Linux kernel allows is 2, and the maximum valid CPU share value that the Linux kernel allows is 262144. However, the CPU parameter isn't required, and you can use CPU values below 2 or above 262144 in your container definitions. 
For CPU values below 2 (including null) or above 262144, the behavior varies based on your Amazon ECS container agent version:\n\n- *Agent versions less than or equal to 1.1.0:* Null and zero CPU values are passed to Docker as 0, which Docker then converts to 1,024 CPU shares. CPU values of 1 are passed to Docker as 1, which the Linux kernel converts to two CPU shares.\n- *Agent versions greater than or equal to 1.2.0:* Null, zero, and CPU values of 1 are passed to Docker as 2.\n- *Agent versions greater than or equal to 1.84.0:* CPU values greater than 256 vCPU are passed to Docker as 256, which is equivalent to 262144 CPU shares.\n\nOn Windows container instances, the CPU limit is enforced as an absolute limit, or a quota. Windows containers only have access to the specified amount of CPU that's described in the task definition. A null or zero CPU value is passed to Docker as `0` , which Windows interprets as 1% of one CPU.", + "markdownDescription": "The number of `cpu` units reserved for the container. This parameter maps to `CpuShares` in the docker container create command and the `--cpu-shares` option to docker run.\n\nThis field is optional for tasks using the Fargate launch type, and the only requirement is that the total amount of CPU reserved for all containers within a task be lower than the task-level `cpu` value.\n\n> You can determine the number of CPU units that are available per EC2 instance type by multiplying the vCPUs listed for that instance type on the [Amazon EC2 Instances](https://docs.aws.amazon.com/ec2/instance-types/) detail page by 1,024. \n\nLinux containers share unallocated CPU units with other containers on the container instance with the same ratio as their allocated amount. For example, if you run a single-container task on a single-core instance type with 512 CPU units specified for that container, and that's the only task running on the container instance, that container could use the full 1,024 CPU unit share at any given time. 
However, if you launched another copy of the same task on that container instance, each task is guaranteed a minimum of 512 CPU units when needed. Moreover, each container could float to higher CPU usage if the other container was not using it. If both tasks were 100% active all of the time, they would be limited to 512 CPU units.\n\nOn Linux container instances, the Docker daemon on the container instance uses the CPU value to calculate the relative CPU share ratios for running containers. The minimum valid CPU share value that the Linux kernel allows is 2, and the maximum valid CPU share value that the Linux kernel allows is 262144. However, the CPU parameter isn't required, and you can use CPU values below 2 or above 262144 in your container definitions. For CPU values below 2 (including null) or above 262144, the behavior varies based on your Amazon ECS container agent version:\n\n- *Agent versions less than or equal to 1.1.0:* Null and zero CPU values are passed to Docker as 0, which Docker then converts to 1,024 CPU shares. CPU values of 1 are passed to Docker as 1, which the Linux kernel converts to two CPU shares.\n- *Agent versions greater than or equal to 1.2.0:* Null, zero, and CPU values of 1 are passed to Docker as 2.\n- *Agent versions greater than or equal to 1.84.0:* CPU values greater than 256 vCPU are passed to Docker as 256, which is equivalent to 262144 CPU shares.\n\nOn Windows container instances, the CPU limit is enforced as an absolute limit, or a quota. Windows containers only have access to the specified amount of CPU that's described in the task definition. A null or zero CPU value is passed to Docker as `0` , which Windows interprets as 1% of one CPU.", "title": "Cpu", "type": "number" }, @@ -85118,7 +85118,7 @@ "additionalProperties": false, "properties": { "CpuArchitecture": { - "markdownDescription": "The CPU architecture.\n\nYou can run your Linux tasks on an ARM-based platform by setting the value to `ARM64` . 
This option is available for tasks that run on Linux Amazon EC2 instance or Linux containers on Fargate.", + "markdownDescription": "The CPU architecture.\n\nYou can run your Linux tasks on an ARM-based platform by setting the value to `ARM64` . This option is available for tasks that run on Linux Amazon EC2 instance, Amazon ECS Managed Instances, or Linux containers on Fargate.", "title": "CpuArchitecture", "type": "string" }, @@ -134958,7 +134958,7 @@ }, "WorkDocsConfiguration": { "$ref": "#/definitions/AWS::Kendra::DataSource.WorkDocsConfiguration", - "markdownDescription": "Provides the configuration information to connect to Amazon WorkDocs as your data source.", + "markdownDescription": "Provides the configuration information to connect to WorkDocs as your data source.", "title": "WorkDocsConfiguration" } }, @@ -136064,7 +136064,7 @@ "items": { "type": "string" }, - "markdownDescription": "A list of regular expression patterns to exclude certain files in your Amazon WorkDocs site repository. Files that match the patterns are excluded from the index. Files that don\u2019t match the patterns are included in the index. If a file matches both an inclusion and exclusion pattern, the exclusion pattern takes precedence and the file isn't included in the index.", + "markdownDescription": "A list of regular expression patterns to exclude certain files in your WorkDocs site repository. Files that match the patterns are excluded from the index. Files that don\u2019t match the patterns are included in the index. 
If a file matches both an inclusion and exclusion pattern, the exclusion pattern takes precedence and the file isn't included in the index.", "title": "ExclusionPatterns", "type": "array" }, @@ -136072,7 +136072,7 @@ "items": { "$ref": "#/definitions/AWS::Kendra::DataSource.DataSourceToIndexFieldMapping" }, - "markdownDescription": "A list of `DataSourceToIndexFieldMapping` objects that map Amazon WorkDocs data source attributes or field names to Amazon Kendra index field names. To create custom fields, use the `UpdateIndex` API before you map to Amazon WorkDocs fields. For more information, see [Mapping data source fields](https://docs.aws.amazon.com/kendra/latest/dg/field-mapping.html) . The Amazon WorkDocs data source field names must exist in your Amazon WorkDocs custom metadata.", + "markdownDescription": "A list of `DataSourceToIndexFieldMapping` objects that map WorkDocs data source attributes or field names to Amazon Kendra index field names. To create custom fields, use the `UpdateIndex` API before you map to WorkDocs fields. For more information, see [Mapping data source fields](https://docs.aws.amazon.com/kendra/latest/dg/field-mapping.html) . The WorkDocs data source field names must exist in your WorkDocs custom metadata.", "title": "FieldMappings", "type": "array" }, @@ -136080,17 +136080,17 @@ "items": { "type": "string" }, - "markdownDescription": "A list of regular expression patterns to include certain files in your Amazon WorkDocs site repository. Files that match the patterns are included in the index. Files that don't match the patterns are excluded from the index. If a file matches both an inclusion and exclusion pattern, the exclusion pattern takes precedence and the file isn't included in the index.", + "markdownDescription": "A list of regular expression patterns to include certain files in your WorkDocs site repository. Files that match the patterns are included in the index. Files that don't match the patterns are excluded from the index. 
If a file matches both an inclusion and exclusion pattern, the exclusion pattern takes precedence and the file isn't included in the index.", "title": "InclusionPatterns", "type": "array" }, "OrganizationId": { - "markdownDescription": "The identifier of the directory corresponding to your Amazon WorkDocs site repository.\n\nYou can find the organization ID in the [AWS Directory Service](https://docs.aws.amazon.com/directoryservicev2/) by going to *Active Directory* , then *Directories* . Your Amazon WorkDocs site directory has an ID, which is the organization ID. You can also set up a new Amazon WorkDocs directory in the AWS Directory Service console and enable a Amazon WorkDocs site for the directory in the Amazon WorkDocs console.", + "markdownDescription": "The identifier of the directory corresponding to your WorkDocs site repository.\n\nYou can find the organization ID in the [AWS Directory Service](https://docs.aws.amazon.com/directoryservicev2/) by going to *Active Directory* , then *Directories* . Your WorkDocs site directory has an ID, which is the organization ID. You can also set up a new WorkDocs directory in the AWS Directory Service console and enable a WorkDocs site for the directory in the WorkDocs console.", "title": "OrganizationId", "type": "string" }, "UseChangeLog": { - "markdownDescription": "`TRUE` to use the Amazon WorkDocs change log to determine which documents require updating in the index. Depending on the change log's size, it may take longer for Amazon Kendra to use the change log than to scan all of your documents in Amazon WorkDocs.", + "markdownDescription": "`TRUE` to use the WorkDocs change log to determine which documents require updating in the index. 
Depending on the change log's size, it may take longer for Amazon Kendra to use the change log than to scan all of your documents in WorkDocs.", "title": "UseChangeLog", "type": "boolean" } @@ -192855,7 +192855,7 @@ "type": "array" }, "Name": { - "markdownDescription": "The name of the sheet. This name is displayed on the sheet's tab in the Amazon QuickSight console.", + "markdownDescription": "The name of the sheet. This name is displayed on the sheet's tab in the Quick Suite console.", "title": "Name", "type": "string" }, @@ -205399,7 +205399,7 @@ "type": "array" }, "Name": { - "markdownDescription": "The name of the sheet. This name is displayed on the sheet's tab in the Amazon QuickSight console.", + "markdownDescription": "The name of the sheet. This name is displayed on the sheet's tab in the Quick Suite console.", "title": "Name", "type": "string" }, @@ -219779,7 +219779,7 @@ "type": "array" }, "Name": { - "markdownDescription": "The name of the sheet. This name is displayed on the sheet's tab in the Amazon QuickSight console.", + "markdownDescription": "The name of the sheet. This name is displayed on the sheet's tab in the Quick Suite console.", "title": "Name", "type": "string" }, @@ -252117,7 +252117,7 @@ "type": "string" }, "PlatformIdentifier": { - "markdownDescription": "The platform identifier of the notebook instance runtime environment.", + "markdownDescription": "The platform identifier of the notebook instance runtime environment. The default value is `notebook-al2-v2` .", "title": "PlatformIdentifier", "type": "string" }, diff --git a/schema_source/cloudformation-docs.json b/schema_source/cloudformation-docs.json index 21b8388d6..b3ef66d74 100644 --- a/schema_source/cloudformation-docs.json +++ b/schema_source/cloudformation-docs.json @@ -232,39 +232,39 @@ "Value": "A list of key-value pairs to associate with the investigation group. You can associate as many as 50 tags with an investigation group. 
To be able to associate tags when you create the investigation group, you must have the `cloudwatch:TagResource` permission.\n\nTags can help you organize and categorize your resources. You can also use them to scope user permissions by granting a user permission to access or change only resources with certain tag values." }, "AWS::APS::AnomalyDetector": { - "Alias": "", - "Configuration": "", - "EvaluationIntervalInSeconds": "", - "Labels": "", - "MissingDataAction": "", - "Tags": "", + "Alias": "The user-friendly name of the anomaly detector.", + "Configuration": "The algorithm configuration of the anomaly detector.", + "EvaluationIntervalInSeconds": "The frequency, in seconds, at which the anomaly detector evaluates metrics.", + "Labels": "The Amazon Managed Service for Prometheus metric labels associated with the anomaly detector.", + "MissingDataAction": "The action taken when data is missing during evaluation.", + "Tags": "The tags applied to the anomaly detector.", "Workspace": "An Amazon Managed Service for Prometheus workspace is a logical and isolated Prometheus server dedicated to ingesting, storing, and querying your Prometheus-compatible metrics." }, "AWS::APS::AnomalyDetector AnomalyDetectorConfiguration": { - "RandomCutForest": "" + "RandomCutForest": "The Random Cut Forest algorithm configuration for anomaly detection." }, "AWS::APS::AnomalyDetector IgnoreNearExpected": { - "Amount": "", - "Ratio": "" + "Amount": "The absolute amount by which values can differ from expected values before being considered anomalous.", + "Ratio": "The ratio by which values can differ from expected values before being considered anomalous." }, "AWS::APS::AnomalyDetector Label": { - "Key": "", - "Value": "" + "Key": "The key of the label.", + "Value": "The value for this label." 
}, "AWS::APS::AnomalyDetector MissingDataAction": { - "MarkAsAnomaly": "", - "Skip": "" + "MarkAsAnomaly": "Marks missing data points as anomalies.", + "Skip": "Skips evaluation when data is missing." }, "AWS::APS::AnomalyDetector RandomCutForestConfiguration": { - "IgnoreNearExpectedFromAbove": "", - "IgnoreNearExpectedFromBelow": "", - "Query": "", - "SampleSize": "", - "ShingleSize": "" + "IgnoreNearExpectedFromAbove": "Configuration for ignoring values that are near expected values from above during anomaly detection.", + "IgnoreNearExpectedFromBelow": "Configuration for ignoring values that are near expected values from below during anomaly detection.", + "Query": "The Prometheus query used to retrieve the time-series data for anomaly detection.\n\n> Random Cut Forest queries must be wrapped by a supported PromQL aggregation operator. For more information, see [Aggregation operators](https://docs.aws.amazon.com/https://prometheus.io/docs/prometheus/latest/querying/operators/#aggregation-operators) on the *Prometheus docs* website.\n> \n> *Supported PromQL aggregation operators* : `avg` , `count` , `group` , `max` , `min` , `quantile` , `stddev` , `stdvar` , and `sum` .", + "SampleSize": "The number of data points sampled from the input stream for the Random Cut Forest algorithm. The default number is 256 consecutive data points.", + "ShingleSize": "The number of consecutive data points used to create a shingle for the Random Cut Forest algorithm. The default number is 8 consecutive data points." }, "AWS::APS::AnomalyDetector Tag": { - "Key": "", - "Value": "" + "Key": "The key of the tag. Must not begin with `aws:` .", + "Value": "The value of the tag." }, "AWS::APS::ResourcePolicy": { "PolicyDocument": "The JSON to use as the Resource-based Policy.", @@ -477,10 +477,6 @@ "AWS::ARCRegionSwitch::Plan GlobalAuroraUngraceful": { "Ungraceful": "The settings for ungraceful execution." 
}, - "AWS::ARCRegionSwitch::Plan HealthCheckState": { - "HealthCheckId": "", - "Region": "" - }, "AWS::ARCRegionSwitch::Plan KubernetesResourceType": { "ApiVersion": "The API version type for the Kubernetes resource.", "Kind": "The kind for the Kubernetes resource." @@ -3312,7 +3308,7 @@ "IdleDisconnectTimeoutInSeconds": "The amount of time that users can be idle (inactive) before they are disconnected from their streaming session and the `DisconnectTimeoutInSeconds` time interval begins. Users are notified before they are disconnected due to inactivity. If they try to reconnect to the streaming session before the time interval specified in `DisconnectTimeoutInSeconds` elapses, they are connected to their previous session. Users are considered idle when they stop providing keyboard or mouse input during their streaming session. File uploads and downloads, audio in, audio out, and pixels changing do not qualify as user activity. If users continue to be idle after the time interval in `IdleDisconnectTimeoutInSeconds` elapses, they are disconnected.\n\nTo prevent users from being disconnected due to inactivity, specify a value of 0. Otherwise, specify a value between 60 and 36000.\n\nIf you enable this feature, we recommend that you specify a value that corresponds exactly to a whole number of minutes (for example, 60, 120, and 180). If you don't do this, the value is rounded to the nearest minute. For example, if you specify a value of 70, users are disconnected after 1 minute of inactivity. If you specify a value that is at the midpoint between two different minutes, the value is rounded up. For example, if you specify a value of 90, users are disconnected after 2 minutes of inactivity.", "ImageArn": "The ARN of the public, private, or shared image to use.", "ImageName": "The name of the image used to create the fleet.", - "InstanceType": "The instance type to use when launching fleet instances. 
The following instance types are available for non-Elastic fleets:\n\n- stream.standard.small\n- stream.standard.medium\n- stream.standard.large\n- stream.compute.large\n- stream.compute.xlarge\n- stream.compute.2xlarge\n- stream.compute.4xlarge\n- stream.compute.8xlarge\n- stream.memory.large\n- stream.memory.xlarge\n- stream.memory.2xlarge\n- stream.memory.4xlarge\n- stream.memory.8xlarge\n- stream.memory.z1d.large\n- stream.memory.z1d.xlarge\n- stream.memory.z1d.2xlarge\n- stream.memory.z1d.3xlarge\n- stream.memory.z1d.6xlarge\n- stream.memory.z1d.12xlarge\n- stream.graphics-design.large\n- stream.graphics-design.xlarge\n- stream.graphics-design.2xlarge\n- stream.graphics-design.4xlarge\n- stream.graphics-desktop.2xlarge\n- stream.graphics.g4dn.xlarge\n- stream.graphics.g4dn.2xlarge\n- stream.graphics.g4dn.4xlarge\n- stream.graphics.g4dn.8xlarge\n- stream.graphics.g4dn.12xlarge\n- stream.graphics.g4dn.16xlarge\n- stream.graphics-pro.4xlarge\n- stream.graphics-pro.8xlarge\n- stream.graphics-pro.16xlarge\n- stream.graphics.g5.xlarge\n- stream.graphics.g5.2xlarge\n- stream.graphics.g5.4xlarge\n- stream.graphics.g5.8xlarge\n- stream.graphics.g5.16xlarge\n- stream.graphics.g5.12xlarge\n- stream.graphics.g5.24xlarge\n- stream.graphics.g6.xlarge\n- stream.graphics.g6.2xlarge\n- stream.graphics.g6.4xlarge\n- stream.graphics.g6.8xlarge\n- stream.graphics.g6.16xlarge\n- stream.graphics.g6.12xlarge\n- stream.graphics.g6.24xlarge\n- stream.graphics.gr6.4xlarge\n- stream.graphics.gr6.8xlarge\n- stream.graphics.g6f.large\n- stream.graphics.g6f.xlarge\n- stream.graphics.g6f.2xlarge\n- stream.graphics.g6f.4xlarge\n- stream.graphics.gr6f.4xlarge\n\nThe following instance types are available for Elastic fleets:\n\n- stream.standard.small\n- stream.standard.medium", + "InstanceType": "The instance type to use when launching fleet instances. 
The following instance types are available for non-Elastic fleets:\n\n- stream.standard.small\n- stream.standard.medium\n- stream.standard.large\n- stream.compute.large\n- stream.compute.xlarge\n- stream.compute.2xlarge\n- stream.compute.4xlarge\n- stream.compute.8xlarge\n- stream.memory.large\n- stream.memory.xlarge\n- stream.memory.2xlarge\n- stream.memory.4xlarge\n- stream.memory.8xlarge\n- stream.memory.z1d.large\n- stream.memory.z1d.xlarge\n- stream.memory.z1d.2xlarge\n- stream.memory.z1d.3xlarge\n- stream.memory.z1d.6xlarge\n- stream.memory.z1d.12xlarge\n- stream.graphics-design.large\n- stream.graphics-design.xlarge\n- stream.graphics-design.2xlarge\n- stream.graphics-design.4xlarge\n- stream.graphics.g4dn.xlarge\n- stream.graphics.g4dn.2xlarge\n- stream.graphics.g4dn.4xlarge\n- stream.graphics.g4dn.8xlarge\n- stream.graphics.g4dn.12xlarge\n- stream.graphics.g4dn.16xlarge\n- stream.graphics.g5.xlarge\n- stream.graphics.g5.2xlarge\n- stream.graphics.g5.4xlarge\n- stream.graphics.g5.8xlarge\n- stream.graphics.g5.16xlarge\n- stream.graphics.g5.12xlarge\n- stream.graphics.g5.24xlarge\n- stream.graphics.g6.xlarge\n- stream.graphics.g6.2xlarge\n- stream.graphics.g6.4xlarge\n- stream.graphics.g6.8xlarge\n- stream.graphics.g6.16xlarge\n- stream.graphics.g6.12xlarge\n- stream.graphics.g6.24xlarge\n- stream.graphics.gr6.4xlarge\n- stream.graphics.gr6.8xlarge\n- stream.graphics.g6f.large\n- stream.graphics.g6f.xlarge\n- stream.graphics.g6f.2xlarge\n- stream.graphics.g6f.4xlarge\n- stream.graphics.gr6f.4xlarge\n\nThe following instance types are available for Elastic fleets:\n\n- stream.standard.small\n- stream.standard.medium", "MaxConcurrentSessions": "The maximum number of concurrent sessions that can be run on an Elastic fleet. This setting is required for Elastic fleets, but is not used for other fleet types.", "MaxSessionsPerInstance": "Max number of user sessions on an instance. 
This is applicable only for multi-session fleets.", "MaxUserDurationInSeconds": "The maximum amount of time that a streaming session can remain active, in seconds. If users are still connected to a streaming instance five minutes before this limit is reached, they are prompted to save any open documents before being disconnected. After this time elapses, the instance is terminated and replaced by a new instance.\n\nSpecify a value between 600 and 432000.", @@ -3354,7 +3350,7 @@ "IamRoleArn": "The ARN of the IAM role that is applied to the image builder. To assume a role, the image builder calls the AWS Security Token Service `AssumeRole` API operation and passes the ARN of the role to use. The operation creates a new session with temporary credentials. AppStream 2.0 retrieves the temporary credentials and creates the *appstream_machine_role* credential profile on the instance.\n\nFor more information, see [Using an IAM Role to Grant Permissions to Applications and Scripts Running on AppStream 2.0 Streaming Instances](https://docs.aws.amazon.com/appstream2/latest/developerguide/using-iam-roles-to-grant-permissions-to-applications-scripts-streaming-instances.html) in the *Amazon AppStream 2.0 Administration Guide* .", "ImageArn": "The ARN of the public, private, or shared image to use.", "ImageName": "The name of the image used to create the image builder.", - "InstanceType": "The instance type to use when launching the image builder. 
The following instance types are available:\n\n- stream.standard.small\n- stream.standard.medium\n- stream.standard.large\n- stream.compute.large\n- stream.compute.xlarge\n- stream.compute.2xlarge\n- stream.compute.4xlarge\n- stream.compute.8xlarge\n- stream.memory.large\n- stream.memory.xlarge\n- stream.memory.2xlarge\n- stream.memory.4xlarge\n- stream.memory.8xlarge\n- stream.memory.z1d.large\n- stream.memory.z1d.xlarge\n- stream.memory.z1d.2xlarge\n- stream.memory.z1d.3xlarge\n- stream.memory.z1d.6xlarge\n- stream.memory.z1d.12xlarge\n- stream.graphics-design.large\n- stream.graphics-design.xlarge\n- stream.graphics-design.2xlarge\n- stream.graphics-design.4xlarge\n- stream.graphics-desktop.2xlarge\n- stream.graphics.g4dn.xlarge\n- stream.graphics.g4dn.2xlarge\n- stream.graphics.g4dn.4xlarge\n- stream.graphics.g4dn.8xlarge\n- stream.graphics.g4dn.12xlarge\n- stream.graphics.g4dn.16xlarge\n- stream.graphics-pro.4xlarge\n- stream.graphics-pro.8xlarge\n- stream.graphics-pro.16xlarge\n- stream.graphics.g5.xlarge\n- stream.graphics.g5.2xlarge\n- stream.graphics.g5.4xlarge\n- stream.graphics.g5.8xlarge\n- stream.graphics.g5.16xlarge\n- stream.graphics.g5.12xlarge\n- stream.graphics.g5.24xlarge\n- stream.graphics.g6.xlarge\n- stream.graphics.g6.2xlarge\n- stream.graphics.g6.4xlarge\n- stream.graphics.g6.8xlarge\n- stream.graphics.g6.16xlarge\n- stream.graphics.g6.12xlarge\n- stream.graphics.g6.24xlarge\n- stream.graphics.gr6.4xlarge\n- stream.graphics.gr6.8xlarge\n- stream.graphics.g6f.large\n- stream.graphics.g6f.xlarge\n- stream.graphics.g6f.2xlarge\n- stream.graphics.g6f.4xlarge\n- stream.graphics.gr6f.4xlarge", + "InstanceType": "The instance type to use when launching the image builder. 
The following instance types are available:\n\n- stream.standard.small\n- stream.standard.medium\n- stream.standard.large\n- stream.compute.large\n- stream.compute.xlarge\n- stream.compute.2xlarge\n- stream.compute.4xlarge\n- stream.compute.8xlarge\n- stream.memory.large\n- stream.memory.xlarge\n- stream.memory.2xlarge\n- stream.memory.4xlarge\n- stream.memory.8xlarge\n- stream.memory.z1d.large\n- stream.memory.z1d.xlarge\n- stream.memory.z1d.2xlarge\n- stream.memory.z1d.3xlarge\n- stream.memory.z1d.6xlarge\n- stream.memory.z1d.12xlarge\n- stream.graphics-design.large\n- stream.graphics-design.xlarge\n- stream.graphics-design.2xlarge\n- stream.graphics-design.4xlarge\n- stream.graphics.g4dn.xlarge\n- stream.graphics.g4dn.2xlarge\n- stream.graphics.g4dn.4xlarge\n- stream.graphics.g4dn.8xlarge\n- stream.graphics.g4dn.12xlarge\n- stream.graphics.g4dn.16xlarge\n- stream.graphics.g5.xlarge\n- stream.graphics.g5.2xlarge\n- stream.graphics.g5.4xlarge\n- stream.graphics.g5.8xlarge\n- stream.graphics.g5.16xlarge\n- stream.graphics.g5.12xlarge\n- stream.graphics.g5.24xlarge\n- stream.graphics.g6.xlarge\n- stream.graphics.g6.2xlarge\n- stream.graphics.g6.4xlarge\n- stream.graphics.g6.8xlarge\n- stream.graphics.g6.16xlarge\n- stream.graphics.g6.12xlarge\n- stream.graphics.g6.24xlarge\n- stream.graphics.gr6.4xlarge\n- stream.graphics.gr6.8xlarge\n- stream.graphics.g6f.large\n- stream.graphics.g6f.xlarge\n- stream.graphics.g6f.2xlarge\n- stream.graphics.g6f.4xlarge\n- stream.graphics.gr6f.4xlarge", "Name": "A unique name for the image builder.", "Tags": "An array of key-value pairs.", "VpcConfig": "The VPC configuration for the image builder. You can specify only one subnet." @@ -5302,6 +5298,7 @@ "Parameters": "Default parameters or parameter substitution placeholders that are set in the job definition. Parameters are specified as a key-value pair mapping. Parameters in a `SubmitJob` request override any corresponding parameter defaults from the job definition. 
For more information about specifying parameters, see [Job definition parameters](https://docs.aws.amazon.com/batch/latest/userguide/job_definition_parameters.html) in the *AWS Batch User Guide* .", "PlatformCapabilities": "The platform capabilities required by the job definition. If no value is specified, it defaults to `EC2` . Jobs run on Fargate resources specify `FARGATE` .", "PropagateTags": "Specifies whether to propagate the tags from the job or job definition to the corresponding Amazon ECS task. If no value is specified, the tags aren't propagated. Tags can only be propagated to the tasks when the tasks are created. For tags with the same name, job tags are given priority over job definitions tags. If the total number of combined tags from the job and job definition is over 50, the job is moved to the `FAILED` state.", + "ResourceRetentionPolicy": "Specifies the resource retention policy settings for the job definition.", "RetryStrategy": "The retry strategy to use for failed jobs that are submitted with this job definition.", "SchedulingPriority": "The scheduling priority of the job definition. This only affects jobs in job queues with a fair-share policy. Jobs with a higher scheduling priority are scheduled before jobs with a lower scheduling priority.", "Tags": "The tags that are applied to the job definition.", @@ -5552,6 +5549,9 @@ "Type": "The type of resource to assign to a container. The supported resources include `GPU` , `MEMORY` , and `VCPU` .", "Value": "The quantity of the specified resource to reserve for the container. The values vary based on the `type` specified.\n\n- **type=\"GPU\"** - The number of physical GPUs to reserve for the container. 
Make sure that the number of GPUs reserved for all containers in a job doesn't exceed the number of available GPUs on the compute resource that the job is launched on.\n\n> GPUs aren't available for jobs that are running on Fargate resources.\n- **type=\"MEMORY\"** - The memory hard limit (in MiB) present to the container. This parameter is supported for jobs that are running on Amazon EC2 resources. If your container attempts to exceed the memory specified, the container is terminated. This parameter maps to `Memory` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.23/#create-a-container) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.23/) and the `--memory` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/) . You must specify at least 4 MiB of memory for a job. This is required but can be specified in several places for multi-node parallel (MNP) jobs. It must be specified for each node at least once. This parameter maps to `Memory` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.23/#create-a-container) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.23/) and the `--memory` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/) .\n\n> If you're trying to maximize your resource utilization by providing your jobs as much memory as possible for a particular instance type, see [Memory management](https://docs.aws.amazon.com/batch/latest/userguide/memory-management.html) in the *AWS Batch User Guide* . 
\n\nFor jobs that are running on Fargate resources, then `value` is the hard limit (in MiB), and must match one of the supported values and the `VCPU` values must be one of the values supported for that memory value.\n\n- **value = 512** - `VCPU` = 0.25\n- **value = 1024** - `VCPU` = 0.25 or 0.5\n- **value = 2048** - `VCPU` = 0.25, 0.5, or 1\n- **value = 3072** - `VCPU` = 0.5, or 1\n- **value = 4096** - `VCPU` = 0.5, 1, or 2\n- **value = 5120, 6144, or 7168** - `VCPU` = 1 or 2\n- **value = 8192** - `VCPU` = 1, 2, or 4\n- **value = 9216, 10240, 11264, 12288, 13312, 14336, or 15360** - `VCPU` = 2 or 4\n- **value = 16384** - `VCPU` = 2, 4, or 8\n- **value = 17408, 18432, 19456, 21504, 22528, 23552, 25600, 26624, 27648, 29696, or 30720** - `VCPU` = 4\n- **value = 20480, 24576, or 28672** - `VCPU` = 4 or 8\n- **value = 36864, 45056, 53248, or 61440** - `VCPU` = 8\n- **value = 32768, 40960, 49152, or 57344** - `VCPU` = 8 or 16\n- **value = 65536, 73728, 81920, 90112, 98304, 106496, 114688, or 122880** - `VCPU` = 16\n- **type=\"VCPU\"** - The number of vCPUs reserved for the container. This parameter maps to `CpuShares` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.23/#create-a-container) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.23/) and the `--cpu-shares` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/) . Each vCPU is equivalent to 1,024 CPU shares. For Amazon EC2 resources, you must specify at least one vCPU. This is required but can be specified in several places; it must be specified for each node at least once.\n\nThe default for the Fargate On-Demand vCPU resource count quota is 6 vCPUs. 
For more information about Fargate quotas, see [AWS Fargate quotas](https://docs.aws.amazon.com/general/latest/gr/ecs-service.html#service-quotas-fargate) in the *AWS General Reference* .\n\nFor jobs that are running on Fargate resources, then `value` must match one of the supported values and the `MEMORY` values must be one of the values supported for that `VCPU` value. The supported values are 0.25, 0.5, 1, 2, 4, 8, and 16\n\n- **value = 0.25** - `MEMORY` = 512, 1024, or 2048\n- **value = 0.5** - `MEMORY` = 1024, 2048, 3072, or 4096\n- **value = 1** - `MEMORY` = 2048, 3072, 4096, 5120, 6144, 7168, or 8192\n- **value = 2** - `MEMORY` = 4096, 5120, 6144, 7168, 8192, 9216, 10240, 11264, 12288, 13312, 14336, 15360, or 16384\n- **value = 4** - `MEMORY` = 8192, 9216, 10240, 11264, 12288, 13312, 14336, 15360, 16384, 17408, 18432, 19456, 20480, 21504, 22528, 23552, 24576, 25600, 26624, 27648, 28672, 29696, or 30720\n- **value = 8** - `MEMORY` = 16384, 20480, 24576, 28672, 32768, 36864, 40960, 45056, 49152, 53248, 57344, or 61440\n- **value = 16** - `MEMORY` = 32768, 40960, 49152, 57344, 65536, 73728, 81920, 90112, 98304, 106496, 114688, or 122880" }, + "AWS::Batch::JobDefinition ResourceRetentionPolicy": { + "SkipDeregisterOnUpdate": "Specifies whether the previous revision of the job definition is retained in an active status after UPDATE events for the resource. The default value is `false` . When the property is set to `false` , the previous revision of the job definition is de-registered after a new revision is created. When the property is set to `true` , the previous revision of the job definition is not de-registered." + }, "AWS::Batch::JobDefinition RetryStrategy": { "Attempts": "The number of times to move a job to the `RUNNABLE` status. You can specify between 1 and 10 attempts. 
If the value of `attempts` is greater than one, the job is retried on failure the same number of attempts as the value.", "EvaluateOnExit": "Array of up to 5 objects that specify the conditions where jobs are retried or failed. If this parameter is specified, then the `attempts` parameter must also be specified. If none of the listed conditions match, then the job is retried." @@ -7440,9 +7440,11 @@ "AccountId": "The AWS account in which this custom line item will be applied to.", "BillingGroupArn": "The Amazon Resource Name (ARN) that references the billing group where the custom line item applies to.", "BillingPeriodRange": "A time range for which the custom line item is effective.", + "ComputationRule": "", "CustomLineItemChargeDetails": "The charge details of a custom line item. It should contain only one of `Flat` or `Percentage` .", "Description": "The custom line item's description. This is shown on the Bills page in association with the charge value.", "Name": "The custom line item's name.", + "PresentationDetails": "", "Tags": "A map that contains tag keys and tag values that are attached to a custom line item." }, "AWS::BillingConductor::CustomLineItem BillingPeriodRange": { @@ -7467,6 +7469,9 @@ "MatchOption": "The match criteria of the line item filter. This parameter specifies whether not to include the resource value from the billing group total cost.", "Values": "The values of the line item filter. This specifies the values to filter on. Currently, you can only exclude Savings Plans discounts." }, + "AWS::BillingConductor::CustomLineItem PresentationDetails": { + "Service": "" + }, "AWS::BillingConductor::CustomLineItem Tag": { "Key": "The key in a key-value pair.", "Value": "The value in a key-value pair of a tag." @@ -7982,7 +7987,7 @@ "Value": "The value of the tag." 
}, "AWS::CleanRooms::ConfiguredTable": { - "AllowedColumns": "The columns within the underlying AWS Glue table that can be utilized within collaborations.", + "AllowedColumns": "The columns within the underlying AWS Glue table that can be used within collaborations.", "AnalysisMethod": "The analysis method for the configured table.\n\n`DIRECT_QUERY` allows SQL queries to be run directly on this table.\n\n`DIRECT_JOB` allows PySpark jobs to be run directly on this table.\n\n`MULTIPLE` allows both SQL queries and PySpark jobs to be run directly on this table.", "AnalysisRules": "The analysis rule that was created for the configured table.", "Description": "A description for the configured table.", @@ -11654,6 +11659,11 @@ "AWS::ConnectCampaignsV2::Campaign PredictiveConfig": { "BandwidthAllocation": "Bandwidth allocation for the predictive outbound mode." }, + "AWS::ConnectCampaignsV2::Campaign PreviewConfig": { + "AgentActions": "Agent actions for the preview outbound mode.", + "BandwidthAllocation": "Bandwidth allocation for the preview outbound mode.", + "TimeoutConfig": "Countdown timer configuration for preview outbound mode." + }, "AWS::ConnectCampaignsV2::Campaign ProgressiveConfig": { "BandwidthAllocation": "Bandwidth allocation for the progressive outbound mode." }, @@ -11704,6 +11714,7 @@ "AWS::ConnectCampaignsV2::Campaign TelephonyOutboundMode": { "AgentlessConfig": "The agentless outbound mode configuration for telephony.", "PredictiveConfig": "Contains predictive outbound mode configuration.", + "PreviewConfig": "", "ProgressiveConfig": "Contains progressive telephony outbound mode configuration." }, "AWS::ConnectCampaignsV2::Campaign TimeRange": { @@ -11714,6 +11725,9 @@ "OpenHours": "The open hours configuration.", "RestrictedPeriods": "The restricted periods configuration." }, + "AWS::ConnectCampaignsV2::Campaign TimeoutConfig": { + "DurationInSeconds": "Duration in seconds for the countdown timer." 
+ }, "AWS::ControlTower::EnabledBaseline": { "BaselineIdentifier": "The specific `Baseline` enabled as part of the `EnabledBaseline` resource.", "BaselineVersion": "The enabled version of the `Baseline` .", @@ -13693,8 +13707,10 @@ "AwsLocation": "The location where the connection is created.", "Description": "Connection description.", "DomainIdentifier": "The ID of the domain where the connection is created.", + "EnableTrustedIdentityPropagation": "", "EnvironmentIdentifier": "The ID of the environment where the connection is created.", "Name": "The name of the connection.", + "ProjectIdentifier": "", "Props": "Connection props." }, "AWS::DataZone::Connection AthenaPropertiesInput": { @@ -13728,6 +13744,7 @@ "HyperPodProperties": "The hyper pod properties of a connection.", "IamProperties": "The IAM properties of a connection.", "RedshiftProperties": "The Amazon Redshift properties of a connection.", + "S3Properties": "", "SparkEmrProperties": "The Spark EMR properties of a connection.", "SparkGlueProperties": "The Spark AWS Glue properties of a connection." }, @@ -13801,6 +13818,10 @@ "ClusterName": "The cluster name in the Amazon Redshift storage properties.", "WorkgroupName": "The workgroup name in the Amazon Redshift storage properties." }, + "AWS::DataZone::Connection S3PropertiesInput": { + "S3AccessGrantLocationId": "", + "S3Uri": "" + }, "AWS::DataZone::Connection SparkEmrPropertiesInput": { "ComputeArn": "The compute ARN of Spark EMR.", "InstanceProfileArn": "The instance profile ARN of Spark EMR.", @@ -14917,6 +14938,17 @@ "ReadUnitsPerSecond": "Represents the number of read operations your base table can instantaneously support.", "WriteUnitsPerSecond": "Represents the number of write operations your base table can instantaneously support." 
}, + "AWS::EC2::CapacityManagerDataExport": { + "OutputFormat": "The file format of the exported data.", + "S3BucketName": "The name of the S3 bucket where export files are delivered.", + "S3BucketPrefix": "The S3 key prefix used for organizing export files within the bucket.", + "Schedule": "The frequency at which data exports are generated.", + "Tags": "The tags associated with the data export configuration." + }, + "AWS::EC2::CapacityManagerDataExport Tag": { + "Key": "The key of the tag.\n\nConstraints: Tag keys are case-sensitive and accept a maximum of 127 Unicode characters. May not begin with `aws:` .", + "Value": "The value of the tag.\n\nConstraints: Tag values are case-sensitive and accept a maximum of 256 Unicode characters." + }, "AWS::EC2::CapacityReservation": { "AvailabilityZone": "The Availability Zone in which to create the Capacity Reservation.", "AvailabilityZoneId": "The ID of the Availability Zone in which the capacity is reserved.", @@ -17502,9 +17534,9 @@ "Tags": "The metadata that you apply to the cluster to help you categorize and organize them. Each tag consists of a key and an optional value. You define both.\n\nThe following basic restrictions apply to tags:\n\n- Maximum number of tags per resource - 50\n- For each resource, each tag key must be unique, and each tag key can have only one value.\n- Maximum key length - 128 Unicode characters in UTF-8\n- Maximum value length - 256 Unicode characters in UTF-8\n- If your tagging schema is used across multiple services and resources, remember that other services may have restrictions on allowed characters. Generally allowed characters are: letters, numbers, and spaces representable in UTF-8, and the following characters: + - = . _ : / @.\n- Tag keys and values are case-sensitive.\n- Do not use `aws:` , `AWS:` , or any upper or lowercase combination of such as a prefix for either keys or values as it is reserved for AWS use. You cannot edit or delete tag keys or values with this prefix. 
Tags with this prefix do not count against your tags per resource limit." }, "AWS::ECS::Cluster CapacityProviderStrategyItem": { - "Base": "The *base* value designates how many tasks, at a minimum, to run on the specified capacity provider for each service. Only one capacity provider in a capacity provider strategy can have a *base* defined. If no value is specified, the default value of `0` is used.\n\nBase value characteristics:\n\n- Only one capacity provider in a strategy can have a base defined\n- Default value is `0` if not specified\n- Valid range: 0 to 100,000\n- Base requirements are satisfied first before weight distribution", - "CapacityProvider": "The short name of the capacity provider.", - "Weight": "The *weight* value designates the relative percentage of the total number of tasks launched that should use the specified capacity provider. The `weight` value is taken into consideration after the `base` value, if defined, is satisfied.\n\nIf no `weight` value is specified, the default value of `0` is used. When multiple capacity providers are specified within a capacity provider strategy, at least one of the capacity providers must have a weight value greater than zero and any capacity providers with a weight of `0` can't be used to place tasks. 
If you specify multiple capacity providers in a strategy that all have a weight of `0` , any `RunTask` or `CreateService` actions using the capacity provider strategy will fail.\n\nWeight value characteristics:\n\n- Weight is considered after the base value is satisfied\n- Default value is `0` if not specified\n- Valid range: 0 to 1,000\n- At least one capacity provider must have a weight greater than zero\n- Capacity providers with weight of `0` cannot place tasks\n\nTask distribution logic:\n\n- Base satisfaction: The minimum number of tasks specified by the base value are placed on that capacity provider\n- Weight distribution: After base requirements are met, additional tasks are distributed according to weight ratios\n\nExamples:\n\nEqual Distribution: Two capacity providers both with weight `1` will split tasks evenly after base requirements are met.\n\nWeighted Distribution: If capacityProviderA has weight `1` and capacityProviderB has weight `4` , then for every 1 task on A, 4 tasks will run on B." + "Base": "The *base* value designates how many tasks, at a minimum, to run on the specified capacity provider for each service. Only one capacity provider in a capacity provider strategy can have a *base* defined. If no value is specified, the default value of `0` is used.\n\nBase value characteristics:\n\n- Only one capacity provider in a strategy can have a base defined\n- The default value is `0` if not specified\n- The valid range is 0 to 100,000\n- Base requirements are satisfied first before weight distribution", + "CapacityProvider": "The short name of the capacity provider. This can be either an AWS managed capacity provider ( `FARGATE` or `FARGATE_SPOT` ) or the name of a custom capacity provider that you created.", + "Weight": "The *weight* value designates the relative percentage of the total number of tasks launched that should use the specified capacity provider. 
The `weight` value is taken into consideration after the `base` value, if defined, is satisfied.\n\nIf no `weight` value is specified, the default value of `0` is used. When multiple capacity providers are specified within a capacity provider strategy, at least one of the capacity providers must have a weight value greater than zero and any capacity providers with a weight of `0` can't be used to place tasks. If you specify multiple capacity providers in a strategy that all have a weight of `0` , any `RunTask` or `CreateService` actions using the capacity provider strategy will fail.\n\nWeight value characteristics:\n\n- Weight is considered after the base value is satisfied\n- The default value is `0` if not specified\n- The valid range is 0 to 1,000\n- At least one capacity provider must have a weight greater than zero\n- Capacity providers with weight of `0` cannot place tasks\n\nTask distribution logic:\n\n- Base satisfaction: The minimum number of tasks specified by the base value are placed on that capacity provider\n- Weight distribution: After base requirements are met, additional tasks are distributed according to weight ratios\n\nExamples:\n\nEqual Distribution: Two capacity providers both with weight `1` will split tasks evenly after base requirements are met.\n\nWeighted Distribution: If capacityProviderA has weight `1` and capacityProviderB has weight `4` , then for every 1 task on A, 4 tasks will run on B." }, "AWS::ECS::Cluster ClusterConfiguration": { "ExecuteCommandConfiguration": "The details of the execute command configuration.", @@ -17543,9 +17575,9 @@ "DefaultCapacityProviderStrategy": "The default capacity provider strategy to associate with the cluster." }, "AWS::ECS::ClusterCapacityProviderAssociations CapacityProviderStrategy": { - "Base": "The *base* value designates how many tasks, at a minimum, to run on the specified capacity provider for each service. 
Only one capacity provider in a capacity provider strategy can have a *base* defined. If no value is specified, the default value of `0` is used.\n\nBase value characteristics:\n\n- Only one capacity provider in a strategy can have a base defined\n- Default value is `0` if not specified\n- Valid range: 0 to 100,000\n- Base requirements are satisfied first before weight distribution", - "CapacityProvider": "The short name of the capacity provider.", - "Weight": "The *weight* value designates the relative percentage of the total number of tasks launched that should use the specified capacity provider. The `weight` value is taken into consideration after the `base` value, if defined, is satisfied.\n\nIf no `weight` value is specified, the default value of `0` is used. When multiple capacity providers are specified within a capacity provider strategy, at least one of the capacity providers must have a weight value greater than zero and any capacity providers with a weight of `0` can't be used to place tasks. 
If you specify multiple capacity providers in a strategy that all have a weight of `0` , any `RunTask` or `CreateService` actions using the capacity provider strategy will fail.\n\nWeight value characteristics:\n\n- Weight is considered after the base value is satisfied\n- Default value is `0` if not specified\n- Valid range: 0 to 1,000\n- At least one capacity provider must have a weight greater than zero\n- Capacity providers with weight of `0` cannot place tasks\n\nTask distribution logic:\n\n- Base satisfaction: The minimum number of tasks specified by the base value are placed on that capacity provider\n- Weight distribution: After base requirements are met, additional tasks are distributed according to weight ratios\n\nExamples:\n\nEqual Distribution: Two capacity providers both with weight `1` will split tasks evenly after base requirements are met.\n\nWeighted Distribution: If capacityProviderA has weight `1` and capacityProviderB has weight `4` , then for every 1 task on A, 4 tasks will run on B." + "Base": "The *base* value designates how many tasks, at a minimum, to run on the specified capacity provider for each service. Only one capacity provider in a capacity provider strategy can have a *base* defined. If no value is specified, the default value of `0` is used.\n\nBase value characteristics:\n\n- Only one capacity provider in a strategy can have a base defined\n- The default value is `0` if not specified\n- The valid range is 0 to 100,000\n- Base requirements are satisfied first before weight distribution", + "CapacityProvider": "The short name of the capacity provider. This can be either an AWS managed capacity provider ( `FARGATE` or `FARGATE_SPOT` ) or the name of a custom capacity provider that you created.", + "Weight": "The *weight* value designates the relative percentage of the total number of tasks launched that should use the specified capacity provider. 
The `weight` value is taken into consideration after the `base` value, if defined, is satisfied.\n\nIf no `weight` value is specified, the default value of `0` is used. When multiple capacity providers are specified within a capacity provider strategy, at least one of the capacity providers must have a weight value greater than zero and any capacity providers with a weight of `0` can't be used to place tasks. If you specify multiple capacity providers in a strategy that all have a weight of `0` , any `RunTask` or `CreateService` actions using the capacity provider strategy will fail.\n\nWeight value characteristics:\n\n- Weight is considered after the base value is satisfied\n- The default value is `0` if not specified\n- The valid range is 0 to 1,000\n- At least one capacity provider must have a weight greater than zero\n- Capacity providers with weight of `0` cannot place tasks\n\nTask distribution logic:\n\n- Base satisfaction: The minimum number of tasks specified by the base value are placed on that capacity provider\n- Weight distribution: After base requirements are met, additional tasks are distributed according to weight ratios\n\nExamples:\n\nEqual Distribution: Two capacity providers both with weight `1` will split tasks evenly after base requirements are met.\n\nWeighted Distribution: If capacityProviderA has weight `1` and capacityProviderB has weight `4` , then for every 1 task on A, 4 tasks will run on B." }, "AWS::ECS::PrimaryTaskSet": { "Cluster": "The short name or full Amazon Resource Name (ARN) of the cluster that hosts the service that the task set exists in.", @@ -17591,10 +17623,14 @@ "SecurityGroups": "The IDs of the security groups associated with the task or service. If you don't specify a security group, the default security group for the VPC is used. 
There's a limit of 5 security groups that can be specified.\n\n> All specified security groups must be from the same VPC.", "Subnets": "The IDs of the subnets associated with the task or service. There's a limit of 16 subnets that can be specified.\n\n> All specified subnets must be from the same VPC." }, + "AWS::ECS::Service CanaryConfiguration": { + "CanaryBakeTimeInMinutes": "The amount of time in minutes to wait during the canary phase before shifting the remaining production traffic to the new service revision. Valid values are 0 to 1440 minutes (24 hours). The default value is 10.", + "CanaryPercent": "The percentage of production traffic to shift to the new service revision during the canary phase. Valid values are multiples of 0.1 from 0.1 to 100.0. The default value is 5.0." + }, "AWS::ECS::Service CapacityProviderStrategyItem": { - "Base": "The *base* value designates how many tasks, at a minimum, to run on the specified capacity provider for each service. Only one capacity provider in a capacity provider strategy can have a *base* defined. If no value is specified, the default value of `0` is used.\n\nBase value characteristics:\n\n- Only one capacity provider in a strategy can have a base defined\n- Default value is `0` if not specified\n- Valid range: 0 to 100,000\n- Base requirements are satisfied first before weight distribution", - "CapacityProvider": "The short name of the capacity provider.", - "Weight": "The *weight* value designates the relative percentage of the total number of tasks launched that should use the specified capacity provider. The `weight` value is taken into consideration after the `base` value, if defined, is satisfied.\n\nIf no `weight` value is specified, the default value of `0` is used. 
When multiple capacity providers are specified within a capacity provider strategy, at least one of the capacity providers must have a weight value greater than zero and any capacity providers with a weight of `0` can't be used to place tasks. If you specify multiple capacity providers in a strategy that all have a weight of `0` , any `RunTask` or `CreateService` actions using the capacity provider strategy will fail.\n\nWeight value characteristics:\n\n- Weight is considered after the base value is satisfied\n- Default value is `0` if not specified\n- Valid range: 0 to 1,000\n- At least one capacity provider must have a weight greater than zero\n- Capacity providers with weight of `0` cannot place tasks\n\nTask distribution logic:\n\n- Base satisfaction: The minimum number of tasks specified by the base value are placed on that capacity provider\n- Weight distribution: After base requirements are met, additional tasks are distributed according to weight ratios\n\nExamples:\n\nEqual Distribution: Two capacity providers both with weight `1` will split tasks evenly after base requirements are met.\n\nWeighted Distribution: If capacityProviderA has weight `1` and capacityProviderB has weight `4` , then for every 1 task on A, 4 tasks will run on B." + "Base": "The *base* value designates how many tasks, at a minimum, to run on the specified capacity provider for each service. Only one capacity provider in a capacity provider strategy can have a *base* defined. If no value is specified, the default value of `0` is used.\n\nBase value characteristics:\n\n- Only one capacity provider in a strategy can have a base defined\n- The default value is `0` if not specified\n- The valid range is 0 to 100,000\n- Base requirements are satisfied first before weight distribution", + "CapacityProvider": "The short name of the capacity provider. 
This can be either an AWS managed capacity provider ( `FARGATE` or `FARGATE_SPOT` ) or the name of a custom capacity provider that you created.", + "Weight": "The *weight* value designates the relative percentage of the total number of tasks launched that should use the specified capacity provider. The `weight` value is taken into consideration after the `base` value, if defined, is satisfied.\n\nIf no `weight` value is specified, the default value of `0` is used. When multiple capacity providers are specified within a capacity provider strategy, at least one of the capacity providers must have a weight value greater than zero and any capacity providers with a weight of `0` can't be used to place tasks. If you specify multiple capacity providers in a strategy that all have a weight of `0` , any `RunTask` or `CreateService` actions using the capacity provider strategy will fail.\n\nWeight value characteristics:\n\n- Weight is considered after the base value is satisfied\n- The default value is `0` if not specified\n- The valid range is 0 to 1,000\n- At least one capacity provider must have a weight greater than zero\n- Capacity providers with weight of `0` cannot place tasks\n\nTask distribution logic:\n\n- Base satisfaction: The minimum number of tasks specified by the base value are placed on that capacity provider\n- Weight distribution: After base requirements are met, additional tasks are distributed according to weight ratios\n\nExamples:\n\nEqual Distribution: Two capacity providers both with weight `1` will split tasks evenly after base requirements are met.\n\nWeighted Distribution: If capacityProviderA has weight `1` and capacityProviderB has weight `4` , then for every 1 task on A, 4 tasks will run on B." }, "AWS::ECS::Service DeploymentAlarms": { "AlarmNames": "One or more CloudWatch alarm names. 
Use a \",\" to separate the alarms.", @@ -17608,10 +17644,10 @@ "AWS::ECS::Service DeploymentConfiguration": { "Alarms": "Information about the CloudWatch alarms.", "BakeTimeInMinutes": "The duration when both blue and green service revisions are running simultaneously after the production traffic has shifted.\n\nThe following rules apply when you don't specify a value:\n\n- For rolling deployments, the value is set to 3 hours (180 minutes).\n- When you use an external deployment controller ( `EXTERNAL` ), or the CodeDeploy blue/green deployment controller ( `CODE_DEPLOY` ), the value is set to 3 hours (180 minutes).\n- For all other cases, the value is set to 36 hours (2160 minutes).", - "CanaryConfiguration": "", + "CanaryConfiguration": "Configuration for canary deployment strategy. Only valid when the deployment strategy is `CANARY` . This configuration enables shifting a fixed percentage of traffic for testing, followed by shifting the remaining traffic after a bake period.", "DeploymentCircuitBreaker": "> The deployment circuit breaker can only be used for services using the rolling update ( `ECS` ) deployment type. \n\nThe *deployment circuit breaker* determines whether a service deployment will fail if the service can't reach a steady state. If you use the deployment circuit breaker, a service deployment will transition to a failed state and stop launching new tasks. If you use the rollback option, when a service deployment fails, the service is rolled back to the last deployment that completed successfully. For more information, see [Rolling update](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/deployment-type-ecs.html) in the *Amazon Elastic Container Service Developer Guide*", "LifecycleHooks": "An array of deployment lifecycle hook objects to run custom logic at specific stages of the deployment lifecycle.", - "LinearConfiguration": "", + "LinearConfiguration": "Configuration for linear deployment strategy. 
Only valid when the deployment strategy is `LINEAR` . This configuration enables progressive traffic shifting in equal percentage increments with configurable bake times between each step.", "MaximumPercent": "If a service is using the rolling update ( `ECS` ) deployment type, the `maximumPercent` parameter represents an upper limit on the number of your service's tasks that are allowed in the `RUNNING` or `PENDING` state during a deployment, as a percentage of the `desiredCount` (rounded down to the nearest integer). This parameter enables you to define the deployment batch size. For example, if your service is using the `REPLICA` service scheduler and has a `desiredCount` of four tasks and a `maximumPercent` value of 200%, the scheduler may start four new tasks before stopping the four older tasks (provided that the cluster resources required to do this are available). The default `maximumPercent` value for a service using the `REPLICA` service scheduler is 200%.\n\nThe Amazon ECS scheduler uses this parameter to replace unhealthy tasks by starting replacement tasks first and then stopping the unhealthy tasks, as long as cluster resources for starting replacement tasks are available. For more information about how the scheduler replaces unhealthy tasks, see [Amazon ECS services](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs_services.html) .\n\nIf a service is using either the blue/green ( `CODE_DEPLOY` ) or `EXTERNAL` deployment types, and tasks in the service use the EC2 launch type, the *maximum percent* value is set to the default value. The *maximum percent* value is used to define the upper limit on the number of the tasks in the service that remain in the `RUNNING` state while the container instances are in the `DRAINING` state.\n\n> You can't specify a custom `maximumPercent` value for a service that uses either the blue/green ( `CODE_DEPLOY` ) or `EXTERNAL` deployment types and has tasks that use the EC2 launch type. 
\n\nIf the service uses either the blue/green ( `CODE_DEPLOY` ) or `EXTERNAL` deployment types, and the tasks in the service use the Fargate launch type, the maximum percent value is not used. The value is still returned when describing your service.", "MinimumHealthyPercent": "If a service is using the rolling update ( `ECS` ) deployment type, the `minimumHealthyPercent` represents a lower limit on the number of your service's tasks that must remain in the `RUNNING` state during a deployment, as a percentage of the `desiredCount` (rounded up to the nearest integer). This parameter enables you to deploy without using additional cluster capacity. For example, if your service has a `desiredCount` of four tasks and a `minimumHealthyPercent` of 50%, the service scheduler may stop two existing tasks to free up cluster capacity before starting two new tasks.\n\nIf any tasks are unhealthy and if `maximumPercent` doesn't allow the Amazon ECS scheduler to start replacement tasks, the scheduler stops the unhealthy tasks one-by-one \u2014 using the `minimumHealthyPercent` as a constraint \u2014 to clear up capacity to launch replacement tasks. For more information about how the scheduler replaces unhealthy tasks, see [Amazon ECS services](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs_services.html) .\n\nFor services that *do not* use a load balancer, the following should be noted:\n\n- A service is considered healthy if all essential containers within the tasks in the service pass their health checks.\n- If a task has no essential containers with a health check defined, the service scheduler will wait for 40 seconds after a task reaches a `RUNNING` state before the task is counted towards the minimum healthy percent total.\n- If a task has one or more essential containers with a health check defined, the service scheduler will wait for the task to reach a healthy status before counting it towards the minimum healthy percent total. 
A task is considered healthy when all essential containers within the task have passed their health checks. The amount of time the service scheduler can wait for is determined by the container health check settings.\n\nFor services that *do* use a load balancer, the following should be noted:\n\n- If a task has no essential containers with a health check defined, the service scheduler will wait for the load balancer target group health check to return a healthy status before counting the task towards the minimum healthy percent total.\n- If a task has an essential container with a health check defined, the service scheduler will wait for both the task to reach a healthy status and the load balancer target group health check to return a healthy status before counting the task towards the minimum healthy percent total.\n\nThe default value for a replica service for `minimumHealthyPercent` is 100%. The default `minimumHealthyPercent` value for a service using the `DAEMON` service schedule is 0% for the AWS CLI , the AWS SDKs, and the APIs and 50% for the AWS Management Console.\n\nThe minimum number of healthy tasks during a deployment is the `desiredCount` multiplied by the `minimumHealthyPercent` /100, rounded up to the nearest integer value.\n\nIf a service is using either the blue/green ( `CODE_DEPLOY` ) or `EXTERNAL` deployment types and is running tasks that use the EC2 launch type, the *minimum healthy percent* value is set to the default value. The *minimum healthy percent* value is used to define the lower limit on the number of the tasks in the service that remain in the `RUNNING` state while the container instances are in the `DRAINING` state.\n\n> You can't specify a custom `minimumHealthyPercent` value for a service that uses either the blue/green ( `CODE_DEPLOY` ) or `EXTERNAL` deployment types and has tasks that use the EC2 launch type. 
\n\nIf a service is using either the blue/green ( `CODE_DEPLOY` ) or `EXTERNAL` deployment types and is running tasks that use the Fargate launch type, the minimum healthy percent value is not used, although it is returned when describing your service.", "Strategy": "The deployment strategy for the service. Choose from these valid values:\n\n- `ROLLING` - When you create a service which uses the rolling update ( `ROLLING` ) deployment strategy, the Amazon ECS service scheduler replaces the currently running tasks with new tasks. The number of tasks that Amazon ECS adds or removes from the service during a rolling update is controlled by the service deployment configuration.\n- `BLUE_GREEN` - A blue/green deployment strategy ( `BLUE_GREEN` ) is a release methodology that reduces downtime and risk by running two identical production environments called blue and green. With Amazon ECS blue/green deployments, you can validate new service revisions before directing production traffic to them. This approach provides a safer way to deploy changes with the ability to quickly roll back if needed." @@ -17634,6 +17670,10 @@ "EnableForceNewDeployment": "Determines whether to force a new deployment of the service. By default, deployments aren't forced. You can use this option to start a new deployment with no service definition changes. For example, you can update a service's tasks to use a newer Docker image with the same image/tag combination ( `my_image:latest` ) or to roll Fargate tasks onto a newer platform version.", "ForceNewDeploymentNonce": "When you change the `ForceNewDeploymentNonce` value in your template, it signals Amazon ECS to start a new deployment even though no other service parameters have changed. The value must be a unique, time-varying value like a timestamp, random string, or sequence number. Use this property when you want to ensure your tasks pick up the latest version of a Docker image that uses the same tag but has been updated in the registry." 
}, + "AWS::ECS::Service LinearConfiguration": { + "StepBakeTimeInMinutes": "The amount of time in minutes to wait between each traffic shifting step during a linear deployment. Valid values are 0 to 1440 minutes (24 hours). The default value is 6. This bake time is not applied after reaching 100 percent traffic.", + "StepPercent": "The percentage of production traffic to shift in each step during a linear deployment. Valid values are multiples of 0.1 from 3.0 to 100.0. The default value is 10.0." + }, "AWS::ECS::Service LoadBalancer": { "AdvancedConfiguration": "The advanced settings for the load balancer used in blue/green deployments. Specify the alternate target group, listener rules, and IAM role required for traffic shifting during blue/green deployments.", "ContainerName": "The name of the container (as it appears in a container definition) to associate with the load balancer.\n\nYou need to specify the container name when configuring the target group for an Amazon ECS load balancer.", @@ -17661,12 +17701,17 @@ "Name": "The name of the secret.", "ValueFrom": "The secret to expose to the container. The supported values are either the full ARN of the AWS Secrets Manager secret or the full ARN of the parameter in the SSM Parameter Store.\n\nFor information about the require AWS Identity and Access Management permissions, see [Required IAM permissions for Amazon ECS secrets](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/specifying-sensitive-data-secrets.html#secrets-iam) (for Secrets Manager) or [Required IAM permissions for Amazon ECS secrets](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/specifying-sensitive-data-parameters.html) (for Systems Manager Parameter store) in the *Amazon Elastic Container Service Developer Guide* .\n\n> If the SSM Parameter Store parameter exists in the same Region as the task you're launching, then you can use either the full ARN or name of the parameter. 
If the parameter exists in a different Region, then the full ARN must be specified." }, + "AWS::ECS::Service ServiceConnectAccessLogConfiguration": { + "Format": "The format for Service Connect access log output. Choose TEXT for human-readable logs or JSON for structured data that integrates well with log analysis tools.", + "IncludeQueryParameters": "Specifies whether to include query parameters in Service Connect access logs.\n\nWhen enabled, query parameters from HTTP requests are included in the access logs. Consider security and privacy implications when enabling this feature, as query parameters may contain sensitive information such as request IDs and tokens. By default, this parameter is `DISABLED` ." + }, "AWS::ECS::Service ServiceConnectClientAlias": { "DnsName": "The `dnsName` is the name that you use in the applications of client tasks to connect to this service. The name must be a valid DNS name but doesn't need to be fully-qualified. The name can include up to 127 characters. The name can include lowercase letters, numbers, underscores (_), hyphens (-), and periods (.). The name can't start with a hyphen.\n\nIf this parameter isn't specified, the default value of `discoveryName.namespace` is used. If the `discoveryName` isn't specified, the port mapping name from the task definition is used in `portName.namespace` .\n\nTo avoid changing your applications in client Amazon ECS services, set this to the same name that the client application uses by default. For example, a few common names are `database` , `db` , or the lowercase name of a database, such as `mysql` or `redis` . For more information, see [Service Connect](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/service-connect.html) in the *Amazon Elastic Container Service Developer Guide* .", "Port": "The listening port number for the Service Connect proxy. 
This port is available inside of all of the tasks within the same namespace.\n\nTo avoid changing your applications in client Amazon ECS services, set this to the same port that the client application uses by default. For more information, see [Service Connect](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/service-connect.html) in the *Amazon Elastic Container Service Developer Guide* .", "TestTrafficRules": "The configuration for test traffic routing rules used during blue/green deployments with Amazon ECS Service Connect. This allows you to route a portion of traffic to the new service revision of your service for testing before shifting all production traffic." }, "AWS::ECS::Service ServiceConnectConfiguration": { + "AccessLogConfiguration": "The configuration for Service Connect access logging. Access logs capture detailed information about requests made to your service, including request patterns, response codes, and timing data. They can be useful for debugging connectivity issues, monitoring service performance, and auditing service-to-service communication for security and compliance purposes.\n\n> To enable access logs, you must also specify a `logConfiguration` in the `serviceConnectConfiguration` .", "Enabled": "Specifies whether to use Service Connect with this service.", "LogConfiguration": "The log configuration for the container. This parameter maps to `LogConfig` in the docker container create command and the `--log-driver` option to docker run.\n\nBy default, containers use the same logging driver that the Docker daemon uses. However, the container might use a different logging driver than the Docker daemon by specifying a log driver configuration in the container definition.\n\nUnderstand the following when specifying a log configuration for your containers.\n\n- Amazon ECS currently supports a subset of the logging drivers available to the Docker daemon. 
Additional log drivers may be available in future releases of the Amazon ECS container agent.\n\nFor tasks on AWS Fargate , the supported log drivers are `awslogs` , `splunk` , and `awsfirelens` .\n\nFor tasks hosted on Amazon EC2 instances, the supported log drivers are `awslogs` , `fluentd` , `gelf` , `json-file` , `journald` , `syslog` , `splunk` , and `awsfirelens` .\n- This parameter requires version 1.18 of the Docker Remote API or greater on your container instance.\n- For tasks that are hosted on Amazon EC2 instances, the Amazon ECS container agent must register the available logging drivers with the `ECS_AVAILABLE_LOGGING_DRIVERS` environment variable before containers placed on that instance can use these log configuration options. For more information, see [Amazon ECS container agent configuration](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-agent-config.html) in the *Amazon Elastic Container Service Developer Guide* .\n- For tasks that are on AWS Fargate , because you don't have access to the underlying infrastructure your tasks are hosted on, any additional software needed must be installed outside of the task. For example, the Fluentd output aggregators or a remote host running Logstash to send Gelf logs to.", "Namespace": "The namespace name or full Amazon Resource Name (ARN) of the AWS Cloud Map namespace for use with Service Connect. The namespace must be in the same AWS Region as the Amazon ECS service and cluster. The type of namespace doesn't affect Service Connect. For more information about AWS Cloud Map , see [Working with Services](https://docs.aws.amazon.com/cloud-map/latest/dg/working-with-services.html) in the *AWS Cloud Map Developer Guide* .", @@ -17744,11 +17789,11 @@ "IpcMode": "The IPC resource namespace to use for the containers in the task. The valid values are `host` , `task` , or `none` . 
If `host` is specified, then all containers within the tasks that specified the `host` IPC mode on the same container instance share the same IPC resources with the host Amazon EC2 instance. If `task` is specified, all containers within the specified task share the same IPC resources. If `none` is specified, then IPC resources within the containers of a task are private and not shared with other containers in a task or on the container instance. If no value is specified, then the IPC resource namespace sharing depends on the Docker daemon setting on the container instance.\n\nIf the `host` IPC mode is used, be aware that there is a heightened risk of undesired IPC namespace expose.\n\nIf you are setting namespaced kernel parameters using `systemControls` for the containers in the task, the following will apply to your IPC resource namespace. For more information, see [System Controls](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definition_parameters.html) in the *Amazon Elastic Container Service Developer Guide* .\n\n- For tasks that use the `host` IPC mode, IPC namespace related `systemControls` are not supported.\n- For tasks that use the `task` IPC mode, IPC namespace related `systemControls` will apply to all containers within a task.\n\n> This parameter is not supported for Windows containers or tasks run on AWS Fargate .", "Memory": "The amount (in MiB) of memory used by the task.\n\nIf your tasks runs on Amazon EC2 instances, you must specify either a task-level memory value or a container-level memory value. This field is optional and any value can be used. If a task-level memory value is specified, the container-level memory value is optional. For more information regarding container-level memory and memory reservation, see [ContainerDefinition](https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_ContainerDefinition.html) .\n\nIf your tasks runs on AWS Fargate , this field is required. 
You must use one of the following values. The value you choose determines your range of valid values for the `cpu` parameter.\n\n- 512 (0.5 GB), 1024 (1 GB), 2048 (2 GB) - Available `cpu` values: 256 (.25 vCPU)\n- 1024 (1 GB), 2048 (2 GB), 3072 (3 GB), 4096 (4 GB) - Available `cpu` values: 512 (.5 vCPU)\n- 2048 (2 GB), 3072 (3 GB), 4096 (4 GB), 5120 (5 GB), 6144 (6 GB), 7168 (7 GB), 8192 (8 GB) - Available `cpu` values: 1024 (1 vCPU)\n- Between 4096 (4 GB) and 16384 (16 GB) in increments of 1024 (1 GB) - Available `cpu` values: 2048 (2 vCPU)\n- Between 8192 (8 GB) and 30720 (30 GB) in increments of 1024 (1 GB) - Available `cpu` values: 4096 (4 vCPU)\n- Between 16 GB and 60 GB in 4 GB increments - Available `cpu` values: 8192 (8 vCPU)\n\nThis option requires Linux platform `1.4.0` or later.\n- Between 32GB and 120 GB in 8 GB increments - Available `cpu` values: 16384 (16 vCPU)\n\nThis option requires Linux platform `1.4.0` or later.", "NetworkMode": "The Docker networking mode to use for the containers in the task. The valid values are `none` , `bridge` , `awsvpc` , and `host` . If no network mode is specified, the default is `bridge` .\n\nFor Amazon ECS tasks on Fargate, the `awsvpc` network mode is required. For Amazon ECS tasks on Amazon EC2 Linux instances, any network mode can be used. For Amazon ECS tasks on Amazon EC2 Windows instances, `` or `awsvpc` can be used. If the network mode is set to `none` , you cannot specify port mappings in your container definitions, and the tasks containers do not have external connectivity. 
The `host` and `awsvpc` network modes offer the highest networking performance for containers because they use the EC2 network stack instead of the virtualized network stack provided by the `bridge` mode.\n\nWith the `host` and `awsvpc` network modes, exposed container ports are mapped directly to the corresponding host port (for the `host` network mode) or the attached elastic network interface port (for the `awsvpc` network mode), so you cannot take advantage of dynamic host port mappings.\n\n> When using the `host` network mode, you should not run containers using the root user (UID 0). It is considered best practice to use a non-root user. \n\nIf the network mode is `awsvpc` , the task is allocated an elastic network interface, and you must specify a [NetworkConfiguration](https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_NetworkConfiguration.html) value when you create a service or run a task with the task definition. For more information, see [Task Networking](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-networking.html) in the *Amazon Elastic Container Service Developer Guide* .\n\nIf the network mode is `host` , you cannot run multiple instantiations of the same task on a single container instance when port mappings are used.", - "PidMode": "The process namespace to use for the containers in the task. The valid values are `host` or `task` . On Fargate for Linux containers, the only valid value is `task` . 
For example, monitoring sidecars might need `pidMode` to access information about other containers running in the same task.\n\nIf `host` is specified, all containers within the tasks that specified the `host` PID mode on the same container instance share the same process namespace with the host Amazon EC2 instance.\n\nIf `task` is specified, all containers within the specified task share the same process namespace.\n\nIf no value is specified, the default is a private namespace for each container.\n\nIf the `host` PID mode is used, there's a heightened risk of undesired process namespace exposure.\n\n> This parameter is not supported for Windows containers. > This parameter is only supported for tasks that are hosted on AWS Fargate if the tasks are using platform version `1.4.0` or later (Linux).", + "PidMode": "The process namespace to use for the containers in the task. The valid values are `host` or `task` . On Fargate for Linux containers, the only valid value is `task` . For example, monitoring sidecars might need `pidMode` to access information about other containers running in the same task.\n\nIf `host` is specified, all containers within the tasks that specified the `host` PID mode on the same container instance share the same process namespace with the host Amazon EC2 instance.\n\nIf `task` is specified, all containers within the specified task share the same process namespace.\n\nIf no value is specified, the default is a private namespace for each container.\n\nIf the `host` PID mode is used, there's a heightened risk of undesired process namespace exposure.\n\n> This parameter is not supported for Windows containers. > This parameter is only supported for tasks that are hosted on AWS Fargate if the tasks are using platform version `1.4.0` or later (Linux). 
This isn't supported for Windows containers on Fargate.", "PlacementConstraints": "An array of placement constraint objects to use for tasks.\n\n> This parameter isn't supported for tasks run on AWS Fargate .", "ProxyConfiguration": "The configuration details for the App Mesh proxy.\n\nYour Amazon ECS container instances require at least version 1.26.0 of the container agent and at least version 1.26.0-1 of the `ecs-init` package to use a proxy configuration. If your container instances are launched from the Amazon ECS optimized AMI version `20190301` or later, they contain the required versions of the container agent and `ecs-init` . For more information, see [Amazon ECS-optimized Linux AMI](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-optimized_AMI.html) in the *Amazon Elastic Container Service Developer Guide* .", "RequiresCompatibilities": "The task launch types the task definition was validated against. The valid values are `MANAGED_INSTANCES` , `EC2` , `FARGATE` , and `EXTERNAL` . For more information, see [Amazon ECS launch types](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/launch_types.html) in the *Amazon Elastic Container Service Developer Guide* .", - "RuntimePlatform": "The operating system that your tasks definitions run on. A platform family is specified only for tasks using the Fargate launch type.", + "RuntimePlatform": "The operating system that your task definitions run on.", "Tags": "The metadata that you apply to the task definition to help you categorize and organize them. Each tag consists of a key and an optional value. 
You define both of them.\n\nThe following basic restrictions apply to tags:\n\n- Maximum number of tags per resource - 50\n- For each resource, each tag key must be unique, and each tag key can have only one value.\n- Maximum key length - 128 Unicode characters in UTF-8\n- Maximum value length - 256 Unicode characters in UTF-8\n- If your tagging schema is used across multiple services and resources, remember that other services may have restrictions on allowed characters. Generally allowed characters are: letters, numbers, and spaces representable in UTF-8, and the following characters: + - = . _ : / @.\n- Tag keys and values are case-sensitive.\n- Do not use `aws:` , `AWS:` , or any upper or lowercase combination of such as a prefix for either keys or values as it is reserved for AWS use. You cannot edit or delete tag keys or values with this prefix. Tags with this prefix do not count against your tags per resource limit.", "TaskRoleArn": "The short name or full Amazon Resource Name (ARN) of the AWS Identity and Access Management role that grants containers in the task permission to call AWS APIs on your behalf. For more information, see [Amazon ECS Task Role](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-iam-roles.html) in the *Amazon Elastic Container Service Developer Guide* .\n\nIAM roles for tasks on Windows require that the `-EnableTaskIAMRole` option is set when you launch the Amazon ECS-optimized Windows AMI. Your containers must also run some configuration code to use the feature. For more information, see [Windows IAM roles for tasks](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/windows_task_IAM_roles.html) in the *Amazon Elastic Container Service Developer Guide* .\n\n> String validation is done on the ECS side. If an invalid string value is given for `TaskRoleArn` , it may cause the Cloudformation job to hang.", "Volumes": "The list of data volume definitions for the task. 
For more information, see [Using data volumes in tasks](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/using_data_volumes.html) in the *Amazon Elastic Container Service Developer Guide* .\n\n> The `host` and `sourcePath` parameters aren't supported for tasks run on AWS Fargate ." @@ -17759,7 +17804,7 @@ }, "AWS::ECS::TaskDefinition ContainerDefinition": { "Command": "The command that's passed to the container. This parameter maps to `Cmd` in the docker container create command and the `COMMAND` parameter to docker run. If there are multiple arguments, each argument is a separated string in the array.", - "Cpu": "The number of `cpu` units reserved for the container. This parameter maps to `CpuShares` in the docker container create commandand the `--cpu-shares` option to docker run.\n\nThis field is optional for tasks using the Fargate launch type, and the only requirement is that the total amount of CPU reserved for all containers within a task be lower than the task-level `cpu` value.\n\n> You can determine the number of CPU units that are available per EC2 instance type by multiplying the vCPUs listed for that instance type on the [Amazon EC2 Instances](https://docs.aws.amazon.com/ec2/instance-types/) detail page by 1,024. \n\nLinux containers share unallocated CPU units with other containers on the container instance with the same ratio as their allocated amount. For example, if you run a single-container task on a single-core instance type with 512 CPU units specified for that container, and that's the only task running on the container instance, that container could use the full 1,024 CPU unit share at any given time. However, if you launched another copy of the same task on that container instance, each task is guaranteed a minimum of 512 CPU units when needed. Moreover, each container could float to higher CPU usage if the other container was not using it. 
If both tasks were 100% active all of the time, they would be limited to 512 CPU units.\n\nOn Linux container instances, the Docker daemon on the container instance uses the CPU value to calculate the relative CPU share ratios for running containers. The minimum valid CPU share value that the Linux kernel allows is 2, and the maximum valid CPU share value that the Linux kernel allows is 262144. However, the CPU parameter isn't required, and you can use CPU values below 2 or above 262144 in your container definitions. For CPU values below 2 (including null) or above 262144, the behavior varies based on your Amazon ECS container agent version:\n\n- *Agent versions less than or equal to 1.1.0:* Null and zero CPU values are passed to Docker as 0, which Docker then converts to 1,024 CPU shares. CPU values of 1 are passed to Docker as 1, which the Linux kernel converts to two CPU shares.\n- *Agent versions greater than or equal to 1.2.0:* Null, zero, and CPU values of 1 are passed to Docker as 2.\n- *Agent versions greater than or equal to 1.84.0:* CPU values greater than 256 vCPU are passed to Docker as 256, which is equivalent to 262144 CPU shares.\n\nOn Windows container instances, the CPU limit is enforced as an absolute limit, or a quota. Windows containers only have access to the specified amount of CPU that's described in the task definition. A null or zero CPU value is passed to Docker as `0` , which Windows interprets as 1% of one CPU.", + "Cpu": "The number of `cpu` units reserved for the container. 
This parameter maps to `CpuShares` in the docker container create command and the `--cpu-shares` option to docker run.\n\nThis field is optional for tasks using the Fargate launch type, and the only requirement is that the total amount of CPU reserved for all containers within a task be lower than the task-level `cpu` value.\n\n> You can determine the number of CPU units that are available per EC2 instance type by multiplying the vCPUs listed for that instance type on the [Amazon EC2 Instances](https://docs.aws.amazon.com/ec2/instance-types/) detail page by 1,024. \n\nLinux containers share unallocated CPU units with other containers on the container instance with the same ratio as their allocated amount. For example, if you run a single-container task on a single-core instance type with 512 CPU units specified for that container, and that's the only task running on the container instance, that container could use the full 1,024 CPU unit share at any given time. However, if you launched another copy of the same task on that container instance, each task is guaranteed a minimum of 512 CPU units when needed. Moreover, each container could float to higher CPU usage if the other container was not using it. If both tasks were 100% active all of the time, they would be limited to 512 CPU units.\n\nOn Linux container instances, the Docker daemon on the container instance uses the CPU value to calculate the relative CPU share ratios for running containers. The minimum valid CPU share value that the Linux kernel allows is 2, and the maximum valid CPU share value that the Linux kernel allows is 262144. However, the CPU parameter isn't required, and you can use CPU values below 2 or above 262144 in your container definitions. 
For CPU values below 2 (including null) or above 262144, the behavior varies based on your Amazon ECS container agent version:\n\n- *Agent versions less than or equal to 1.1.0:* Null and zero CPU values are passed to Docker as 0, which Docker then converts to 1,024 CPU shares. CPU values of 1 are passed to Docker as 1, which the Linux kernel converts to two CPU shares.\n- *Agent versions greater than or equal to 1.2.0:* Null, zero, and CPU values of 1 are passed to Docker as 2.\n- *Agent versions greater than or equal to 1.84.0:* CPU values greater than 256 vCPU are passed to Docker as 256, which is equivalent to 262144 CPU shares.\n\nOn Windows container instances, the CPU limit is enforced as an absolute limit, or a quota. Windows containers only have access to the specified amount of CPU that's described in the task definition. A null or zero CPU value is passed to Docker as `0` , which Windows interprets as 1% of one CPU.", "CredentialSpecs": "A list of ARNs in SSM or Amazon S3 to a credential spec ( `CredSpec` ) file that configures the container for Active Directory authentication. We recommend that you use this parameter instead of the `dockerSecurityOptions` . The maximum number of ARNs is 1.\n\nThere are two formats for each ARN.\n\n- **credentialspecdomainless:MyARN** - You use `credentialspecdomainless:MyARN` to provide a `CredSpec` with an additional section for a secret in AWS Secrets Manager . 
You provide the login credentials to the domain in the secret.\n\nEach task that runs on any container instance can join different domains.\n\nYou can use this format without joining the container instance to a domain.\n- **credentialspec:MyARN** - You use `credentialspec:MyARN` to provide a `CredSpec` for a single domain.\n\nYou must join the container instance to the domain before you start any tasks that use this task definition.\n\nIn both formats, replace `MyARN` with the ARN in SSM or Amazon S3.\n\nIf you provide a `credentialspecdomainless:MyARN` , the `credspec` must provide a ARN in AWS Secrets Manager for a secret containing the username, password, and the domain to connect to. For better security, the instance isn't joined to the domain for domainless authentication. Other applications on the instance can't use the domainless credentials. You can use this parameter to run tasks on the same instance, even it the tasks need to join different domains. For more information, see [Using gMSAs for Windows Containers](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/windows-gmsa.html) and [Using gMSAs for Linux Containers](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/linux-gmsa.html) .", "DependsOn": "The dependencies defined for container startup and shutdown. A container can contain multiple dependencies. When a dependency is defined for container startup, for container shutdown it is reversed.\n\nFor tasks using the EC2 launch type, the container instances require at least version 1.26.0 of the container agent to turn on container dependencies. However, we recommend using the latest container agent version. For information about checking your agent version and updating to the latest version, see [Updating the Amazon ECS Container Agent](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-agent-update.html) in the *Amazon Elastic Container Service Developer Guide* . 
If you're using an Amazon ECS-optimized Linux AMI, your instance needs at least version 1.26.0-1 of the `ecs-init` package. If your container instances are launched from version `20190301` or later, then they contain the required versions of the container agent and `ecs-init` . For more information, see [Amazon ECS-optimized Linux AMI](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-optimized_AMI.html) in the *Amazon Elastic Container Service Developer Guide* .\n\nFor tasks using the Fargate launch type, the task or service requires the following platforms:\n\n- Linux platform version `1.3.0` or later.\n- Windows platform version `1.0.0` or later.\n\nIf the task definition is used in a blue/green deployment that uses [AWS::CodeDeploy::DeploymentGroup BlueGreenDeploymentConfiguration](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-codedeploy-deploymentgroup-bluegreendeploymentconfiguration.html) , the `dependsOn` parameter is not supported.", "DisableNetworking": "When this parameter is true, networking is off within the container. This parameter maps to `NetworkDisabled` in the docker container create command.\n\n> This parameter is not supported for Windows containers.", @@ -17911,7 +17956,7 @@ "RestartAttemptPeriod": "A period of time (in seconds) that the container must run for before a restart can be attempted. A container can be restarted only once every `restartAttemptPeriod` seconds. If a container isn't able to run for this time period and exits early, it will not be restarted. You can set a minimum `restartAttemptPeriod` of 60 seconds and a maximum `restartAttemptPeriod` of 1800 seconds. By default, a container must run for 300 seconds before it can be restarted." }, "AWS::ECS::TaskDefinition RuntimePlatform": { - "CpuArchitecture": "The CPU architecture.\n\nYou can run your Linux tasks on an ARM-based platform by setting the value to `ARM64` . 
This option is available for tasks that run on Linux Amazon EC2 instance or Linux containers on Fargate.", + "CpuArchitecture": "The CPU architecture.\n\nYou can run your Linux tasks on an ARM-based platform by setting the value to `ARM64` . This option is available for tasks that run on Linux Amazon EC2 instance, Amazon ECS Managed Instances, or Linux containers on Fargate.", "OperatingSystemFamily": "The operating system." }, "AWS::ECS::TaskDefinition Secret": { @@ -17972,9 +18017,9 @@ "Subnets": "The IDs of the subnets associated with the task or service. There's a limit of 16 subnets that can be specified.\n\n> All specified subnets must be from the same VPC." }, "AWS::ECS::TaskSet CapacityProviderStrategyItem": { - "Base": "The *base* value designates how many tasks, at a minimum, to run on the specified capacity provider for each service. Only one capacity provider in a capacity provider strategy can have a *base* defined. If no value is specified, the default value of `0` is used.\n\nBase value characteristics:\n\n- Only one capacity provider in a strategy can have a base defined\n- Default value is `0` if not specified\n- Valid range: 0 to 100,000\n- Base requirements are satisfied first before weight distribution", - "CapacityProvider": "The short name of the capacity provider.", - "Weight": "The *weight* value designates the relative percentage of the total number of tasks launched that should use the specified capacity provider. The `weight` value is taken into consideration after the `base` value, if defined, is satisfied.\n\nIf no `weight` value is specified, the default value of `0` is used. When multiple capacity providers are specified within a capacity provider strategy, at least one of the capacity providers must have a weight value greater than zero and any capacity providers with a weight of `0` can't be used to place tasks. 
If you specify multiple capacity providers in a strategy that all have a weight of `0` , any `RunTask` or `CreateService` actions using the capacity provider strategy will fail.\n\nWeight value characteristics:\n\n- Weight is considered after the base value is satisfied\n- Default value is `0` if not specified\n- Valid range: 0 to 1,000\n- At least one capacity provider must have a weight greater than zero\n- Capacity providers with weight of `0` cannot place tasks\n\nTask distribution logic:\n\n- Base satisfaction: The minimum number of tasks specified by the base value are placed on that capacity provider\n- Weight distribution: After base requirements are met, additional tasks are distributed according to weight ratios\n\nExamples:\n\nEqual Distribution: Two capacity providers both with weight `1` will split tasks evenly after base requirements are met.\n\nWeighted Distribution: If capacityProviderA has weight `1` and capacityProviderB has weight `4` , then for every 1 task on A, 4 tasks will run on B." + "Base": "The *base* value designates how many tasks, at a minimum, to run on the specified capacity provider for each service. Only one capacity provider in a capacity provider strategy can have a *base* defined. If no value is specified, the default value of `0` is used.\n\nBase value characteristics:\n\n- Only one capacity provider in a strategy can have a base defined\n- The default value is `0` if not specified\n- The valid range is 0 to 100,000\n- Base requirements are satisfied first before weight distribution", + "CapacityProvider": "The short name of the capacity provider. This can be either an AWS managed capacity provider ( `FARGATE` or `FARGATE_SPOT` ) or the name of a custom capacity provider that you created.", + "Weight": "The *weight* value designates the relative percentage of the total number of tasks launched that should use the specified capacity provider. 
The `weight` value is taken into consideration after the `base` value, if defined, is satisfied.\n\nIf no `weight` value is specified, the default value of `0` is used. When multiple capacity providers are specified within a capacity provider strategy, at least one of the capacity providers must have a weight value greater than zero and any capacity providers with a weight of `0` can't be used to place tasks. If you specify multiple capacity providers in a strategy that all have a weight of `0` , any `RunTask` or `CreateService` actions using the capacity provider strategy will fail.\n\nWeight value characteristics:\n\n- Weight is considered after the base value is satisfied\n- The default value is `0` if not specified\n- The valid range is 0 to 1,000\n- At least one capacity provider must have a weight greater than zero\n- Capacity providers with weight of `0` cannot place tasks\n\nTask distribution logic:\n\n- Base satisfaction: The minimum number of tasks specified by the base value are placed on that capacity provider\n- Weight distribution: After base requirements are met, additional tasks are distributed according to weight ratios\n\nExamples:\n\nEqual Distribution: Two capacity providers both with weight `1` will split tasks evenly after base requirements are met.\n\nWeighted Distribution: If capacityProviderA has weight `1` and capacityProviderB has weight `4` , then for every 1 task on A, 4 tasks will run on B." 
}, "AWS::ECS::TaskSet LoadBalancer": { "ContainerName": "The name of the container (as it appears in a container definition) to associate with the load balancer.\n\nYou need to specify the container name when configuring the target group for an Amazon ECS load balancer.", @@ -23502,6 +23547,7 @@ }, "AWS::ImageBuilder::Image": { "ContainerRecipeArn": "The Amazon Resource Name (ARN) of the container recipe that defines how images are configured and tested.", + "DeletionSettings": "", "DistributionConfigurationArn": "The Amazon Resource Name (ARN) of the distribution configuration that defines and configures the outputs of your pipeline.", "EnhancedImageMetadataEnabled": "Collects additional information about the image being created, including the operating system (OS) version and package list. This information is used to enhance the overall experience of using EC2 Image Builder. Enabled by default.", "ExecutionRole": "The name or Amazon Resource Name (ARN) for the IAM role you create that grants Image Builder access to perform workflow actions.", @@ -23514,6 +23560,9 @@ "Tags": "The tags of the image.", "Workflows": "Contains an array of workflow configuration objects." }, + "AWS::ImageBuilder::Image DeletionSettings": { + "ExecutionRole": "" + }, "AWS::ImageBuilder::Image EcrConfiguration": { "ContainerTags": "Tags for Image Builder to apply to the output container image that Amazon Inspector scans. Tags can help you identify and manage your scanned images.", "RepositoryName": "The name of the container repository that Amazon Inspector scans to identify findings for your container images. The name includes the path for the repository location. If you don\u2019t provide this information, Image Builder creates a repository in your account named `image-builder-image-scanning-repository` for vulnerability scans of your output container images." 
@@ -26692,7 +26741,7 @@ "SharePointConfiguration": "Provides the configuration information to connect to Microsoft SharePoint as your data source.", "TemplateConfiguration": "Provides a template for the configuration information to connect to your data source.", "WebCrawlerConfiguration": "Provides the configuration information required for Amazon Kendra Web Crawler.", - "WorkDocsConfiguration": "Provides the configuration information to connect to Amazon WorkDocs as your data source." + "WorkDocsConfiguration": "Provides the configuration information to connect to WorkDocs as your data source." }, "AWS::Kendra::DataSource DataSourceToIndexFieldMapping": { "DataSourceFieldName": "The name of the field in the data source. You must first create the index field using the `UpdateIndex` API.", @@ -26903,11 +26952,11 @@ }, "AWS::Kendra::DataSource WorkDocsConfiguration": { "CrawlComments": "`TRUE` to include comments on documents in your index. Including comments in your index means each comment is a document that can be searched on.\n\nThe default is set to `FALSE` .", - "ExclusionPatterns": "A list of regular expression patterns to exclude certain files in your Amazon WorkDocs site repository. Files that match the patterns are excluded from the index. Files that don\u2019t match the patterns are included in the index. If a file matches both an inclusion and exclusion pattern, the exclusion pattern takes precedence and the file isn't included in the index.", - "FieldMappings": "A list of `DataSourceToIndexFieldMapping` objects that map Amazon WorkDocs data source attributes or field names to Amazon Kendra index field names. To create custom fields, use the `UpdateIndex` API before you map to Amazon WorkDocs fields. For more information, see [Mapping data source fields](https://docs.aws.amazon.com/kendra/latest/dg/field-mapping.html) . 
The Amazon WorkDocs data source field names must exist in your Amazon WorkDocs custom metadata.", - "InclusionPatterns": "A list of regular expression patterns to include certain files in your Amazon WorkDocs site repository. Files that match the patterns are included in the index. Files that don't match the patterns are excluded from the index. If a file matches both an inclusion and exclusion pattern, the exclusion pattern takes precedence and the file isn't included in the index.", - "OrganizationId": "The identifier of the directory corresponding to your Amazon WorkDocs site repository.\n\nYou can find the organization ID in the [AWS Directory Service](https://docs.aws.amazon.com/directoryservicev2/) by going to *Active Directory* , then *Directories* . Your Amazon WorkDocs site directory has an ID, which is the organization ID. You can also set up a new Amazon WorkDocs directory in the AWS Directory Service console and enable a Amazon WorkDocs site for the directory in the Amazon WorkDocs console.", - "UseChangeLog": "`TRUE` to use the Amazon WorkDocs change log to determine which documents require updating in the index. Depending on the change log's size, it may take longer for Amazon Kendra to use the change log than to scan all of your documents in Amazon WorkDocs." + "ExclusionPatterns": "A list of regular expression patterns to exclude certain files in your WorkDocs site repository. Files that match the patterns are excluded from the index. Files that don\u2019t match the patterns are included in the index. If a file matches both an inclusion and exclusion pattern, the exclusion pattern takes precedence and the file isn't included in the index.", + "FieldMappings": "A list of `DataSourceToIndexFieldMapping` objects that map WorkDocs data source attributes or field names to Amazon Kendra index field names. To create custom fields, use the `UpdateIndex` API before you map to WorkDocs fields. 
For more information, see [Mapping data source fields](https://docs.aws.amazon.com/kendra/latest/dg/field-mapping.html) . The WorkDocs data source field names must exist in your WorkDocs custom metadata.", + "InclusionPatterns": "A list of regular expression patterns to include certain files in your WorkDocs site repository. Files that match the patterns are included in the index. Files that don't match the patterns are excluded from the index. If a file matches both an inclusion and exclusion pattern, the exclusion pattern takes precedence and the file isn't included in the index.", + "OrganizationId": "The identifier of the directory corresponding to your WorkDocs site repository.\n\nYou can find the organization ID in the [AWS Directory Service](https://docs.aws.amazon.com/directoryservicev2/) by going to *Active Directory* , then *Directories* . Your WorkDocs site directory has an ID, which is the organization ID. You can also set up a new WorkDocs directory in the AWS Directory Service console and enable a WorkDocs site for the directory in the WorkDocs console.", + "UseChangeLog": "`TRUE` to use the WorkDocs change log to determine which documents require updating in the index. Depending on the change log's size, it may take longer for Amazon Kendra to use the change log than to scan all of your documents in WorkDocs." }, "AWS::Kendra::Faq": { "Description": "A description for the FAQ.", @@ -33119,6 +33168,7 @@ "Value": "The value to use in the custom metric dimension." }, "AWS::NetworkFirewall::FirewallPolicy FirewallPolicy": { + "EnableTLSSessionHolding": "When true, prevents TCP and TLS packets from reaching destination servers until TLS Inspection has evaluated Server Name Indication (SNI) rules. 
Requires an associated TLS Inspection configuration.", "PolicyVariables": "Contains variables that you can use to override default Suricata settings in your firewall policy.", "StatefulDefaultActions": "The default actions to take on a packet that doesn't match any stateful rules. The stateful default action is optional, and is only valid when using the strict rule order.\n\nValid values of the stateful default action:\n\n- aws:drop_strict\n- aws:drop_established\n- aws:alert_strict\n- aws:alert_established\n\nFor more information, see [Strict evaluation order](https://docs.aws.amazon.com/network-firewall/latest/developerguide/suricata-rule-evaluation-order.html#suricata-strict-rule-evaluation-order.html) in the *AWS Network Firewall Developer Guide* .", "StatefulEngineOptions": "Additional options governing how Network Firewall handles stateful rules. The stateful rule groups that you use in your policy must have stateful rule options settings that are compatible with these settings.", @@ -36156,7 +36206,7 @@ "IdentityType": "The authentication type being used by a Amazon Q Business application.", "PersonalizationConfiguration": "Configuration information about chat response personalization. For more information, see [Personalizing chat responses](https://docs.aws.amazon.com/amazonq/latest/qbusiness-ug/personalizing-chat-responses.html) .", "QAppsConfiguration": "Configuration information about Amazon Q Apps.", - "QuickSightConfiguration": "The Amazon QuickSight configuration for an Amazon Q Business application that uses QuickSight as the identity provider.", + "QuickSightConfiguration": "The Amazon Quick Suite configuration for an Amazon Q Business application that uses Quick Suite as the identity provider.", "RoleArn": "The Amazon Resource Name (ARN) of an IAM role with permissions to access your Amazon CloudWatch logs and metrics. 
If this property is not specified, Amazon Q Business will create a [service linked role (SLR)](https://docs.aws.amazon.com/amazonq/latest/qbusiness-ug/using-service-linked-roles.html#slr-permissions) and use it as the application's role.", "Tags": "A list of key-value pairs that identify or categorize your Amazon Q Business application. You can also use tags to help control access to the application. Tag keys and values can consist of Unicode letters, digits, white space, and any of the following symbols: _ . : / = + - @." }, @@ -36177,7 +36227,7 @@ "QAppsControlMode": "Status information about whether end users can create and use Amazon Q Apps in the web experience." }, "AWS::QBusiness::Application QuickSightConfiguration": { - "ClientNamespace": "The Amazon QuickSight namespace that is used as the identity provider. For more information about QuickSight namespaces, see [Namespace operations](https://docs.aws.amazon.com/quicksight/latest/developerguide/namespace-operations.html) ." + "ClientNamespace": "The Amazon Quick Suite namespace that is used as the identity provider. For more information about Quick Suite namespaces, see [Namespace operations](https://docs.aws.amazon.com/quicksight/latest/developerguide/namespace-operations.html) ." }, "AWS::QBusiness::Application Tag": { "Key": "The key for the tag. 
Keys are not case sensitive and must be unique for the Amazon Q Business application or data source.", @@ -38841,7 +38891,7 @@ "FilterControls": "The list of filter controls that are on a sheet.\n\nFor more information, see [Adding filter controls to analysis sheets](https://docs.aws.amazon.com/quicksight/latest/user/filter-controls.html) in the *Amazon Quick Suite User Guide* .", "Images": "A list of images on a sheet.", "Layouts": "Layouts define how the components of a sheet are arranged.\n\nFor more information, see [Types of layout](https://docs.aws.amazon.com/quicksight/latest/user/types-of-layout.html) in the *Amazon Quick Suite User Guide* .", - "Name": "The name of the sheet. This name is displayed on the sheet's tab in the Amazon QuickSight console.", + "Name": "The name of the sheet. This name is displayed on the sheet's tab in the Quick Suite console.", "ParameterControls": "The list of parameter controls that are on a sheet.\n\nFor more information, see [Using a Control with a Parameter in Amazon Quick Sight](https://docs.aws.amazon.com/quicksight/latest/user/parameters-controls.html) in the *Amazon Quick Suite User Guide* .", "SheetControlLayouts": "The control layouts of the sheet.", "SheetId": "The unique identifier of a sheet.", @@ -41912,7 +41962,7 @@ "FilterControls": "The list of filter controls that are on a sheet.\n\nFor more information, see [Adding filter controls to analysis sheets](https://docs.aws.amazon.com/quicksight/latest/user/filter-controls.html) in the *Amazon Quick Suite User Guide* .", "Images": "A list of images on a sheet.", "Layouts": "Layouts define how the components of a sheet are arranged.\n\nFor more information, see [Types of layout](https://docs.aws.amazon.com/quicksight/latest/user/types-of-layout.html) in the *Amazon Quick Suite User Guide* .", - "Name": "The name of the sheet. This name is displayed on the sheet's tab in the Amazon QuickSight console.", + "Name": "The name of the sheet. 
This name is displayed on the sheet's tab in the Quick Suite console.", "ParameterControls": "The list of parameter controls that are on a sheet.\n\nFor more information, see [Using a Control with a Parameter in Amazon Quick Sight](https://docs.aws.amazon.com/quicksight/latest/user/parameters-controls.html) in the *Amazon Quick Suite User Guide* .", "SheetControlLayouts": "The control layouts of the sheet.", "SheetId": "The unique identifier of a sheet.", @@ -45194,7 +45244,7 @@ "FilterControls": "The list of filter controls that are on a sheet.\n\nFor more information, see [Adding filter controls to analysis sheets](https://docs.aws.amazon.com/quicksight/latest/user/filter-controls.html) in the *Amazon Quick Suite User Guide* .", "Images": "A list of images on a sheet.", "Layouts": "Layouts define how the components of a sheet are arranged.\n\nFor more information, see [Types of layout](https://docs.aws.amazon.com/quicksight/latest/user/types-of-layout.html) in the *Amazon Quick Suite User Guide* .", - "Name": "The name of the sheet. This name is displayed on the sheet's tab in the Amazon QuickSight console.", + "Name": "The name of the sheet. This name is displayed on the sheet's tab in the Quick Suite console.", "ParameterControls": "The list of parameter controls that are on a sheet.\n\nFor more information, see [Using a Control with a Parameter in Amazon Quick Sight](https://docs.aws.amazon.com/quicksight/latest/user/parameters-controls.html) in the *Amazon Quick Suite User Guide* .", "SheetControlLayouts": "The control layouts of the sheet.", "SheetId": "The unique identifier of a sheet.", @@ -46542,6 +46592,124 @@ "Key": "A key is the required name of the tag. The string value can be from 1 to 128 Unicode characters in length and can't be prefixed with `aws:` or `rds:` . 
The string can only contain only the set of Unicode letters, digits, white-space, '_', '.', ':', '/', '=', '+', '-', '@' (Java regex: \"^([\\\\p{L}\\\\p{Z}\\\\p{N}_.:/=+\\\\-@]*)$\").", "Value": "A value is the optional value of the tag. The string value can be from 1 to 256 Unicode characters in length and can't be prefixed with `aws:` or `rds:` . The string can only contain only the set of Unicode letters, digits, white-space, '_', '.', ':', '/', '=', '+', '-', '@' (Java regex: \"^([\\\\p{L}\\\\p{Z}\\\\p{N}_.:/=+\\\\-@]*)$\")." }, + "AWS::RTBFabric::Link": { + "GatewayId": "The unique identifier of the gateway.", + "HttpResponderAllowed": "Boolean to specify if an HTTP responder is allowed.", + "LinkAttributes": "Attributes of the link.", + "LinkLogSettings": "Settings for the application logs.", + "ModuleConfigurationList": "", + "PeerGatewayId": "The unique identifier of the peer gateway.", + "Tags": "A map of the key-value pairs of the tag or tags to assign to the resource." + }, + "AWS::RTBFabric::Link Action": { + "HeaderTag": "Describes the header tag for a bid action.", + "NoBid": "Describes the parameters of a no bid module." + }, + "AWS::RTBFabric::Link ApplicationLogs": { + "LinkApplicationLogSampling": "Describes a link application log sample." + }, + "AWS::RTBFabric::Link Filter": { + "Criteria": "Describes the criteria for a filter." + }, + "AWS::RTBFabric::Link FilterCriterion": { + "Path": "The path to filter.", + "Values": "The value to filter." + }, + "AWS::RTBFabric::Link HeaderTagAction": { + "Name": "The name of the bid action.", + "Value": "The value of the bid action." + }, + "AWS::RTBFabric::Link LinkApplicationLogSampling": { + "ErrorLog": "An error log entry.", + "FilterLog": "A filter log entry." + }, + "AWS::RTBFabric::Link LinkAttributes": { + "CustomerProvidedId": "The customer-provided unique identifier of the link.", + "ResponderErrorMasking": "Describes the masking for HTTP error codes." 
+ }, + "AWS::RTBFabric::Link LinkLogSettings": { + "ApplicationLogs": "Describes the configuration of a link application log." + }, + "AWS::RTBFabric::Link ModuleConfiguration": { + "DependsOn": "The dependencies of the module.", + "ModuleParameters": "Describes the parameters of a module.", + "Name": "The name of the module.", + "Version": "The version of the module." + }, + "AWS::RTBFabric::Link ModuleParameters": { + "NoBid": "Describes the parameters of a no bid module.", + "OpenRtbAttribute": "Describes the parameters of an open RTB attribute module." + }, + "AWS::RTBFabric::Link NoBidAction": { + "NoBidReasonCode": "The reason code for the no bid action." + }, + "AWS::RTBFabric::Link NoBidModuleParameters": { + "PassThroughPercentage": "The pass through percentage.", + "Reason": "The reason description.", + "ReasonCode": "The reason code." + }, + "AWS::RTBFabric::Link OpenRtbAttributeModuleParameters": { + "Action": "Describes a bid action.", + "FilterConfiguration": "Describes the configuration of a filter.", + "FilterType": "The filter type.", + "HoldbackPercentage": "The hold back percentage." + }, + "AWS::RTBFabric::Link ResponderErrorMaskingForHttpCode": { + "Action": "The action for the error.", + "HttpCode": "The HTTP error code.", + "LoggingTypes": "The error log type.", + "ResponseLoggingPercentage": "The percentage of response logging." + }, + "AWS::RTBFabric::Link Tag": { + "Key": "The key name of the tag.", + "Value": "The value for the tag." + }, + "AWS::RTBFabric::RequesterGateway": { + "Description": "An optional description for the requester gateway.", + "SecurityGroupIds": "The unique identifiers of the security groups.", + "SubnetIds": "The unique identifiers of the subnets.", + "Tags": "A map of the key-value pairs of the tag or tags to assign to the resource.", + "VpcId": "The unique identifier of the Virtual Private Cloud (VPC)."
+ }, + "AWS::RTBFabric::RequesterGateway Tag": { + "Key": "The key name of the tag.", + "Value": "The value for the tag." + }, + "AWS::RTBFabric::ResponderGateway": { + "Description": "An optional description for the responder gateway.", + "DomainName": "The domain name for the responder gateway.", + "ManagedEndpointConfiguration": "The configuration for the managed endpoint.", + "Port": "The networking port to use.", + "Protocol": "The networking protocol to use.", + "SecurityGroupIds": "The unique identifiers of the security groups.", + "SubnetIds": "The unique identifiers of the subnets.", + "Tags": "A map of the key-value pairs of the tag or tags to assign to the resource.", + "TrustStoreConfiguration": "The configuration of the trust store.", + "VpcId": "The unique identifier of the Virtual Private Cloud (VPC)." + }, + "AWS::RTBFabric::ResponderGateway AutoScalingGroupsConfiguration": { + "AutoScalingGroupNameList": "The names of the auto scaling group.", + "RoleArn": "The role ARN of the auto scaling group." + }, + "AWS::RTBFabric::ResponderGateway EksEndpointsConfiguration": { + "ClusterApiServerCaCertificateChain": "The CA certificate chain of the cluster API server.", + "ClusterApiServerEndpointUri": "The URI of the cluster API server endpoint.", + "ClusterName": "The name of the cluster.", + "EndpointsResourceName": "The name of the endpoint resource.", + "EndpointsResourceNamespace": "The namespace of the endpoint resource.", + "RoleArn": "The role ARN for the cluster." + }, + "AWS::RTBFabric::ResponderGateway ManagedEndpointConfiguration": { + "AutoScalingGroupsConfiguration": "Describes the configuration of an auto scaling group.", + "EksEndpointsConfiguration": "Describes the configuration of an Amazon Elastic Kubernetes Service endpoint." + }, + "AWS::RTBFabric::ResponderGateway Tag": { + "Key": "The key name of the tag.", + "Value": "The value for the tag." 
+ }, + "AWS::RTBFabric::ResponderGateway TrustStoreConfiguration": { + "CertificateAuthorityCertificates": "The certificate authority certificate." + }, "AWS::RUM::AppMonitor": { "AppMonitorConfiguration": "A structure that contains much of the configuration data for the app monitor. If you are using Amazon Cognito for authorization, you must include this structure in your request, and it must include the ID of the Amazon Cognito identity pool to use for authorization. If you don't include `AppMonitorConfiguration` , you must set up your own authorization method. For more information, see [Authorize your application to send data to AWS](https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/CloudWatch-RUM-get-started-authorization.html) .\n\nIf you omit this argument, the sample rate used for CloudWatch RUM is set to 10% of the user sessions.", "CustomEvents": "Specifies whether this app monitor allows the web client to define and send custom events. If you omit this parameter, custom events are `DISABLED` .", @@ -48513,6 +48681,31 @@ "ResourcePolicy": "The `JSON` that defines the policy.", "TableARN": "The Amazon Resource Name (ARN) of the table." }, + "AWS::S3Vectors::Index": { + "DataType": "The data type of the vectors to be inserted into the vector index. Currently, only `float32` is supported, which represents 32-bit floating-point numbers.", + "Dimension": "The dimensions of the vectors to be inserted into the vector index. This value must be between 1 and 4096, inclusive. All vectors stored in the index must have the same number of dimensions.\n\nThe dimension value affects the storage requirements and search performance. Higher dimensions require more storage space and may impact search latency.", + "DistanceMetric": "The distance metric to be used for similarity search. 
Valid values are:\n\n- `cosine` - Measures the cosine of the angle between two vectors.\n- `euclidean` - Measures the straight-line distance between two points in multi-dimensional space. Lower values indicate greater similarity.", + "IndexName": "The name of the vector index to create. The index name must be between 3 and 63 characters long and can contain only lowercase letters, numbers, hyphens (-), and dots (.). The index name must be unique within the vector bucket.\n\nIf you don't specify a name, AWS CloudFormation generates a unique ID and uses that ID for the index name.\n\n> If you specify a name, you can't perform updates that require replacement of this resource. You can perform updates that require no or some interruption. If you need to replace the resource, specify a new name.", + "MetadataConfiguration": "The metadata configuration for the vector index.", + "VectorBucketArn": "The Amazon Resource Name (ARN) of the vector bucket that contains the vector index.", + "VectorBucketName": "The name of the vector bucket that contains the vector index." + }, + "AWS::S3Vectors::Index MetadataConfiguration": { + "NonFilterableMetadataKeys": "Non-filterable metadata keys allow you to enrich vectors with additional context during storage and retrieval. Unlike default metadata keys, these keys can't be used as query filters. Non-filterable metadata keys can be retrieved but can't be searched, queried, or filtered. You can access non-filterable metadata keys of your vectors after finding the vectors.\n\nYou can specify 1 to 10 non-filterable metadata keys. Each key must be 1 to 63 characters long." + }, + "AWS::S3Vectors::VectorBucket": { + "EncryptionConfiguration": "The encryption configuration for the vector bucket.", + "VectorBucketName": "A name for the vector bucket. The bucket name must contain only lowercase letters, numbers, and hyphens (-). The bucket name must be unique in the same AWS account for each AWS Region. 
If you don't specify a name, AWS CloudFormation generates a unique ID and uses that ID for the bucket name.\n\nThe bucket name must be between 3 and 63 characters long and must not contain uppercase characters or underscores.\n\n> If you specify a name, you can't perform updates that require replacement of this resource. You can perform updates that require no or some interruption. If you need to replace the resource, specify a new name." + }, + "AWS::S3Vectors::VectorBucket EncryptionConfiguration": { + "KmsKeyArn": "AWS Key Management Service (KMS) customer managed key ARN to use for the encryption configuration. This parameter is required if and only if `SseType` is set to `aws:kms` .\n\nYou must specify the full ARN of the KMS key. Key IDs or key aliases aren't supported.\n\n> Amazon S3 Vectors only supports symmetric encryption KMS keys. For more information, see [Asymmetric keys in AWS KMS](https://docs.aws.amazon.com//kms/latest/developerguide/symmetric-asymmetric.html) in the *AWS Key Management Service Developer Guide* .", + "SseType": "The server-side encryption type to use for the encryption configuration of the vector bucket. Valid values are `AES256` for Amazon S3 managed keys and `aws:kms` for AWS KMS keys." + }, + "AWS::S3Vectors::VectorBucketPolicy": { + "Policy": "A policy document containing permissions to add to the specified vector bucket. In IAM , you must provide policy documents in JSON format. However, in CloudFormation you can provide the policy in JSON or YAML format because CloudFormation converts YAML to JSON before submitting it to IAM .", + "VectorBucketArn": "The Amazon Resource Name (ARN) of the S3 vector bucket to which the policy applies.", + "VectorBucketName": "The name of the S3 vector bucket to which the policy applies." + }, "AWS::SDB::Domain": { "Description": "Information about the SimpleDB domain." 
}, @@ -51470,7 +51663,7 @@ "KmsKeyId": "The Amazon Resource Name (ARN) of a AWS Key Management Service key that SageMaker AI uses to encrypt data on the storage volume attached to your notebook instance. The KMS key you provide must be enabled. For information, see [Enabling and Disabling Keys](https://docs.aws.amazon.com/kms/latest/developerguide/enabling-keys.html) in the *AWS Key Management Service Developer Guide* .", "LifecycleConfigName": "The name of a lifecycle configuration to associate with the notebook instance. For information about lifecycle configurations, see [Customize a Notebook Instance](https://docs.aws.amazon.com/sagemaker/latest/dg/notebook-lifecycle-config.html) in the *Amazon SageMaker Developer Guide* .", "NotebookInstanceName": "The name of the new notebook instance.", - "PlatformIdentifier": "The platform identifier of the notebook instance runtime environment.", + "PlatformIdentifier": "The platform identifier of the notebook instance runtime environment. The default value is `notebook-al2-v2` .", "RoleArn": "When you send any requests to AWS resources from the notebook instance, SageMaker AI assumes this role to perform tasks on your behalf. You must grant this role necessary permissions so SageMaker AI can perform these tasks. The policy must allow the SageMaker AI service principal (sagemaker.amazonaws.com) permissions to assume this role. For more information, see [SageMaker AI Roles](https://docs.aws.amazon.com/sagemaker/latest/dg/sagemaker-roles.html) .\n\n> To be able to pass this role to SageMaker AI, the caller of this API must have the `iam:PassRole` permission.", "RootAccess": "Whether root access is enabled or disabled for users of the notebook instance. The default value is `Enabled` .\n\n> Lifecycle configurations need root access to be able to set up a notebook instance. 
Because of this, lifecycle configurations associated with a notebook instance always run with root access even if you disable root access for users.", "SecurityGroupIds": "The VPC security group IDs, in the form sg-xxxxxxxx. The security groups must be for the same VPC as specified in the subnet.", @@ -51630,7 +51823,7 @@ "AWS::SageMaker::ProcessingJob S3Input": { "LocalPath": "The local path in your container where you want Amazon SageMaker to write input data to. `LocalPath` is an absolute path to the input data and must begin with `/opt/ml/processing/` . `LocalPath` is a required parameter when `AppManaged` is `False` (default).", "S3CompressionType": "Whether to GZIP-decompress the data in Amazon S3 as it is streamed into the processing container. `Gzip` can only be used when `Pipe` mode is specified as the `S3InputMode` . In `Pipe` mode, Amazon SageMaker streams input data from the source directly to your container without using the EBS volume.", - "S3DataDistributionType": "Whether to distribute the data from Amazon S3 to all processing instances with `FullyReplicated` , or whether the data from Amazon S3 is shared by Amazon S3 key, downloading one shard of data to each processing instance.", + "S3DataDistributionType": "Whether to distribute the data from Amazon S3 to all processing instances with `FullyReplicated` , or whether the data from Amazon S3 is sharded by Amazon S3 key, downloading one shard of data to each processing instance.", "S3DataType": "Whether you use an `S3Prefix` or a `ManifestFile` for the data type. If you choose `S3Prefix` , `S3Uri` identifies a key name prefix. Amazon SageMaker uses all objects with the specified key name prefix for the processing job. If you choose `ManifestFile` , `S3Uri` identifies an object that is a manifest file containing a list of object keys that you want Amazon SageMaker to use for the processing job.", "S3InputMode": "Whether to use `File` or `Pipe` input mode. 
In File mode, Amazon SageMaker copies the data from the input source onto the local ML storage volume before starting your processing container. This is the most commonly used input mode. In `Pipe` mode, Amazon SageMaker streams input data from the source directly to your processing container into named pipes without using the ML storage volume.", "S3Uri": "The URI of the Amazon S3 prefix Amazon SageMaker downloads data required to run a processing job." @@ -53342,6 +53535,8 @@ "AWS::Transfer::Connector": { "AccessRole": "Connectors are used to send files using either the AS2 or SFTP protocol. For the access role, provide the Amazon Resource Name (ARN) of the AWS Identity and Access Management role to use.\n\n*For AS2 connectors*\n\nWith AS2, you can send files by calling `StartFileTransfer` and specifying the file paths in the request parameter, `SendFilePaths` . We use the file\u2019s parent directory (for example, for `--send-file-paths /bucket/dir/file.txt` , parent directory is `/bucket/dir/` ) to temporarily store a processed AS2 message file, store the MDN when we receive them from the partner, and write a final JSON file containing relevant metadata of the transmission. So, the `AccessRole` needs to provide read and write access to the parent directory of the file location used in the `StartFileTransfer` request. Additionally, you need to provide read and write access to the parent directory of the files that you intend to send with `StartFileTransfer` .\n\nIf you are using Basic authentication for your AS2 connector, the access role requires the `secretsmanager:GetSecretValue` permission for the secret. If the secret is encrypted using a customer-managed key instead of the AWS managed key in Secrets Manager, then the role also needs the `kms:Decrypt` permission for that key.\n\n*For SFTP connectors*\n\nMake sure that the access role provides read and write access to the parent directory of the file location that's used in the `StartFileTransfer` request. 
Additionally, make sure that the role provides `secretsmanager:GetSecretValue` permission to AWS Secrets Manager .", "As2Config": "A structure that contains the parameters for an AS2 connector object.", + "EgressConfig": "Current egress configuration of the connector, showing how traffic is routed to the SFTP server. Contains VPC Lattice settings when using VPC_LATTICE egress type.\n\nWhen using the VPC_LATTICE egress type, AWS Transfer Family uses a managed Service Network to simplify the resource sharing process.", + "EgressType": "Type of egress configuration for the connector. SERVICE_MANAGED uses Transfer Family managed NAT gateways, while VPC_LATTICE routes traffic through customer VPCs using VPC Lattice.", "LoggingRole": "The Amazon Resource Name (ARN) of the AWS Identity and Access Management (IAM) role that allows a connector to turn on CloudWatch logging for Amazon S3 events. When set, you can view connector activity in your CloudWatch logs.", "SecurityPolicyName": "The text name of the security policy for the specified connector.", "SftpConfig": "A structure that contains the parameters for an SFTP connector object.", @@ -53360,6 +53555,13 @@ "PreserveContentType": "", "SigningAlgorithm": "The algorithm that is used to sign the AS2 messages sent with the connector." }, + "AWS::Transfer::Connector ConnectorEgressConfig": { + "VpcLattice": "VPC_LATTICE configuration for routing connector traffic through customer VPCs. Enables private connectivity to SFTP servers without requiring public internet access or complex network configurations." + }, + "AWS::Transfer::Connector ConnectorVpcLatticeEgressConfig": { + "PortNumber": "Port number for connecting to the SFTP server through VPC_LATTICE. Defaults to 22 if not specified. Must match the port on which the target SFTP server is listening.", + "ResourceConfigurationArn": "ARN of the VPC_LATTICE Resource Configuration that defines the target SFTP server location. 
Must point to a valid Resource Configuration in the customer's VPC with appropriate network connectivity to the SFTP server." + }, "AWS::Transfer::Connector SftpConfig": { "MaxConcurrentConnections": "Specify the number of concurrent connections that your connector creates to the remote server. The default value is `1` . The maximum value is `5` .\n\n> If you are using the AWS Management Console , the default value is `5` . \n\nThis parameter specifies the number of active connections that your connector can establish with the remote server at the same time. Increasing this value can enhance connector performance when transferring large file batches by enabling parallel operations.", "TrustedHostKeys": "The public portion of the host key, or keys, that are used to identify the external server to which you are connecting. You can use the `ssh-keyscan` command against the SFTP server to retrieve the necessary key.\n\n> `TrustedHostKeys` is optional for `CreateConnector` . If not provided, you can use `TestConnection` to retrieve the server host key during the initial connection attempt, and subsequently update the connector with the observed host key. \n\nWhen creating connectors with egress config (VPC_LATTICE type connectors), since host name is not something we can verify, the only accepted trusted host key format is `key-type key-body` without the host name. For example: `ssh-rsa AAAAB3Nza...`\n\nThe three standard SSH public key format elements are `key-type` , `key-body` , and an optional `comment` , with spaces between each element.
Specify only the `` and `` : do not enter the `` portion of the key.\n\nFor the trusted host key, AWS Transfer Family accepts RSA and ECDSA keys.\n\n- For RSA keys, the `` string is `ssh-rsa` .\n- For ECDSA keys, the `` string is either `ecdsa-sha2-nistp256` , `ecdsa-sha2-nistp384` , or `ecdsa-sha2-nistp521` , depending on the size of the key you generated.\n\nRun this command to retrieve the SFTP server host key, where your SFTP server name is `ftp.host.com` .\n\n`ssh-keyscan ftp.host.com`\n\nThis prints the public host key to standard output.\n\n`ftp.host.com ssh-rsa AAAAB3Nza...`\n\nCopy and paste this string into the `TrustedHostKeys` field for the `create-connector` command or into the *Trusted host keys* field in the console.\n\nFor VPC Lattice type connectors (VPC_LATTICE), remove the hostname from the key and use only the `key-type key-body` format. In this example, it should be: `ssh-rsa AAAAB3Nza...`", diff --git a/schema_source/cloudformation.schema.json b/schema_source/cloudformation.schema.json index a90540b59..e72392592 100644 --- a/schema_source/cloudformation.schema.json +++ b/schema_source/cloudformation.schema.json @@ -18086,7 +18086,7 @@ "type": "string" }, "InstanceType": { - "markdownDescription": "The instance type to use when launching fleet instances. 
The following instance types are available for non-Elastic fleets:\n\n- stream.standard.small\n- stream.standard.medium\n- stream.standard.large\n- stream.compute.large\n- stream.compute.xlarge\n- stream.compute.2xlarge\n- stream.compute.4xlarge\n- stream.compute.8xlarge\n- stream.memory.large\n- stream.memory.xlarge\n- stream.memory.2xlarge\n- stream.memory.4xlarge\n- stream.memory.8xlarge\n- stream.memory.z1d.large\n- stream.memory.z1d.xlarge\n- stream.memory.z1d.2xlarge\n- stream.memory.z1d.3xlarge\n- stream.memory.z1d.6xlarge\n- stream.memory.z1d.12xlarge\n- stream.graphics-design.large\n- stream.graphics-design.xlarge\n- stream.graphics-design.2xlarge\n- stream.graphics-design.4xlarge\n- stream.graphics-desktop.2xlarge\n- stream.graphics.g4dn.xlarge\n- stream.graphics.g4dn.2xlarge\n- stream.graphics.g4dn.4xlarge\n- stream.graphics.g4dn.8xlarge\n- stream.graphics.g4dn.12xlarge\n- stream.graphics.g4dn.16xlarge\n- stream.graphics-pro.4xlarge\n- stream.graphics-pro.8xlarge\n- stream.graphics-pro.16xlarge\n- stream.graphics.g5.xlarge\n- stream.graphics.g5.2xlarge\n- stream.graphics.g5.4xlarge\n- stream.graphics.g5.8xlarge\n- stream.graphics.g5.16xlarge\n- stream.graphics.g5.12xlarge\n- stream.graphics.g5.24xlarge\n- stream.graphics.g6.xlarge\n- stream.graphics.g6.2xlarge\n- stream.graphics.g6.4xlarge\n- stream.graphics.g6.8xlarge\n- stream.graphics.g6.16xlarge\n- stream.graphics.g6.12xlarge\n- stream.graphics.g6.24xlarge\n- stream.graphics.gr6.4xlarge\n- stream.graphics.gr6.8xlarge\n- stream.graphics.g6f.large\n- stream.graphics.g6f.xlarge\n- stream.graphics.g6f.2xlarge\n- stream.graphics.g6f.4xlarge\n- stream.graphics.gr6f.4xlarge\n\nThe following instance types are available for Elastic fleets:\n\n- stream.standard.small\n- stream.standard.medium", + "markdownDescription": "The instance type to use when launching fleet instances. 
The following instance types are available for non-Elastic fleets:\n\n- stream.standard.small\n- stream.standard.medium\n- stream.standard.large\n- stream.compute.large\n- stream.compute.xlarge\n- stream.compute.2xlarge\n- stream.compute.4xlarge\n- stream.compute.8xlarge\n- stream.memory.large\n- stream.memory.xlarge\n- stream.memory.2xlarge\n- stream.memory.4xlarge\n- stream.memory.8xlarge\n- stream.memory.z1d.large\n- stream.memory.z1d.xlarge\n- stream.memory.z1d.2xlarge\n- stream.memory.z1d.3xlarge\n- stream.memory.z1d.6xlarge\n- stream.memory.z1d.12xlarge\n- stream.graphics-design.large\n- stream.graphics-design.xlarge\n- stream.graphics-design.2xlarge\n- stream.graphics-design.4xlarge\n- stream.graphics.g4dn.xlarge\n- stream.graphics.g4dn.2xlarge\n- stream.graphics.g4dn.4xlarge\n- stream.graphics.g4dn.8xlarge\n- stream.graphics.g4dn.12xlarge\n- stream.graphics.g4dn.16xlarge\n- stream.graphics.g5.xlarge\n- stream.graphics.g5.2xlarge\n- stream.graphics.g5.4xlarge\n- stream.graphics.g5.8xlarge\n- stream.graphics.g5.16xlarge\n- stream.graphics.g5.12xlarge\n- stream.graphics.g5.24xlarge\n- stream.graphics.g6.xlarge\n- stream.graphics.g6.2xlarge\n- stream.graphics.g6.4xlarge\n- stream.graphics.g6.8xlarge\n- stream.graphics.g6.16xlarge\n- stream.graphics.g6.12xlarge\n- stream.graphics.g6.24xlarge\n- stream.graphics.gr6.4xlarge\n- stream.graphics.gr6.8xlarge\n- stream.graphics.g6f.large\n- stream.graphics.g6f.xlarge\n- stream.graphics.g6f.2xlarge\n- stream.graphics.g6f.4xlarge\n- stream.graphics.gr6f.4xlarge\n\nThe following instance types are available for Elastic fleets:\n\n- stream.standard.small\n- stream.standard.medium", "title": "InstanceType", "type": "string" }, @@ -18332,7 +18332,7 @@ "type": "string" }, "InstanceType": { - "markdownDescription": "The instance type to use when launching the image builder. 
The following instance types are available:\n\n- stream.standard.small\n- stream.standard.medium\n- stream.standard.large\n- stream.compute.large\n- stream.compute.xlarge\n- stream.compute.2xlarge\n- stream.compute.4xlarge\n- stream.compute.8xlarge\n- stream.memory.large\n- stream.memory.xlarge\n- stream.memory.2xlarge\n- stream.memory.4xlarge\n- stream.memory.8xlarge\n- stream.memory.z1d.large\n- stream.memory.z1d.xlarge\n- stream.memory.z1d.2xlarge\n- stream.memory.z1d.3xlarge\n- stream.memory.z1d.6xlarge\n- stream.memory.z1d.12xlarge\n- stream.graphics-design.large\n- stream.graphics-design.xlarge\n- stream.graphics-design.2xlarge\n- stream.graphics-design.4xlarge\n- stream.graphics-desktop.2xlarge\n- stream.graphics.g4dn.xlarge\n- stream.graphics.g4dn.2xlarge\n- stream.graphics.g4dn.4xlarge\n- stream.graphics.g4dn.8xlarge\n- stream.graphics.g4dn.12xlarge\n- stream.graphics.g4dn.16xlarge\n- stream.graphics-pro.4xlarge\n- stream.graphics-pro.8xlarge\n- stream.graphics-pro.16xlarge\n- stream.graphics.g5.xlarge\n- stream.graphics.g5.2xlarge\n- stream.graphics.g5.4xlarge\n- stream.graphics.g5.8xlarge\n- stream.graphics.g5.16xlarge\n- stream.graphics.g5.12xlarge\n- stream.graphics.g5.24xlarge\n- stream.graphics.g6.xlarge\n- stream.graphics.g6.2xlarge\n- stream.graphics.g6.4xlarge\n- stream.graphics.g6.8xlarge\n- stream.graphics.g6.16xlarge\n- stream.graphics.g6.12xlarge\n- stream.graphics.g6.24xlarge\n- stream.graphics.gr6.4xlarge\n- stream.graphics.gr6.8xlarge\n- stream.graphics.g6f.large\n- stream.graphics.g6f.xlarge\n- stream.graphics.g6f.2xlarge\n- stream.graphics.g6f.4xlarge\n- stream.graphics.gr6f.4xlarge", + "markdownDescription": "The instance type to use when launching the image builder. 
The following instance types are available:\n\n- stream.standard.small\n- stream.standard.medium\n- stream.standard.large\n- stream.compute.large\n- stream.compute.xlarge\n- stream.compute.2xlarge\n- stream.compute.4xlarge\n- stream.compute.8xlarge\n- stream.memory.large\n- stream.memory.xlarge\n- stream.memory.2xlarge\n- stream.memory.4xlarge\n- stream.memory.8xlarge\n- stream.memory.z1d.large\n- stream.memory.z1d.xlarge\n- stream.memory.z1d.2xlarge\n- stream.memory.z1d.3xlarge\n- stream.memory.z1d.6xlarge\n- stream.memory.z1d.12xlarge\n- stream.graphics-design.large\n- stream.graphics-design.xlarge\n- stream.graphics-design.2xlarge\n- stream.graphics-design.4xlarge\n- stream.graphics.g4dn.xlarge\n- stream.graphics.g4dn.2xlarge\n- stream.graphics.g4dn.4xlarge\n- stream.graphics.g4dn.8xlarge\n- stream.graphics.g4dn.12xlarge\n- stream.graphics.g4dn.16xlarge\n- stream.graphics.g5.xlarge\n- stream.graphics.g5.2xlarge\n- stream.graphics.g5.4xlarge\n- stream.graphics.g5.8xlarge\n- stream.graphics.g5.16xlarge\n- stream.graphics.g5.12xlarge\n- stream.graphics.g5.24xlarge\n- stream.graphics.g6.xlarge\n- stream.graphics.g6.2xlarge\n- stream.graphics.g6.4xlarge\n- stream.graphics.g6.8xlarge\n- stream.graphics.g6.16xlarge\n- stream.graphics.g6.12xlarge\n- stream.graphics.g6.24xlarge\n- stream.graphics.gr6.4xlarge\n- stream.graphics.gr6.8xlarge\n- stream.graphics.g6f.large\n- stream.graphics.g6f.xlarge\n- stream.graphics.g6f.2xlarge\n- stream.graphics.g6f.4xlarge\n- stream.graphics.gr6f.4xlarge", "title": "InstanceType", "type": "string" }, @@ -33141,7 +33141,7 @@ "items": { "type": "string" }, - "markdownDescription": "The columns within the underlying AWS Glue table that can be utilized within collaborations.", + "markdownDescription": "The columns within the underlying AWS Glue table that can be used within collaborations.", "title": "AllowedColumns", "type": "array" }, @@ -83091,17 +83091,17 @@ "additionalProperties": false, "properties": { "Base": { - 
"markdownDescription": "The *base* value designates how many tasks, at a minimum, to run on the specified capacity provider for each service. Only one capacity provider in a capacity provider strategy can have a *base* defined. If no value is specified, the default value of `0` is used.\n\nBase value characteristics:\n\n- Only one capacity provider in a strategy can have a base defined\n- Default value is `0` if not specified\n- Valid range: 0 to 100,000\n- Base requirements are satisfied first before weight distribution", + "markdownDescription": "The *base* value designates how many tasks, at a minimum, to run on the specified capacity provider for each service. Only one capacity provider in a capacity provider strategy can have a *base* defined. If no value is specified, the default value of `0` is used.\n\nBase value characteristics:\n\n- Only one capacity provider in a strategy can have a base defined\n- The default value is `0` if not specified\n- The valid range is 0 to 100,000\n- Base requirements are satisfied first before weight distribution", "title": "Base", "type": "number" }, "CapacityProvider": { - "markdownDescription": "The short name of the capacity provider.", + "markdownDescription": "The short name of the capacity provider. This can be either an AWS managed capacity provider ( `FARGATE` or `FARGATE_SPOT` ) or the name of a custom capacity provider that you created.", "title": "CapacityProvider", "type": "string" }, "Weight": { - "markdownDescription": "The *weight* value designates the relative percentage of the total number of tasks launched that should use the specified capacity provider. The `weight` value is taken into consideration after the `base` value, if defined, is satisfied.\n\nIf no `weight` value is specified, the default value of `0` is used. 
When multiple capacity providers are specified within a capacity provider strategy, at least one of the capacity providers must have a weight value greater than zero and any capacity providers with a weight of `0` can't be used to place tasks. If you specify multiple capacity providers in a strategy that all have a weight of `0` , any `RunTask` or `CreateService` actions using the capacity provider strategy will fail.\n\nWeight value characteristics:\n\n- Weight is considered after the base value is satisfied\n- Default value is `0` if not specified\n- Valid range: 0 to 1,000\n- At least one capacity provider must have a weight greater than zero\n- Capacity providers with weight of `0` cannot place tasks\n\nTask distribution logic:\n\n- Base satisfaction: The minimum number of tasks specified by the base value are placed on that capacity provider\n- Weight distribution: After base requirements are met, additional tasks are distributed according to weight ratios\n\nExamples:\n\nEqual Distribution: Two capacity providers both with weight `1` will split tasks evenly after base requirements are met.\n\nWeighted Distribution: If capacityProviderA has weight `1` and capacityProviderB has weight `4` , then for every 1 task on A, 4 tasks will run on B.", + "markdownDescription": "The *weight* value designates the relative percentage of the total number of tasks launched that should use the specified capacity provider. The `weight` value is taken into consideration after the `base` value, if defined, is satisfied.\n\nIf no `weight` value is specified, the default value of `0` is used. When multiple capacity providers are specified within a capacity provider strategy, at least one of the capacity providers must have a weight value greater than zero and any capacity providers with a weight of `0` can't be used to place tasks. 
If you specify multiple capacity providers in a strategy that all have a weight of `0` , any `RunTask` or `CreateService` actions using the capacity provider strategy will fail.\n\nWeight value characteristics:\n\n- Weight is considered after the base value is satisfied\n- The default value is `0` if not specified\n- The valid range is 0 to 1,000\n- At least one capacity provider must have a weight greater than zero\n- Capacity providers with weight of `0` cannot place tasks\n\nTask distribution logic:\n\n- Base satisfaction: The minimum number of tasks specified by the base value are placed on that capacity provider\n- Weight distribution: After base requirements are met, additional tasks are distributed according to weight ratios\n\nExamples:\n\nEqual Distribution: Two capacity providers both with weight `1` will split tasks evenly after base requirements are met.\n\nWeighted Distribution: If capacityProviderA has weight `1` and capacityProviderB has weight `4` , then for every 1 task on A, 4 tasks will run on B.", "title": "Weight", "type": "number" } @@ -83287,17 +83287,17 @@ "additionalProperties": false, "properties": { "Base": { - "markdownDescription": "The *base* value designates how many tasks, at a minimum, to run on the specified capacity provider for each service. Only one capacity provider in a capacity provider strategy can have a *base* defined. If no value is specified, the default value of `0` is used.\n\nBase value characteristics:\n\n- Only one capacity provider in a strategy can have a base defined\n- Default value is `0` if not specified\n- Valid range: 0 to 100,000\n- Base requirements are satisfied first before weight distribution", + "markdownDescription": "The *base* value designates how many tasks, at a minimum, to run on the specified capacity provider for each service. Only one capacity provider in a capacity provider strategy can have a *base* defined. 
If no value is specified, the default value of `0` is used.\n\nBase value characteristics:\n\n- Only one capacity provider in a strategy can have a base defined\n- The default value is `0` if not specified\n- The valid range is 0 to 100,000\n- Base requirements are satisfied first before weight distribution", "title": "Base", "type": "number" }, "CapacityProvider": { - "markdownDescription": "The short name of the capacity provider.", + "markdownDescription": "The short name of the capacity provider. This can be either an AWS managed capacity provider ( `FARGATE` or `FARGATE_SPOT` ) or the name of a custom capacity provider that you created.", "title": "CapacityProvider", "type": "string" }, "Weight": { - "markdownDescription": "The *weight* value designates the relative percentage of the total number of tasks launched that should use the specified capacity provider. The `weight` value is taken into consideration after the `base` value, if defined, is satisfied.\n\nIf no `weight` value is specified, the default value of `0` is used. When multiple capacity providers are specified within a capacity provider strategy, at least one of the capacity providers must have a weight value greater than zero and any capacity providers with a weight of `0` can't be used to place tasks. 
If you specify multiple capacity providers in a strategy that all have a weight of `0` , any `RunTask` or `CreateService` actions using the capacity provider strategy will fail.\n\nWeight value characteristics:\n\n- Weight is considered after the base value is satisfied\n- Default value is `0` if not specified\n- Valid range: 0 to 1,000\n- At least one capacity provider must have a weight greater than zero\n- Capacity providers with weight of `0` cannot place tasks\n\nTask distribution logic:\n\n- Base satisfaction: The minimum number of tasks specified by the base value are placed on that capacity provider\n- Weight distribution: After base requirements are met, additional tasks are distributed according to weight ratios\n\nExamples:\n\nEqual Distribution: Two capacity providers both with weight `1` will split tasks evenly after base requirements are met.\n\nWeighted Distribution: If capacityProviderA has weight `1` and capacityProviderB has weight `4` , then for every 1 task on A, 4 tasks will run on B.", + "markdownDescription": "The *weight* value designates the relative percentage of the total number of tasks launched that should use the specified capacity provider. The `weight` value is taken into consideration after the `base` value, if defined, is satisfied.\n\nIf no `weight` value is specified, the default value of `0` is used. When multiple capacity providers are specified within a capacity provider strategy, at least one of the capacity providers must have a weight value greater than zero and any capacity providers with a weight of `0` can't be used to place tasks. 
If you specify multiple capacity providers in a strategy that all have a weight of `0` , any `RunTask` or `CreateService` actions using the capacity provider strategy will fail.\n\nWeight value characteristics:\n\n- Weight is considered after the base value is satisfied\n- The default value is `0` if not specified\n- The valid range is 0 to 1,000\n- At least one capacity provider must have a weight greater than zero\n- Capacity providers with weight of `0` cannot place tasks\n\nTask distribution logic:\n\n- Base satisfaction: The minimum number of tasks specified by the base value are placed on that capacity provider\n- Weight distribution: After base requirements are met, additional tasks are distributed according to weight ratios\n\nExamples:\n\nEqual Distribution: Two capacity providers both with weight `1` will split tasks evenly after base requirements are met.\n\nWeighted Distribution: If capacityProviderA has weight `1` and capacityProviderB has weight `4` , then for every 1 task on A, 4 tasks will run on B.", "title": "Weight", "type": "number" } @@ -83611,17 +83611,17 @@ "additionalProperties": false, "properties": { "Base": { - "markdownDescription": "The *base* value designates how many tasks, at a minimum, to run on the specified capacity provider for each service. Only one capacity provider in a capacity provider strategy can have a *base* defined. If no value is specified, the default value of `0` is used.\n\nBase value characteristics:\n\n- Only one capacity provider in a strategy can have a base defined\n- Default value is `0` if not specified\n- Valid range: 0 to 100,000\n- Base requirements are satisfied first before weight distribution", + "markdownDescription": "The *base* value designates how many tasks, at a minimum, to run on the specified capacity provider for each service. Only one capacity provider in a capacity provider strategy can have a *base* defined. 
If no value is specified, the default value of `0` is used.\n\nBase value characteristics:\n\n- Only one capacity provider in a strategy can have a base defined\n- The default value is `0` if not specified\n- The valid range is 0 to 100,000\n- Base requirements are satisfied first before weight distribution", "title": "Base", "type": "number" }, "CapacityProvider": { - "markdownDescription": "The short name of the capacity provider.", + "markdownDescription": "The short name of the capacity provider. This can be either an AWS managed capacity provider ( `FARGATE` or `FARGATE_SPOT` ) or the name of a custom capacity provider that you created.", "title": "CapacityProvider", "type": "string" }, "Weight": { - "markdownDescription": "The *weight* value designates the relative percentage of the total number of tasks launched that should use the specified capacity provider. The `weight` value is taken into consideration after the `base` value, if defined, is satisfied.\n\nIf no `weight` value is specified, the default value of `0` is used. When multiple capacity providers are specified within a capacity provider strategy, at least one of the capacity providers must have a weight value greater than zero and any capacity providers with a weight of `0` can't be used to place tasks. 
If you specify multiple capacity providers in a strategy that all have a weight of `0` , any `RunTask` or `CreateService` actions using the capacity provider strategy will fail.\n\nWeight value characteristics:\n\n- Weight is considered after the base value is satisfied\n- Default value is `0` if not specified\n- Valid range: 0 to 1,000\n- At least one capacity provider must have a weight greater than zero\n- Capacity providers with weight of `0` cannot place tasks\n\nTask distribution logic:\n\n- Base satisfaction: The minimum number of tasks specified by the base value are placed on that capacity provider\n- Weight distribution: After base requirements are met, additional tasks are distributed according to weight ratios\n\nExamples:\n\nEqual Distribution: Two capacity providers both with weight `1` will split tasks evenly after base requirements are met.\n\nWeighted Distribution: If capacityProviderA has weight `1` and capacityProviderB has weight `4` , then for every 1 task on A, 4 tasks will run on B.", + "markdownDescription": "The *weight* value designates the relative percentage of the total number of tasks launched that should use the specified capacity provider. The `weight` value is taken into consideration after the `base` value, if defined, is satisfied.\n\nIf no `weight` value is specified, the default value of `0` is used. When multiple capacity providers are specified within a capacity provider strategy, at least one of the capacity providers must have a weight value greater than zero and any capacity providers with a weight of `0` can't be used to place tasks. 
If you specify multiple capacity providers in a strategy that all have a weight of `0` , any `RunTask` or `CreateService` actions using the capacity provider strategy will fail.\n\nWeight value characteristics:\n\n- Weight is considered after the base value is satisfied\n- The default value is `0` if not specified\n- The valid range is 0 to 1,000\n- At least one capacity provider must have a weight greater than zero\n- Capacity providers with weight of `0` cannot place tasks\n\nTask distribution logic:\n\n- Base satisfaction: The minimum number of tasks specified by the base value are placed on that capacity provider\n- Weight distribution: After base requirements are met, additional tasks are distributed according to weight ratios\n\nExamples:\n\nEqual Distribution: Two capacity providers both with weight `1` will split tasks evenly after base requirements are met.\n\nWeighted Distribution: If capacityProviderA has weight `1` and capacityProviderB has weight `4` , then for every 1 task on A, 4 tasks will run on B.", "title": "Weight", "type": "number" } @@ -84202,7 +84202,7 @@ "type": "string" }, "PidMode": { - "markdownDescription": "The process namespace to use for the containers in the task. The valid values are `host` or `task` . On Fargate for Linux containers, the only valid value is `task` . For example, monitoring sidecars might need `pidMode` to access information about other containers running in the same task.\n\nIf `host` is specified, all containers within the tasks that specified the `host` PID mode on the same container instance share the same process namespace with the host Amazon EC2 instance.\n\nIf `task` is specified, all containers within the specified task share the same process namespace.\n\nIf no value is specified, the default is a private namespace for each container.\n\nIf the `host` PID mode is used, there's a heightened risk of undesired process namespace exposure.\n\n> This parameter is not supported for Windows containers. 
> This parameter is only supported for tasks that are hosted on AWS Fargate if the tasks are using platform version `1.4.0` or later (Linux). This isn't supported for Windows containers on Fargate.", + "markdownDescription": "The process namespace to use for the containers in the task. The valid values are `host` or `task` . On Fargate for Linux containers, the only valid value is `task` . For example, monitoring sidecars might need `pidMode` to access information about other containers running in the same task.\n\nIf `host` is specified, all containers within the tasks that specified the `host` PID mode on the same container instance share the same process namespace with the host Amazon EC2 instance.\n\nIf `task` is specified, all containers within the specified task share the same process namespace.\n\nIf no value is specified, the default is a private namespace for each container.\n\nIf the `host` PID mode is used, there's a heightened risk of undesired process namespace exposure.\n\n> This parameter is not supported for Windows containers. > This parameter is only supported for tasks that are hosted on AWS Fargate if the tasks are using platform version `1.4.0` or later (Linux). This isn't supported for Windows containers on Fargate.", "title": "PidMode", "type": "string" }, @@ -84229,7 +84229,7 @@ }, "RuntimePlatform": { "$ref": "#/definitions/AWS::ECS::TaskDefinition.RuntimePlatform", - "markdownDescription": "The operating system that your tasks definitions run on. A platform family is specified only for tasks using the Fargate launch type.", + "markdownDescription": "The operating system that your tasks definitions run on.", "title": "RuntimePlatform" }, "Tags": { @@ -84304,7 +84304,7 @@ "type": "array" }, "Cpu": { - "markdownDescription": "The number of `cpu` units reserved for the container. 
This parameter maps to `CpuShares` in the docker container create commandand the `--cpu-shares` option to docker run.\n\nThis field is optional for tasks using the Fargate launch type, and the only requirement is that the total amount of CPU reserved for all containers within a task be lower than the task-level `cpu` value.\n\n> You can determine the number of CPU units that are available per EC2 instance type by multiplying the vCPUs listed for that instance type on the [Amazon EC2 Instances](https://docs.aws.amazon.com/ec2/instance-types/) detail page by 1,024. \n\nLinux containers share unallocated CPU units with other containers on the container instance with the same ratio as their allocated amount. For example, if you run a single-container task on a single-core instance type with 512 CPU units specified for that container, and that's the only task running on the container instance, that container could use the full 1,024 CPU unit share at any given time. However, if you launched another copy of the same task on that container instance, each task is guaranteed a minimum of 512 CPU units when needed. Moreover, each container could float to higher CPU usage if the other container was not using it. If both tasks were 100% active all of the time, they would be limited to 512 CPU units.\n\nOn Linux container instances, the Docker daemon on the container instance uses the CPU value to calculate the relative CPU share ratios for running containers. The minimum valid CPU share value that the Linux kernel allows is 2, and the maximum valid CPU share value that the Linux kernel allows is 262144. However, the CPU parameter isn't required, and you can use CPU values below 2 or above 262144 in your container definitions. 
For CPU values below 2 (including null) or above 262144, the behavior varies based on your Amazon ECS container agent version:\n\n- *Agent versions less than or equal to 1.1.0:* Null and zero CPU values are passed to Docker as 0, which Docker then converts to 1,024 CPU shares. CPU values of 1 are passed to Docker as 1, which the Linux kernel converts to two CPU shares.\n- *Agent versions greater than or equal to 1.2.0:* Null, zero, and CPU values of 1 are passed to Docker as 2.\n- *Agent versions greater than or equal to 1.84.0:* CPU values greater than 256 vCPU are passed to Docker as 256, which is equivalent to 262144 CPU shares.\n\nOn Windows container instances, the CPU limit is enforced as an absolute limit, or a quota. Windows containers only have access to the specified amount of CPU that's described in the task definition. A null or zero CPU value is passed to Docker as `0` , which Windows interprets as 1% of one CPU.", + "markdownDescription": "The number of `cpu` units reserved for the container. This parameter maps to `CpuShares` in the docker container create command and the `--cpu-shares` option to docker run.\n\nThis field is optional for tasks using the Fargate launch type, and the only requirement is that the total amount of CPU reserved for all containers within a task be lower than the task-level `cpu` value.\n\n> You can determine the number of CPU units that are available per EC2 instance type by multiplying the vCPUs listed for that instance type on the [Amazon EC2 Instances](https://docs.aws.amazon.com/ec2/instance-types/) detail page by 1,024. \n\nLinux containers share unallocated CPU units with other containers on the container instance with the same ratio as their allocated amount. For example, if you run a single-container task on a single-core instance type with 512 CPU units specified for that container, and that's the only task running on the container instance, that container could use the full 1,024 CPU unit share at any given time. 
However, if you launched another copy of the same task on that container instance, each task is guaranteed a minimum of 512 CPU units when needed. Moreover, each container could float to higher CPU usage if the other container was not using it. If both tasks were 100% active all of the time, they would be limited to 512 CPU units.\n\nOn Linux container instances, the Docker daemon on the container instance uses the CPU value to calculate the relative CPU share ratios for running containers. The minimum valid CPU share value that the Linux kernel allows is 2, and the maximum valid CPU share value that the Linux kernel allows is 262144. However, the CPU parameter isn't required, and you can use CPU values below 2 or above 262144 in your container definitions. For CPU values below 2 (including null) or above 262144, the behavior varies based on your Amazon ECS container agent version:\n\n- *Agent versions less than or equal to 1.1.0:* Null and zero CPU values are passed to Docker as 0, which Docker then converts to 1,024 CPU shares. CPU values of 1 are passed to Docker as 1, which the Linux kernel converts to two CPU shares.\n- *Agent versions greater than or equal to 1.2.0:* Null, zero, and CPU values of 1 are passed to Docker as 2.\n- *Agent versions greater than or equal to 1.84.0:* CPU values greater than 256 vCPU are passed to Docker as 256, which is equivalent to 262144 CPU shares.\n\nOn Windows container instances, the CPU limit is enforced as an absolute limit, or a quota. Windows containers only have access to the specified amount of CPU that's described in the task definition. A null or zero CPU value is passed to Docker as `0` , which Windows interprets as 1% of one CPU.", "title": "Cpu", "type": "number" }, @@ -85083,7 +85083,7 @@ "additionalProperties": false, "properties": { "CpuArchitecture": { - "markdownDescription": "The CPU architecture.\n\nYou can run your Linux tasks on an ARM-based platform by setting the value to `ARM64` . 
This option is available for tasks that run on Linux Amazon EC2 instance or Linux containers on Fargate.", + "markdownDescription": "The CPU architecture.\n\nYou can run your Linux tasks on an ARM-based platform by setting the value to `ARM64` . This option is available for tasks that run on Linux Amazon EC2 instance, Amazon ECS Managed Instances, or Linux containers on Fargate.", "title": "CpuArchitecture", "type": "string" }, @@ -134916,7 +134916,7 @@ }, "WorkDocsConfiguration": { "$ref": "#/definitions/AWS::Kendra::DataSource.WorkDocsConfiguration", - "markdownDescription": "Provides the configuration information to connect to Amazon WorkDocs as your data source.", + "markdownDescription": "Provides the configuration information to connect to WorkDocs as your data source.", "title": "WorkDocsConfiguration" } }, @@ -136022,7 +136022,7 @@ "items": { "type": "string" }, - "markdownDescription": "A list of regular expression patterns to exclude certain files in your Amazon WorkDocs site repository. Files that match the patterns are excluded from the index. Files that don\u2019t match the patterns are included in the index. If a file matches both an inclusion and exclusion pattern, the exclusion pattern takes precedence and the file isn't included in the index.", + "markdownDescription": "A list of regular expression patterns to exclude certain files in your WorkDocs site repository. Files that match the patterns are excluded from the index. Files that don\u2019t match the patterns are included in the index. 
If a file matches both an inclusion and exclusion pattern, the exclusion pattern takes precedence and the file isn't included in the index.", "title": "ExclusionPatterns", "type": "array" }, @@ -136030,7 +136030,7 @@ "items": { "$ref": "#/definitions/AWS::Kendra::DataSource.DataSourceToIndexFieldMapping" }, - "markdownDescription": "A list of `DataSourceToIndexFieldMapping` objects that map Amazon WorkDocs data source attributes or field names to Amazon Kendra index field names. To create custom fields, use the `UpdateIndex` API before you map to Amazon WorkDocs fields. For more information, see [Mapping data source fields](https://docs.aws.amazon.com/kendra/latest/dg/field-mapping.html) . The Amazon WorkDocs data source field names must exist in your Amazon WorkDocs custom metadata.", + "markdownDescription": "A list of `DataSourceToIndexFieldMapping` objects that map WorkDocs data source attributes or field names to Amazon Kendra index field names. To create custom fields, use the `UpdateIndex` API before you map to WorkDocs fields. For more information, see [Mapping data source fields](https://docs.aws.amazon.com/kendra/latest/dg/field-mapping.html) . The WorkDocs data source field names must exist in your WorkDocs custom metadata.", "title": "FieldMappings", "type": "array" }, @@ -136038,17 +136038,17 @@ "items": { "type": "string" }, - "markdownDescription": "A list of regular expression patterns to include certain files in your Amazon WorkDocs site repository. Files that match the patterns are included in the index. Files that don't match the patterns are excluded from the index. If a file matches both an inclusion and exclusion pattern, the exclusion pattern takes precedence and the file isn't included in the index.", + "markdownDescription": "A list of regular expression patterns to include certain files in your WorkDocs site repository. Files that match the patterns are included in the index. Files that don't match the patterns are excluded from the index. 
If a file matches both an inclusion and exclusion pattern, the exclusion pattern takes precedence and the file isn't included in the index.", "title": "InclusionPatterns", "type": "array" }, "OrganizationId": { - "markdownDescription": "The identifier of the directory corresponding to your Amazon WorkDocs site repository.\n\nYou can find the organization ID in the [AWS Directory Service](https://docs.aws.amazon.com/directoryservicev2/) by going to *Active Directory* , then *Directories* . Your Amazon WorkDocs site directory has an ID, which is the organization ID. You can also set up a new Amazon WorkDocs directory in the AWS Directory Service console and enable a Amazon WorkDocs site for the directory in the Amazon WorkDocs console.", + "markdownDescription": "The identifier of the directory corresponding to your WorkDocs site repository.\n\nYou can find the organization ID in the [AWS Directory Service](https://docs.aws.amazon.com/directoryservicev2/) by going to *Active Directory* , then *Directories* . Your WorkDocs site directory has an ID, which is the organization ID. You can also set up a new WorkDocs directory in the AWS Directory Service console and enable a WorkDocs site for the directory in the WorkDocs console.", "title": "OrganizationId", "type": "string" }, "UseChangeLog": { - "markdownDescription": "`TRUE` to use the Amazon WorkDocs change log to determine which documents require updating in the index. Depending on the change log's size, it may take longer for Amazon Kendra to use the change log than to scan all of your documents in Amazon WorkDocs.", + "markdownDescription": "`TRUE` to use the WorkDocs change log to determine which documents require updating in the index. 
Depending on the change log's size, it may take longer for Amazon Kendra to use the change log than to scan all of your documents in WorkDocs.", "title": "UseChangeLog", "type": "boolean" } @@ -192806,7 +192806,7 @@ "type": "array" }, "Name": { - "markdownDescription": "The name of the sheet. This name is displayed on the sheet's tab in the Amazon QuickSight console.", + "markdownDescription": "The name of the sheet. This name is displayed on the sheet's tab in the Quick Suite console.", "title": "Name", "type": "string" }, @@ -205350,7 +205350,7 @@ "type": "array" }, "Name": { - "markdownDescription": "The name of the sheet. This name is displayed on the sheet's tab in the Amazon QuickSight console.", + "markdownDescription": "The name of the sheet. This name is displayed on the sheet's tab in the Quick Suite console.", "title": "Name", "type": "string" }, @@ -219730,7 +219730,7 @@ "type": "array" }, "Name": { - "markdownDescription": "The name of the sheet. This name is displayed on the sheet's tab in the Amazon QuickSight console.", + "markdownDescription": "The name of the sheet. This name is displayed on the sheet's tab in the Quick Suite console.", "title": "Name", "type": "string" }, @@ -252047,7 +252047,7 @@ "type": "string" }, "PlatformIdentifier": { - "markdownDescription": "The platform identifier of the notebook instance runtime environment.", + "markdownDescription": "The platform identifier of the notebook instance runtime environment. The default value is `notebook-al2-v2` .", "title": "PlatformIdentifier", "type": "string" },