Commit ddfdbdd

resource tencentcloud_kubernetes_cluster add node_pool_global_config

1 parent dcd423e · commit ddfdbdd

3 files changed: +182 −9

tencentcloud/resource_tc_kubernetes_cluster.go

168 additions, 0 deletions
@@ -207,6 +207,7 @@ import (
 	"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
 	"github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common/errors"
 	cvm "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/cvm/v20170312"
+	tke "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/tke/v20180525"
 	"github.com/tencentcloudstack/terraform-provider-tencentcloud/tencentcloud/internal/helper"
 )
 
@@ -471,6 +472,65 @@ func TkeCvmCreateInfo() map[string]*schema.Schema {
 	}
 }
 
+func TkeNodePoolGlobalConfig() map[string]*schema.Schema {
+	return map[string]*schema.Schema{
+		"is_scale_in_enabled": {
+			Type:        schema.TypeBool,
+			Optional:    true,
+			Computed:    true,
+			Description: "Indicates whether to enable scale-in.",
+		},
+		"expander": {
+			Type:        schema.TypeString,
+			Optional:    true,
+			Computed:    true,
+			Description: "Indicates which scale-out method will be used when there are multiple scaling groups. Valid values: `random` - select a random scaling group, `most-pods` - select the scaling group that can schedule the most pods, `least-waste` - select the scaling group that can ensure the fewest remaining resources after Pod scheduling.",
+		},
+		"max_concurrent_scale_in": {
+			Type:        schema.TypeInt,
+			Optional:    true,
+			Computed:    true,
+			Description: "Max concurrent scale-in volume.",
+		},
+		"scale_in_delay": {
+			Type:        schema.TypeInt,
+			Optional:    true,
+			Computed:    true,
+			Description: "Number of minutes after cluster scale-out when the system starts judging whether to perform scale-in.",
+		},
+		"scale_in_unneeded_time": {
+			Type:        schema.TypeInt,
+			Optional:    true,
+			Computed:    true,
+			Description: "Number of consecutive minutes of idleness after which the node is subject to scale-in.",
+		},
+		"scale_in_utilization_threshold": {
+			Type:        schema.TypeInt,
+			Optional:    true,
+			Computed:    true,
+			Description: "Percentage of node resource usage below which the node is considered to be idle.",
+		},
+		"ignore_daemon_sets_utilization": {
+			Type:        schema.TypeBool,
+			Optional:    true,
+			Computed:    true,
+			Description: "Whether to ignore DaemonSet pods by default when calculating resource usage.",
+		},
+		"skip_nodes_with_local_storage": {
+			Type:        schema.TypeBool,
+			Optional:    true,
+			Computed:    true,
+			Description: "During scale-in, ignore nodes with local storage pods.",
+		},
+		"skip_nodes_with_system_pods": {
+			Type:        schema.TypeBool,
+			Optional:    true,
+			Computed:    true,
+			Description: "During scale-in, ignore nodes with pods in the kube-system namespace that are not managed by DaemonSet.",
+		},
+	}
+}
+
 func resourceTencentCloudTkeCluster() *schema.Resource {
 	schemaBody := map[string]*schema.Schema{
 		"cluster_name": {
@@ -536,6 +596,15 @@ func resourceTencentCloudTkeCluster() *schema.Resource {
 			Default:     false,
 			Description: "Indicates whether to enable cluster node auto scaler.",
 		},
+		"node_pool_global_config": {
+			Type:     schema.TypeList,
+			Optional: true,
+			Computed: true,
+			Elem: &schema.Resource{
+				Schema: TkeNodePoolGlobalConfig(),
+			},
+			Description: "Global config effective for all node pools.",
+		},
 		"cluster_extra_args": {
 			Type:     schema.TypeList,
 			ForceNew: true,
@@ -1098,6 +1167,43 @@ func tkeGetCvmRunInstancesPara(dMap map[string]interface{}, meta interface{},
 	return
 }
 
+func tkeGetNodePoolGlobalConfig(d *schema.ResourceData) *tke.ModifyClusterAsGroupOptionAttributeRequest {
+	request := tke.NewModifyClusterAsGroupOptionAttributeRequest()
+	request.ClusterId = helper.String(d.Id())
+
+	clusterAsGroupOption := &tke.ClusterAsGroupOption{}
+	if v, ok := d.GetOk("node_pool_global_config.0.is_scale_in_enabled"); ok {
+		clusterAsGroupOption.IsScaleDownEnabled = helper.Bool(v.(bool))
+	}
+	if v, ok := d.GetOk("node_pool_global_config.0.expander"); ok {
+		clusterAsGroupOption.Expander = helper.String(v.(string))
+	}
+	if v, ok := d.GetOk("node_pool_global_config.0.max_concurrent_scale_in"); ok {
+		clusterAsGroupOption.MaxEmptyBulkDelete = helper.IntInt64(v.(int))
+	}
+	if v, ok := d.GetOk("node_pool_global_config.0.scale_in_delay"); ok {
+		clusterAsGroupOption.ScaleDownDelay = helper.IntInt64(v.(int))
+	}
+	if v, ok := d.GetOk("node_pool_global_config.0.scale_in_unneeded_time"); ok {
+		clusterAsGroupOption.ScaleDownUnneededTime = helper.IntInt64(v.(int))
+	}
+	if v, ok := d.GetOk("node_pool_global_config.0.scale_in_utilization_threshold"); ok {
+		clusterAsGroupOption.ScaleDownUtilizationThreshold = helper.IntInt64(v.(int))
+	}
+	if v, ok := d.GetOk("node_pool_global_config.0.ignore_daemon_sets_utilization"); ok {
+		clusterAsGroupOption.IgnoreDaemonSetsUtilization = helper.Bool(v.(bool))
+	}
+	if v, ok := d.GetOk("node_pool_global_config.0.skip_nodes_with_local_storage"); ok {
+		clusterAsGroupOption.SkipNodesWithLocalStorage = helper.Bool(v.(bool))
+	}
+	if v, ok := d.GetOk("node_pool_global_config.0.skip_nodes_with_system_pods"); ok {
+		clusterAsGroupOption.SkipNodesWithSystemPods = helper.Bool(v.(bool))
+	}
+
+	request.ClusterAsGroupOption = clusterAsGroupOption
+	return request
+}
+
 func resourceTencentCloudTkeClusterCreate(d *schema.ResourceData, meta interface{}) error {
 	defer logElapsed("resource.tencentcloud_kubernetes_cluster.create")()
 
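One behavior of `tkeGetNodePoolGlobalConfig` worth noting: in the Terraform plugin SDK, `GetOk` reports `ok == false` for Go zero values, so explicit user settings such as `is_scale_in_enabled = false` or `scale_in_delay = 0` are skipped and never reach the request. A minimal sketch of the distinction, assuming the SDK's `GetOkExists` helper were used instead for one boolean field; this is not part of the commit:

// Sketch only: GetOkExists distinguishes "attribute not set" from
// "attribute set to its zero value", so an explicit
// `is_scale_in_enabled = false` would still reach the request.
if v, ok := d.GetOkExists("node_pool_global_config.0.is_scale_in_enabled"); ok {
	clusterAsGroupOption.IsScaleDownEnabled = helper.Bool(v.(bool))
}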

@@ -1436,6 +1542,21 @@ func resourceTencentCloudTkeClusterCreate(d *schema.ResourceData, meta interface
 		}
 	}
 
+	//Modify node pool global config
+	if _, ok := d.GetOk("node_pool_global_config"); ok {
+		request := tkeGetNodePoolGlobalConfig(d)
+		err = resource.Retry(writeRetryTimeout, func() *resource.RetryError {
+			inErr := service.ModifyClusterNodePoolGlobalConfig(ctx, request)
+			if inErr != nil {
+				return retryError(inErr)
+			}
+			return nil
+		})
+		if err != nil {
+			return err
+		}
+	}
+
 	if err = resourceTencentCloudTkeClusterRead(d, meta); err != nil {
 		log.Printf("[WARN]%s resource.kubernetes_cluster.read after create fail , %s", logId, err.Error())
 		return err
@@ -1588,6 +1709,37 @@ func resourceTencentCloudTkeClusterRead(d *schema.ResourceData, meta interface{}
 		_ = d.Set("cluster_intranet", true)
 	}
 
+	var globalConfig *tke.ClusterAsGroupOption
+	err = resource.Retry(readRetryTimeout, func() *resource.RetryError {
+		globalConfig, err = service.DescribeClusterNodePoolGlobalConfig(ctx, d.Id())
+		if e, ok := err.(*errors.TencentCloudSDKError); ok {
+			if e.GetCode() == "InternalError.ClusterNotFound" {
+				return nil
+			}
+		}
+		if err != nil {
+			return resource.RetryableError(err)
+		}
+		return nil
+	})
+	if err != nil {
+		return err
+	}
+
+	if globalConfig != nil {
+		temp := make(map[string]interface{})
+		temp["is_scale_in_enabled"] = globalConfig.IsScaleDownEnabled
+		temp["expander"] = globalConfig.Expander
+		temp["max_concurrent_scale_in"] = globalConfig.MaxEmptyBulkDelete
+		temp["scale_in_delay"] = globalConfig.ScaleDownDelay
+		temp["scale_in_unneeded_time"] = globalConfig.ScaleDownUnneededTime
+		temp["scale_in_utilization_threshold"] = globalConfig.ScaleDownUtilizationThreshold
+		temp["ignore_daemon_sets_utilization"] = globalConfig.IgnoreDaemonSetsUtilization
+		temp["skip_nodes_with_local_storage"] = globalConfig.SkipNodesWithLocalStorage
+		temp["skip_nodes_with_system_pods"] = globalConfig.SkipNodesWithSystemPods
+
+		_ = d.Set("node_pool_global_config", []map[string]interface{}{temp})
+	}
 	return nil
 }
 
@@ -1908,6 +2060,22 @@ func resourceTencentCloudTkeClusterUpdate(d *schema.ResourceData, meta interface
 		}
 	}
 
+	// update node pool global config
+	if d.HasChange("node_pool_global_config") {
+		request := tkeGetNodePoolGlobalConfig(d)
+		err := resource.Retry(writeRetryTimeout, func() *resource.RetryError {
+			inErr := tkeService.ModifyClusterNodePoolGlobalConfig(ctx, request)
+			if inErr != nil {
+				return retryError(inErr)
+			}
+			return nil
+		})
+		if err != nil {
+			return err
+		}
+		d.SetPartial("node_pool_global_config")
+	}
+
 	d.Partial(false)
 	if err := resourceTencentCloudTkeClusterRead(d, meta); err != nil {
 		log.Printf("[WARN]%s resource.kubernetes_cluster.read after update fail , %s", logId, err.Error())

tencentcloud/service_tencentcloud_tke.go

1 addition, 9 deletions
@@ -1113,21 +1113,13 @@ func (me *TkeService) DescribeNodePool(ctx context.Context, clusterId string, no
 }
 
 //node pool global config
-func (me *TkeService) ModifyClusterNodePoolGlobalConfig(ctx context.Context, clusterId string, isScaleDown bool, expanderStrategy string) (errRet error) {
-
+func (me *TkeService) ModifyClusterNodePoolGlobalConfig(ctx context.Context, request *tke.ModifyClusterAsGroupOptionAttributeRequest) (errRet error) {
 	logId := getLogId(ctx)
-	request := tke.NewModifyClusterAsGroupOptionAttributeRequest()
-
 	defer func() {
 		if errRet != nil {
 			log.Printf("[CRITAL]%s api[%s] fail, reason[%s]\n", logId, request.GetAction(), errRet.Error())
 		}
 	}()
-	request.ClusterId = &clusterId
-	request.ClusterAsGroupOption = &tke.ClusterAsGroupOption{
-		IsScaleDownEnabled: &isScaleDown,
-		Expander:           &expanderStrategy,
-	}
 
 	ratelimit.Check(request.GetAction())
 	_, err := me.client.UseTkeClient().ModifyClusterAsGroupOptionAttribute(request)
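The refactor above inverts the responsibility: previously `ModifyClusterNodePoolGlobalConfig` built the request itself and could only set `IsScaleDownEnabled` and `Expander`; now the caller assembles the complete `ModifyClusterAsGroupOptionAttributeRequest` and the service method only logs, rate-limits, and sends it. A minimal sketch of the resulting call contract, with a placeholder cluster ID and the retry wrapping (shown in the resource code above) omitted:

// Sketch only; "cls-xxxxxxxx" is a placeholder cluster ID.
request := tke.NewModifyClusterAsGroupOptionAttributeRequest()
request.ClusterId = helper.String("cls-xxxxxxxx")
request.ClusterAsGroupOption = &tke.ClusterAsGroupOption{
	IsScaleDownEnabled: helper.Bool(true),
	Expander:           helper.String("least-waste"),
}
if err := service.ModifyClusterNodePoolGlobalConfig(ctx, request); err != nil {
	return err
}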

website/docs/r/kubernetes_cluster.html.markdown

13 additions, 0 deletions
@@ -237,6 +237,7 @@ The following arguments are supported:
 * `mount_target` - (Optional, ForceNew) Mount target. Default is not mounting.
 * `network_type` - (Optional, ForceNew) Cluster network type, GR or VPC-CNI. Default is GR.
 * `node_name_type` - (Optional, ForceNew) Node name type of Cluster, the available values include: 'lan-ip' and 'hostname', Default is 'lan-ip'.
+* `node_pool_global_config` - (Optional) Global config effective for all node pools.
 * `project_id` - (Optional) Project ID, default value is 0.
 * `service_cidr` - (Optional, ForceNew) A network address block of the service. Different from vpc cidr and cidr of other clusters within this vpc. Must be in 10./192.168/172.[16-31] segments.
 * `tags` - (Optional) The tags of the cluster.
@@ -280,6 +281,18 @@ The `master_config` object supports the following:
 * `system_disk_type` - (Optional, ForceNew) System disk type. For more information on limits of system disk types, see [Storage Overview](https://intl.cloud.tencent.com/document/product/213/4952). Valid values: `LOCAL_BASIC`: local disk, `LOCAL_SSD`: local SSD disk, `CLOUD_BASIC`: HDD cloud disk, `CLOUD_SSD`: SSD, `CLOUD_PREMIUM`: Premium Cloud Storage. NOTE: `LOCAL_BASIC` and `LOCAL_SSD` are deprecated.
 * `user_data` - (Optional, ForceNew) Base64-encoded User Data text, the length limit is 16KB.
 
+The `node_pool_global_config` object supports the following:
+
+* `expander` - (Optional) Indicates which scale-out method will be used when there are multiple scaling groups. Valid values: `random` - select a random scaling group, `most-pods` - select the scaling group that can schedule the most pods, `least-waste` - select the scaling group that can ensure the fewest remaining resources after Pod scheduling.
+* `ignore_daemon_sets_utilization` - (Optional) Whether to ignore DaemonSet pods by default when calculating resource usage.
+* `is_scale_in_enabled` - (Optional) Indicates whether to enable scale-in.
+* `max_concurrent_scale_in` - (Optional) Max concurrent scale-in volume.
+* `scale_in_delay` - (Optional) Number of minutes after cluster scale-out when the system starts judging whether to perform scale-in.
+* `scale_in_unneeded_time` - (Optional) Number of consecutive minutes of idleness after which the node is subject to scale-in.
+* `scale_in_utilization_threshold` - (Optional) Percentage of node resource usage below which the node is considered to be idle.
+* `skip_nodes_with_local_storage` - (Optional) During scale-in, ignore nodes with local storage pods.
+* `skip_nodes_with_system_pods` - (Optional) During scale-in, ignore nodes with pods in the kube-system namespace that are not managed by DaemonSet.
+
 The `worker_config` object supports the following:
 
 * `instance_type` - (Required, ForceNew) Specified types of CVM instance.
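For context, a minimal sketch of a configuration exercising the new block, written as an acceptance-test style Go constant in the repo's usual fashion. Only the `node_pool_global_config` block comes from this commit; the constant name, the surrounding cluster arguments, and the placeholder VPC ID are illustrative assumptions:

package tencentcloud

// Hypothetical test configuration; values outside node_pool_global_config
// are placeholders, not prescribed by this commit.
const testAccTkeClusterNodePoolGlobalConfig = `
resource "tencentcloud_kubernetes_cluster" "example" {
  vpc_id              = "vpc-xxxxxxxx" # placeholder
  cluster_cidr        = "10.31.0.0/16"
  cluster_max_pod_num = 32
  cluster_name        = "example"
  cluster_deploy_type = "MANAGED_CLUSTER"

  node_pool_global_config {
    is_scale_in_enabled            = true
    expander                       = "random"
    max_concurrent_scale_in        = 5
    scale_in_delay                 = 15
    scale_in_unneeded_time         = 15
    scale_in_utilization_threshold = 30
    ignore_daemon_sets_utilization = true
    skip_nodes_with_local_storage  = true
    skip_nodes_with_system_pods    = true
  }
}
`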
