Skip to content

Commit ab6a418

Browse files
authored
fix: nodepool support tag specifications (#1317)
* fix: nodepool support tag specifications * changelog 1317
1 parent 3e199cc commit ab6a418

File tree

5 files changed

+46
-2
lines changed

5 files changed

+46
-2
lines changed

.changelog/1317.txt

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,3 @@
1+
```release-note:enhancement
2+
resource/tencentcloud_kubernetes_node_pool: Support tag specifications
3+
```

tencentcloud/resource_tc_kubernetes_node_pool.go

Lines changed: 16 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -559,6 +559,11 @@ func ResourceTencentCloudKubernetesNodePool() *schema.Resource {
559559
Description: "Policy of scaling group termination. Available values: `[\"OLDEST_INSTANCE\"]`, `[\"NEWEST_INSTANCE\"]`.",
560560
Elem: &schema.Schema{Type: schema.TypeString},
561561
},
562+
"tags": {
563+
Type: schema.TypeMap,
564+
Optional: true,
565+
Description: "Node pool tag specifications, will pass through to the scaling instances.",
566+
},
562567
//computed
563568
"status": {
564569
Type: schema.TypeString,
@@ -1031,6 +1036,15 @@ func resourceKubernetesNodePoolRead(d *schema.ResourceData, meta interface{}) er
10311036
_ = d.Set("node_os_type", nodePool.OsCustomizeType)
10321037
}
10331038

1039+
if tags := nodePool.Tags; tags != nil {
1040+
tagMap := make(map[string]string)
1041+
for i := range tags {
1042+
tag := tags[i]
1043+
tagMap[*tag.Key] = *tag.Value
1044+
}
1045+
_ = d.Set("tags", tagMap)
1046+
}
1047+
10341048
//if nodePool.DeletionProtection != nil {
10351049
// _ = d.Set("deletion_protection", nodePool.DeletionProtection)
10361050
//}
@@ -1358,8 +1372,9 @@ func resourceKubernetesNodePoolUpdate(d *schema.ResourceData, meta interface{})
13581372
nodeOsType := d.Get("node_os_type").(string)
13591373
labels := GetTkeLabels(d, "labels")
13601374
taints := GetTkeTaints(d, "taints")
1375+
tags := helper.GetTags(d, "tags")
13611376
err := resource.Retry(writeRetryTimeout, func() *resource.RetryError {
1362-
errRet := service.ModifyClusterNodePool(ctx, clusterId, nodePoolId, name, enableAutoScale, minSize, maxSize, nodeOs, nodeOsType, labels, taints)
1377+
errRet := service.ModifyClusterNodePool(ctx, clusterId, nodePoolId, name, enableAutoScale, minSize, maxSize, nodeOs, nodeOsType, labels, taints, tags)
13631378
if errRet != nil {
13641379
return retryError(errRet)
13651380
}

tencentcloud/resource_tc_kubernetes_node_pool_test.go

Lines changed: 14 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -101,6 +101,8 @@ func TestAccTencentCloudTkeNodePoolResourceBasic(t *testing.T) {
101101
resource.TestCheckResourceAttr(testTkeClusterNodePoolResourceKey, "node_count", "1"),
102102
resource.TestCheckResourceAttr(testTkeClusterNodePoolResourceKey, "autoscaling_added_total", "1"),
103103
resource.TestCheckResourceAttr(testTkeClusterNodePoolResourceKey, "manually_added_total", "0"),
104+
resource.TestCheckResourceAttr(testTkeClusterNodePoolResourceKey, "tags.keep-test-np1", "test1"),
105+
resource.TestCheckResourceAttr(testTkeClusterNodePoolResourceKey, "tags.keep-test-np2", "test2"),
104106
resource.TestCheckResourceAttr(testTkeClusterNodePoolResourceKey, "auto_scaling_config.0.security_group_ids.#", "1"),
105107
),
106108
},
@@ -131,6 +133,8 @@ func TestAccTencentCloudTkeNodePoolResourceBasic(t *testing.T) {
131133
resource.TestCheckResourceAttr(testTkeClusterNodePoolResourceKey, "default_cooldown", "350"),
132134
resource.TestCheckResourceAttr(testTkeClusterNodePoolResourceKey, "termination_policies.#", "1"),
133135
resource.TestCheckResourceAttr(testTkeClusterNodePoolResourceKey, "termination_policies.0", "NEWEST_INSTANCE"),
136+
resource.TestCheckResourceAttr(testTkeClusterNodePoolResourceKey, "tags.keep-test-np1", "testI"),
137+
resource.TestCheckResourceAttr(testTkeClusterNodePoolResourceKey, "tags.keep-test-np3", "testIII"),
134138
resource.TestCheckResourceAttr(testTkeClusterNodePoolResourceKey, "auto_scaling_config.0.security_group_ids.#", "2"),
135139
),
136140
},
@@ -298,6 +302,11 @@ resource "tencentcloud_kubernetes_node_pool" "np_test" {
298302
effect = "PreferNoSchedule"
299303
}
300304
305+
tags = {
306+
keep-test-np1 = "test1"
307+
keep-test-np2 = "test2"
308+
}
309+
301310
node_config {
302311
extra_args = [
303312
"root-dir=/var/lib/kubelet"
@@ -366,6 +375,11 @@ resource "tencentcloud_kubernetes_node_pool" "np_test" {
366375
effect = "PreferNoSchedule"
367376
}
368377
378+
tags = {
379+
keep-test-np1 = "testI"
380+
keep-test-np3 = "testIII"
381+
}
382+
369383
node_config {
370384
extra_args = [
371385
"root-dir=/var/lib/kubelet"

tencentcloud/service_tencentcloud_tke.go

Lines changed: 12 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1215,7 +1215,7 @@ func (me *TkeService) CreateClusterNodePool(ctx context.Context, clusterId, name
12151215
return
12161216
}
12171217

1218-
func (me *TkeService) ModifyClusterNodePool(ctx context.Context, clusterId, nodePoolId string, name string, enableAutoScale bool, minSize int64, maxSize int64, nodeOs string, nodeOsType string, labels []*tke.Label, taints []*tke.Taint) (errRet error) {
1218+
func (me *TkeService) ModifyClusterNodePool(ctx context.Context, clusterId, nodePoolId string, name string, enableAutoScale bool, minSize int64, maxSize int64, nodeOs string, nodeOsType string, labels []*tke.Label, taints []*tke.Taint, tags map[string]string) (errRet error) {
12191219
logId := getLogId(ctx)
12201220
request := tke.NewModifyClusterNodePoolRequest()
12211221

@@ -1240,6 +1240,17 @@ func (me *TkeService) ModifyClusterNodePool(ctx context.Context, clusterId, node
12401240
request.Labels = labels
12411241
}
12421242

1243+
if len(tags) > 0 {
1244+
for k, v := range tags {
1245+
key := k
1246+
val := v
1247+
request.Tags = append(request.Tags, &tke.Tag{
1248+
Key: &key,
1249+
Value: &val,
1250+
})
1251+
}
1252+
}
1253+
12431254
ratelimit.Check(request.GetAction())
12441255
_, err := me.client.UseTkeClient().ModifyClusterNodePool(request)
12451256
if err != nil {

website/docs/r/kubernetes_node_pool.html.markdown

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -172,6 +172,7 @@ The following arguments are supported:
172172
* `scaling_group_project_id` - (Optional, Int) Project ID the scaling group belongs to.
173173
* `scaling_mode` - (Optional, String, ForceNew) Auto scaling mode. Valid values are `CLASSIC_SCALING`(scaling by create/destroy instances), `WAKE_UP_STOPPED_SCALING`(Boot priority for expansion. When expanding the capacity, the shutdown operation is given priority to the shutdown of the instance. If the number of instances is still lower than the expected number of instances after the startup, the instance will be created, and the method of destroying the instance will still be used for shrinking).
174174
* `subnet_ids` - (Optional, List: [`String`], ForceNew) ID list of subnet, and for VPC it is required.
175+
* `tags` - (Optional, Map) Node pool tag specifications, will pass through to the scaling instances.
175176
* `taints` - (Optional, List) Taints of kubernetes node pool created nodes.
176177
* `termination_policies` - (Optional, List: [`String`]) Policy of scaling group termination. Available values: `["OLDEST_INSTANCE"]`, `["NEWEST_INSTANCE"]`.
177178
* `unschedulable` - (Optional, Int, ForceNew) Sets whether the joining node participates in the schedule. Default is '0'. Participate in scheduling.

0 commit comments

Comments
 (0)