@@ -108,6 +108,7 @@ import (
108108 "github.com/tencentcloudstack/terraform-provider-tencentcloud/tencentcloud/internal/helper"
109109)
110110
111+ // merge `instance_type` to `backup_instance_types` as param `instance_types`
111112func getNodePoolInstanceTypes (d * schema.ResourceData ) []* string {
112113 configParas := d .Get ("auto_scaling_config" ).([]interface {})
113114 dMap := configParas [0 ].(map [string ]interface {})
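
For context on the new comment above: the merge presumably builds the AS `InstanceTypes` request parameter by putting the configured `instance_type` first and appending any `backup_instance_types`. A minimal standalone sketch of that idea (hypothetical helper name, not the provider's actual code):

package main

import "fmt"

// mergeInstanceTypes is a hypothetical illustration of the merge described in
// the diff comment: the primary instance type comes first, backups follow,
// and the result is returned as []*string the way the SDK request expects.
func mergeInstanceTypes(instanceType string, backups []string) []*string {
	merged := make([]*string, 0, len(backups)+1)
	merged = append(merged, &instanceType)
	for i := range backups {
		merged = append(merged, &backups[i])
	}
	return merged
}

func main() {
	for _, t := range mergeInstanceTypes("S5.MEDIUM4", []string{"S5.LARGE8", "SA2.MEDIUM4"}) {
		fmt.Println(*t)
	}
}
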
@@ -414,6 +415,7 @@ func ResourceTencentCloudKubernetesNodePool() *schema.Resource {
414415 "scaling_group_name" : {
415416 Type : schema .TypeString ,
416417 Optional : true ,
418+ Computed : true ,
417419 Description : "Name of relative scaling group." ,
418420 },
419421 "zones" : {
@@ -431,12 +433,14 @@ func ResourceTencentCloudKubernetesNodePool() *schema.Resource {
431433 "default_cooldown" : {
432434 Type : schema .TypeInt ,
433435 Optional : true ,
436+ Computed : true ,
434437 Description : "Seconds of scaling group cool down. Default value is `300`." ,
435438 },
436439 "termination_policies" : {
437440 Type : schema .TypeList ,
438441 MaxItems : 1 ,
439442 Optional : true ,
443+ Computed : true ,
440444 Description : "Policy of scaling group termination. Available values: `[\" OLDEST_INSTANCE\" ]`, `[\" NEWEST_INSTANCE\" ]`." ,
441445 Elem : & schema.Schema {Type : schema .TypeString },
442446 },
@@ -462,6 +466,9 @@ func ResourceTencentCloudKubernetesNodePool() *schema.Resource {
 				Description: "The auto scaling group ID.",
 			},
 		},
+		Importer: &schema.ResourceImporter{
+			State: schema.ImportStatePassthrough,
+		},
 		//compare to console, miss cam_role and running_version and lock_initial_node and security_proof
 	}
 }
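
Note on the Importer added above: with `schema.ImportStatePassthrough`, an existing node pool can presumably be brought under Terraform management with something like `terraform import tencentcloud_kubernetes_node_pool.example cls-xxxxxxxx#np-xxxxxxxx`; the `clusterId#nodePoolId` ID format is an assumption here rather than something this hunk shows.
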
@@ -500,11 +507,7 @@ func composeParameterToAsScalingGroupParaSerial(d *schema.ResourceData) (string,
 
 	if v, ok := d.GetOk("subnet_ids"); ok {
 		subnetIds := v.([]interface{})
-		request.SubnetIds = make([]*string, 0, len(subnetIds))
-		for i := range subnetIds {
-			subnetId := subnetIds[i].(string)
-			request.SubnetIds = append(request.SubnetIds, &subnetId)
-		}
+		request.SubnetIds = helper.InterfacesStringsPoint(subnetIds)
 	}
 
 	if v, ok := d.GetOk("scaling_mode"); ok {
@@ -668,6 +671,8 @@ func resourceKubernetesNodePoolRead(d *schema.ResourceData, meta interface{}) er
 		return nil
 	}
 
+	_ = d.Set("cluster_id", clusterId)
+
 	//Describe Node Pool
 	nodePool, has, err := service.DescribeNodePool(ctx, clusterId, nodePoolId)
 	if err != nil {
@@ -689,25 +694,121 @@ func resourceKubernetesNodePoolRead(d *schema.ResourceData, meta interface{}) er
 		return nil
 	}
 
+	_ = d.Set("name", nodePool.Name)
+	_ = d.Set("status", nodePool.LifeState)
+	_ = d.Set("node_count", nodePool.NodeCountSummary)
+	_ = d.Set("auto_scaling_group_id", nodePool.AutoscalingGroupId)
+	_ = d.Set("launch_config_id", nodePool.LaunchConfigurationId)
 	//set not force new parameters
-	d.Set("max_size", nodePool.MaxNodesNum)
-	d.Set("min_size", nodePool.MinNodesNum)
-	d.Set("desired_capacity", nodePool.DesiredNodesNum)
-	d.Set("name", nodePool.Name)
-	d.Set("status", nodePool.LifeState)
-	d.Set("node_count", nodePool.NodeCountSummary)
-	d.Set("auto_scaling_group_id", nodePool.AutoscalingGroupId)
-	d.Set("launch_config_id", nodePool.LaunchConfigurationId)
-	d.Set("enable_auto_scale", *nodePool.AutoscalingGroupStatus == "enabled")
-	d.Set("node_os", *nodePool.NodePoolOs)
-	d.Set("node_system_type", *nodePool.OsCustomizeType)
+	if nodePool.MaxNodesNum != nil {
+		_ = d.Set("max_size", nodePool.MaxNodesNum)
+	}
+	if nodePool.MinNodesNum != nil {
+		_ = d.Set("min_size", nodePool.MinNodesNum)
+	}
+	if nodePool.DesiredNodesNum != nil {
+		_ = d.Set("desired_capacity", nodePool.DesiredNodesNum)
+	}
+	if nodePool.AutoscalingGroupStatus != nil {
+		_ = d.Set("enable_auto_scale", *nodePool.AutoscalingGroupStatus == "enabled")
+	}
+	if nodePool.NodePoolOs != nil {
+		_ = d.Set("node_os", nodePool.NodePoolOs)
+	}
+	if nodePool.OsCustomizeType != nil {
+		_ = d.Set("node_os_type", nodePool.OsCustomizeType)
+	}
 
 	//set composed struct
 	lables := make(map[string]interface{}, len(nodePool.Labels))
 	for _, v := range nodePool.Labels {
 		lables[*v.Name] = *v.Value
 	}
-	d.Set("labels", lables)
+	_ = d.Set("labels", lables)
+
+	// set launch config
+	launchCfg, hasLC, err := asService.DescribeLaunchConfigurationById(ctx, *nodePool.LaunchConfigurationId)
+
+	if hasLC > 0 {
+		launchConfig := make(map[string]interface{})
+		if launchCfg.InstanceTypes != nil {
+			backupInsTypes := launchCfg.InstanceTypes
+			launchConfig["instance_type"] = backupInsTypes[0]
+			launchConfig["backup_instance_types"] = helper.StringsInterfaces(backupInsTypes[1:])
+		} else {
+			launchConfig["instance_type"] = launchCfg.InstanceType
+		}
+		if launchCfg.SystemDisk.DiskType != nil {
+			launchConfig["system_disk_type"] = launchCfg.SystemDisk.DiskType
+		}
+		if launchCfg.SystemDisk.DiskSize != nil {
+			launchConfig["system_disk_size"] = launchCfg.SystemDisk.DiskSize
+		}
+		if launchCfg.InternetAccessible.InternetChargeType != nil {
+			launchConfig["internet_charge_type"] = launchCfg.InternetAccessible.InternetChargeType
+		}
+		if launchCfg.InternetAccessible.InternetMaxBandwidthOut != nil {
+			launchConfig["internet_max_bandwidth_out"] = launchCfg.InternetAccessible.InternetMaxBandwidthOut
+		}
+		if launchCfg.InternetAccessible.BandwidthPackageId != nil {
+			launchConfig["bandwidth_package_id"] = launchCfg.InternetAccessible.BandwidthPackageId
+		}
+		if launchCfg.InternetAccessible.PublicIpAssigned != nil {
+			launchConfig["public_ip_assigned"] = launchCfg.InternetAccessible.PublicIpAssigned
+		}
+		if len(launchCfg.DataDisks) > 0 {
+			dataDisks := make([]map[string]interface{}, 0, len(launchCfg.DataDisks))
+			for i := range launchCfg.DataDisks {
+				item := launchCfg.DataDisks[i]
+				disk := make(map[string]interface{})
+				disk["disk_type"] = *item.DiskType
+				disk["disk_size"] = *item.DiskSize
+				if item.SnapshotId != nil {
+					disk["snapshot_id"] = *item.SnapshotId
+				}
+				dataDisks = append(dataDisks, disk)
+			}
+			launchConfig["data_disk"] = dataDisks
+		}
+		if launchCfg.LoginSettings != nil {
+			launchConfig["key_ids"] = helper.StringsInterfaces(launchCfg.LoginSettings.KeyIds)
+		}
+		// keep existing password in new launchConfig object
+		if v, ok := d.GetOk("auto_scaling_config.0.password"); ok {
+			launchConfig["password"] = v.(string)
+		}
+		launchConfig["security_group_ids"] = helper.StringsInterfaces(launchCfg.SecurityGroupIds)
+
+		enableSecurity := launchCfg.EnhancedService.SecurityService.Enabled
+		enableMonitor := launchCfg.EnhancedService.MonitorService.Enabled
+		// Only declared or diff from exist will set.
+		if _, ok := d.GetOk("enhanced_security_service"); ok || enableSecurity != nil {
+			launchConfig["enhanced_security_service"] = *enableSecurity
+		}
+		if _, ok := d.GetOk("enhanced_monitor_service"); ok || enableMonitor != nil {
+			launchConfig["enhanced_monitor_service"] = *enableMonitor
+		}
+		asgConfig := make([]interface{}, 0, 1)
+		asgConfig = append(asgConfig, launchConfig)
+		if err := d.Set("auto_scaling_config", asgConfig); err != nil {
+			return err
+		}
+	}
+
+	// asg node unschedulable
+	clusterAsg, err := service.DescribeClusterAsGroupsByGroupId(ctx, clusterId, *nodePool.AutoscalingGroupId)
+
+	if err != nil {
+		return err
+	}
+
+	unschedulable := 0
+	if clusterAsg != nil {
+		if clusterAsg.IsUnschedulable != nil && *clusterAsg.IsUnschedulable {
+			unschedulable = 1
+		}
+	}
+	_ = d.Set("unschedulable", unschedulable)
 
 	// Relative scaling group status
 	asg, hasAsg, err := asService.DescribeAutoScalingGroupById(ctx, *nodePool.AutoscalingGroupId)
@@ -725,12 +826,15 @@ func resourceKubernetesNodePoolRead(d *schema.ResourceData, meta interface{}) er
 		return nil
 	}
 
-	if hasAsg >= 1 {
+	if hasAsg > 0 {
 		_ = d.Set("scaling_group_name", asg.AutoScalingGroupName)
 		_ = d.Set("zones", asg.ZoneSet)
 		_ = d.Set("scaling_group_project_id", asg.ProjectId)
 		_ = d.Set("default_cooldown", asg.DefaultCooldown)
-		_ = d.Set("termination_policies", asg.TerminationPolicySet)
+		_ = d.Set("termination_policies", helper.StringsInterfaces(asg.TerminationPolicySet))
+		_ = d.Set("vpc_id", asg.VpcId)
+		_ = d.Set("retry_policy", asg.RetryPolicy)
+		_ = d.Set("subnet_ids", helper.StringsInterfaces(asg.SubnetIdSet))
 	}
 
 	taints := make([]map[string]interface{}, len(nodePool.Taints))