
Commit 99f28ab

tongyiming and mikatong authored
add lighthouse disk datasource (#1895)
* add lighthouse disk datasource
* update

Co-authored-by: mikatong <mikatong@tencent.com>
1 parent 6f6d0d0 commit 99f28ab

File tree

7 files changed: +445 additions, -0 deletions


.changelog/1895.txt

Lines changed: 3 additions & 0 deletions
@@ -0,0 +1,3 @@
```release-note:new-data-source
tencentcloud_lighthouse_disks
```
Lines changed: 309 additions & 0 deletions
@@ -0,0 +1,309 @@

/*
Use this data source to query detailed information of lighthouse disk

Example Usage

```hcl
data "tencentcloud_lighthouse_disks" "disks" {
  disk_ids = ["lhdisk-xxxxxx"]
}
```
*/
package tencentcloud

import (
    "context"

    "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
    lighthouse "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/lighthouse/v20200324"
    "github.com/tencentcloudstack/terraform-provider-tencentcloud/tencentcloud/internal/helper"
)

func dataSourceTencentCloudLighthouseInstanceDisks() *schema.Resource {
    return &schema.Resource{
        Read: dataSourceTencentCloudLighthouseInstanceDisksRead,
        Schema: map[string]*schema.Schema{
            "disk_ids": {
                Optional: true,
                Type:     schema.TypeSet,
                Elem: &schema.Schema{
                    Type: schema.TypeString,
                },
                Description: "List of disk ids.",
            },

            "filters": {
                Optional:    true,
                Type:        schema.TypeList,
                Description: "Filter list.",
                Elem: &schema.Resource{
                    Schema: map[string]*schema.Schema{
                        "name": {
                            Type:        schema.TypeString,
                            Required:    true,
                            Description: "Fields to be filtered. Valid names: `disk-id`: Filters by disk id; `instance-id`: Filter by instance id; `disk-name`: Filter by disk name; `zone`: Filter by zone; `disk-usage`: Filter by disk usage(Values: `SYSTEM_DISK` or `DATA_DISK`); `disk-state`: Filter by disk state.",
                        },
                        "values": {
                            Type: schema.TypeSet,
                            Elem: &schema.Schema{
                                Type: schema.TypeString,
                            },
                            Required:    true,
                            Description: "Value of the field.",
                        },
                    },
                },
            },

            "disk_list": {
                Computed:    true,
                Type:        schema.TypeList,
                Description: "Cloud disk information list.",
                Elem: &schema.Resource{
                    Schema: map[string]*schema.Schema{
                        "disk_id": {
                            Type:        schema.TypeString,
                            Computed:    true,
                            Description: "Disk id.",
                        },
                        "instance_id": {
                            Type:        schema.TypeString,
                            Computed:    true,
                            Description: "Instance id.",
                        },
                        "zone": {
                            Type:        schema.TypeString,
                            Computed:    true,
                            Description: "Availability zone.",
                        },
                        "disk_name": {
                            Type:        schema.TypeString,
                            Computed:    true,
                            Description: "Disk name.",
                        },
                        "disk_usage": {
                            Type:        schema.TypeString,
                            Computed:    true,
                            Description: "Disk usage.",
                        },
                        "disk_type": {
                            Type:        schema.TypeString,
                            Computed:    true,
                            Description: "Disk type.",
                        },
                        "disk_charge_type": {
                            Type:        schema.TypeString,
                            Computed:    true,
                            Description: "Disk charge type.",
                        },
                        "disk_size": {
                            Type:        schema.TypeInt,
                            Computed:    true,
                            Description: "Disk size.",
                        },
                        "renew_flag": {
                            Type:        schema.TypeString,
                            Computed:    true,
                            Description: "Renew flag.",
                        },
                        "disk_state": {
                            Type:        schema.TypeString,
                            Computed:    true,
                            Description: "Disk state. Valid values: `PENDING`, `UNATTACHED`, `ATTACHING`, `ATTACHED`, `DETACHING`, `SHUTDOWN`, `CREATED_FAILED`, `TERMINATING`, `DELETING`, `FREEZING`.",
                        },
                        "attached": {
                            Type:        schema.TypeBool,
                            Computed:    true,
                            Description: "Disk attach state.",
                        },
                        "delete_with_instance": {
                            Type:        schema.TypeBool,
                            Computed:    true,
                            Description: "Whether to release with the instance.",
                        },
                        "latest_operation": {
                            Type:        schema.TypeString,
                            Computed:    true,
                            Description: "Latest operation.",
                        },
                        "latest_operation_state": {
                            Type:        schema.TypeString,
                            Computed:    true,
                            Description: "Latest operation state.",
                        },
                        "latest_operation_request_id": {
                            Type:        schema.TypeString,
                            Computed:    true,
                            Description: "Latest operation request id.",
                        },
                        "created_time": {
                            Type:        schema.TypeString,
                            Computed:    true,
                            Description: "Created time. Expressed according to the ISO8601 standard, and using UTC time. The format is `YYYY-MM-DDThh:mm:ssZ`.",
                        },
                        "expired_time": {
                            Type:        schema.TypeString,
                            Computed:    true,
                            Description: "Expired time. Expressed according to the ISO8601 standard, and using UTC time. The format is `YYYY-MM-DDThh:mm:ssZ`.",
                        },
                        "isolated_time": {
                            Type:        schema.TypeString,
                            Computed:    true,
                            Description: "Isolated time. Expressed according to the ISO8601 standard, and using UTC time. The format is `YYYY-MM-DDThh:mm:ssZ`.",
                        },
                        "disk_backup_count": {
                            Type:        schema.TypeInt,
                            Computed:    true,
                            Description: "Number of existing backup points of cloud disk.",
                        },
                        "disk_backup_quota": {
                            Type:        schema.TypeInt,
                            Computed:    true,
                            Description: "Number of backup points quota for cloud disk.",
                        },
                    },
                },
            },

            "result_output_file": {
                Type:        schema.TypeString,
                Optional:    true,
                Description: "Used to save results.",
            },
        },
    }
}

func dataSourceTencentCloudLighthouseInstanceDisksRead(d *schema.ResourceData, meta interface{}) error {
    defer logElapsed("data_source.tencentcloud_lighthouse_instance_disks.read")()
    defer inconsistentCheck(d, meta)()

    logId := getLogId(contextNil)

    ctx := context.WithValue(context.TODO(), logIdKey, logId)

    diskIds := make([]string, 0)
    for _, diskId := range d.Get("disk_ids").(*schema.Set).List() {
        diskIds = append(diskIds, diskId.(string))
    }
    filters := make([]*lighthouse.Filter, 0)
    if v, ok := d.GetOk("filters"); ok {
        filterSet := v.([]interface{})

        for _, item := range filterSet {
            filter := lighthouse.Filter{}
            filterMap := item.(map[string]interface{})

            if v, ok := filterMap["name"]; ok {
                filter.Name = helper.String(v.(string))
            }
            if v, ok := filterMap["values"]; ok {
                valuesSet := v.(*schema.Set).List()
                filter.Values = helper.InterfacesStringsPoint(valuesSet)
            }
            filters = append(filters, &filter)
        }
    }
    service := LightHouseService{client: meta.(*TencentCloudClient).apiV3Conn}
    disks, err := service.DescribeLighthouseDisk(ctx, diskIds, filters)
    if err != nil {
        return err
    }

    ids := make([]string, 0)
    diskList := make([]map[string]interface{}, 0)
    for _, disk := range disks {
        diskMap := make(map[string]interface{})
        if disk.DiskId != nil {
            diskMap["disk_id"] = disk.DiskId
            ids = append(ids, *disk.DiskId)
        }

        if disk.InstanceId != nil {
            diskMap["instance_id"] = disk.InstanceId
        }

        if disk.Zone != nil {
            diskMap["zone"] = disk.Zone
        }

        if disk.DiskName != nil {
            diskMap["disk_name"] = disk.DiskName
        }

        if disk.DiskUsage != nil {
            diskMap["disk_usage"] = disk.DiskUsage
        }

        if disk.DiskType != nil {
            diskMap["disk_type"] = disk.DiskType
        }

        if disk.DiskChargeType != nil {
            diskMap["disk_charge_type"] = disk.DiskChargeType
        }

        if disk.DiskSize != nil {
            diskMap["disk_size"] = disk.DiskSize
        }

        if disk.RenewFlag != nil {
            diskMap["renew_flag"] = disk.RenewFlag
        }

        if disk.DiskState != nil {
            diskMap["disk_state"] = disk.DiskState
        }

        if disk.Attached != nil {
            diskMap["attached"] = disk.Attached
        }

        if disk.DeleteWithInstance != nil {
            diskMap["delete_with_instance"] = disk.DeleteWithInstance
        }

        if disk.LatestOperation != nil {
            diskMap["latest_operation"] = disk.LatestOperation
        }

        if disk.LatestOperationState != nil {
            diskMap["latest_operation_state"] = disk.LatestOperationState
        }

        if disk.LatestOperationRequestId != nil {
            diskMap["latest_operation_request_id"] = disk.LatestOperationRequestId
        }

        if disk.CreatedTime != nil {
            diskMap["created_time"] = disk.CreatedTime
        }

        if disk.ExpiredTime != nil {
            diskMap["expired_time"] = disk.ExpiredTime
        }

        if disk.IsolatedTime != nil {
            diskMap["isolated_time"] = disk.IsolatedTime
        }

        if disk.DiskBackupCount != nil {
            diskMap["disk_backup_count"] = disk.DiskBackupCount
        }

        if disk.DiskBackupQuota != nil {
            diskMap["disk_backup_quota"] = disk.DiskBackupQuota
        }

        diskList = append(diskList, diskMap)
    }
    d.SetId(helper.DataResourceIdsHash(ids))
    _ = d.Set("disk_list", diskList)
    output, ok := d.GetOk("result_output_file")
    if ok && output.(string) != "" {
        if e := writeToFile(output.(string), diskList); e != nil {
            return e
        }
    }
    return nil
}
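The doc comment's example only shows lookup by `disk_ids`; the schema above also accepts repeated `filters` blocks whose `name` must be one of the values listed in its Description. A hedged HCL sketch of a filter-based query (the instance id is a placeholder, not part of this commit):

```hcl
data "tencentcloud_lighthouse_disks" "by_filter" {
  # Filter to data disks attached to one Lighthouse instance.
  filters {
    name   = "instance-id"
    values = ["lhins-xxxxxx"]
  }
  filters {
    name   = "disk-usage"
    values = ["DATA_DISK"]
  }
}
```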
Lines changed: 28 additions & 0 deletions
@@ -0,0 +1,28 @@

package tencentcloud

import (
    "testing"

    "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource"
)

func TestAccTencentCloudLighthouseDisksDataSource_basic(t *testing.T) {
    t.Parallel()
    resource.Test(t, resource.TestCase{
        PreCheck:  func() { testAccPreCheckCommon(t, ACCOUNT_TYPE_PREPAY) },
        Providers: testAccProviders,
        Steps: []resource.TestStep{
            {
                Config: testAccLighthouseDisksDataSource,
                Check:  resource.ComposeTestCheckFunc(testAccCheckTencentCloudDataSourceID("data.tencentcloud_lighthouse_disks.disks")),
            },
        },
    })
}

const testAccLighthouseDisksDataSource = `

data "tencentcloud_lighthouse_disks" "disks" {
    disk_ids = ["lhdisk-cwodsc4q"]
}
`
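The acceptance test only checks that the data source sets an id; a hedged sketch of how a configuration might consume the computed `disk_list` attribute (attribute paths follow the schema above, the output names are illustrative, and it assumes at least one disk is returned):

```hcl
# Read attributes of the first disk returned by the data source declared above.
output "first_disk_state" {
  value = data.tencentcloud_lighthouse_disks.disks.disk_list[0].disk_state
}

output "first_disk_size" {
  value = data.tencentcloud_lighthouse_disks.disks.disk_list[0].disk_size
}
```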

tencentcloud/provider.go

Lines changed: 2 additions & 0 deletions
@@ -1053,6 +1053,7 @@ TencentCloud Lighthouse(Lighthouse)
 tencentcloud_lighthouse_disk_config
 tencentcloud_lighthouse_all_scene
 tencentcloud_lighthouse_modify_instance_bundle
+tencentcloud_lighthouse_disks

 TencentCloud Elastic Microservice(TEM)
 Resource

@@ -2077,6 +2078,7 @@ func Provider() *schema.Provider {
 "tencentcloud_lighthouse_instance_disk_num":  dataSourceTencentCloudLighthouseInstanceDiskNum(),
 "tencentcloud_lighthouse_instance_blueprint": dataSourceTencentCloudLighthouseInstanceBlueprint(),
 "tencentcloud_lighthouse_disk_config":        dataSourceTencentCloudLighthouseDiskConfig(),
+"tencentcloud_lighthouse_disks":              dataSourceTencentCloudLighthouseInstanceDisks(),
 "tencentcloud_cls_shipper_tasks":             dataSourceTencentCloudClsShipperTasks(),
 "tencentcloud_cls_machines":                  dataSourceTencentCloudClsMachines(),
 "tencentcloud_cls_machine_group_configs":     dataSourceTencentCloudClsMachineGroupConfigs(),
