Skip to content

Commit bbe7321

Browse files
kamalca authored and LiliDeng committed
Allow max_data_disk_count to work with maximize_capability
Currently maximize_capability breaks any test cases that rely on max_data_disk_count. Trying to manually set the value in the runbook creates a 'no quota available' error. These changes make the default value an IntRange with min=0 instead of None, allowing a passed-in value to merge with the max_capability value.
1 parent 8777594 commit bbe7321

File tree

2 files changed

+20
-3
lines changed

2 files changed

+20
-3
lines changed

lisa/schema.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -520,7 +520,7 @@ class DiskOptionSettings(FeatureSettings):
520520
),
521521
)
522522
max_data_disk_count: search_space.CountSpace = field(
523-
default=None,
523+
default_factory=partial(search_space.IntRange, min=0),
524524
metadata=field_metadata(
525525
allow_none=True, decoder=search_space.decode_count_space
526526
),

microsoft/testsuites/core/storage.py

Lines changed: 19 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -43,6 +43,7 @@
4343
from lisa.util import (
4444
BadEnvironmentStateException,
4545
LisaException,
46+
SkippedException,
4647
generate_random_chars,
4748
get_matched_str,
4849
)
@@ -641,7 +642,9 @@ def _hot_add_disk_serial(
641642

642643
# get max data disk count for the node
643644
assert node.capability.disk
644-
assert isinstance(node.capability.disk.max_data_disk_count, int)
645+
assert isinstance(
646+
node.capability.disk.max_data_disk_count, int
647+
), f"actual type: {node.capability.disk.max_data_disk_count}"
645648
max_data_disk_count = node.capability.disk.max_data_disk_count
646649
log.debug(f"max_data_disk_count: {max_data_disk_count}")
647650

@@ -658,6 +661,12 @@ def _hot_add_disk_serial(
658661
# (current_data_disk_count - 1)
659662
free_luns = list(range(current_data_disk_count, max_data_disk_count))
660663

664+
if len(free_luns) < 1:
665+
raise SkippedException(
666+
"No data disks can be added. "
667+
"Consider manually setting max_data_disk_count in the runbook."
668+
)
669+
661670
# Randomize the luns if randomize_luns is set to True
662671
# Using seed 6 to get consistent randomization across runs
663672
# Create own random.Random instance, with its own seed, which will not
@@ -728,7 +737,9 @@ def _hot_add_disk_parallel(
728737

729738
# get max data disk count for the node
730739
assert node.capability.disk
731-
assert isinstance(node.capability.disk.max_data_disk_count, int)
740+
assert isinstance(
741+
node.capability.disk.max_data_disk_count, int
742+
), f"actual type: {node.capability.disk.max_data_disk_count}"
732743
max_data_disk_count = node.capability.disk.max_data_disk_count
733744
log.debug(f"max_data_disk_count: {max_data_disk_count}")
734745

@@ -740,6 +751,12 @@ def _hot_add_disk_parallel(
740751
# disks to be added to the vm
741752
disks_to_add = max_data_disk_count - current_data_disk_count
742753

754+
if disks_to_add < 1:
755+
raise SkippedException(
756+
"No data disks can be added. "
757+
"Consider manually setting max_data_disk_count in the runbook."
758+
)
759+
743760
# get partition info before adding data disks
744761
partitions_before_adding_disks = lsblk.get_disks(force_run=True)
745762

0 commit comments

Comments (0)