39 changes: 25 additions & 14 deletions reframe/core/schedulers/slurm.py
@@ -147,6 +147,7 @@ def __init__(self):
self._sched_access_in_submit = self.get_option(
'sched_access_in_submit'
)
+ self.addl_avail_states = set()

def make_job(self, *args, **kwargs):
return _SlurmJob(*args, **kwargs)
@@ -323,7 +324,7 @@ def allnodes(self):
'could not retrieve node information') from e

node_descriptions = completed.stdout.splitlines()
- return _create_nodes(node_descriptions)
+ return _create_nodes(node_descriptions, self.addl_avail_states)

def _get_default_partition(self):
completed = _run_strict('scontrol -a show -o partitions')
@@ -436,15 +437,23 @@ def _get_reservation_nodes(self, reservation):
raise JobSchedulerError("could not extract the node names for "
"reservation '%s'" % reservation)

+ flags_match = re.search(r'Flags=(\S+)', completed.stdout)
+ if flags_match:
+     if 'MAINT' in flags_match[1].split(','):
Contributor suggested a change:

-     if 'MAINT' in flags_match[1].split(','):
+     if 'MAINT' in flags_match.group(1).split(','):
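For reference, the two spellings are equivalent on Python >= 3.6, where re.Match objects support indexing (m[1] is shorthand for m.group(1)), so the suggestion is purely a readability change. A quick self-contained check, with a made-up Flags string:

import re

m = re.search(r'Flags=(\S+)', 'Flags=MAINT,SPEC_NODES')
assert m is not None
# m[1] and m.group(1) return the same first capture group
assert m[1] == m.group(1) == 'MAINT,SPEC_NODES'
print(m[1].split(','))  # ['MAINT', 'SPEC_NODES']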

+         self.addl_avail_states.add('MAINTENANCE')
+ # else:
+ #     raise JobSchedulerError(f"could not extract the reservation "
+ #                             f"flags for reservation '{reservation}'")
Comment on lines +444 to +446:

Contributor Author:
I wasn't sure if we want to fail here or silently ignore it?

Contributor:
I'd rather simply log it with self.log().
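A sketch of what the logging variant might look like, assuming the self.log() helper the reviewer mentions; the control flow mirrors the lines under discussion and the message wording is illustrative, not the final implementation:

flags_match = re.search(r'Flags=(\S+)', completed.stdout)
if flags_match:
    if 'MAINT' in flags_match.group(1).split(','):
        self.addl_avail_states.add('MAINTENANCE')
else:
    # Log instead of raising, so an unparsable Flags field does not
    # abort the whole run.
    self.log(f"could not extract the reservation flags "
             f"for reservation '{reservation}'")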


completed = _run_strict('scontrol -a show -o %s' % reservation_nodes)
node_descriptions = completed.stdout.splitlines()
- return _create_nodes(node_descriptions)
+ return _create_nodes(node_descriptions, self.addl_avail_states)

def _get_nodes_by_name(self, nodespec):
completed = osext.run_command('scontrol -a show -o node %s' %
nodespec)
node_descriptions = completed.stdout.splitlines()
- return _create_nodes(node_descriptions)
+ return _create_nodes(node_descriptions, self.addl_avail_states)

def _update_completion_time(self, job, timestamps):
if job._completion_time is not None:
@@ -691,19 +700,19 @@ def poll(self, *jobs):
self._cancel_if_pending_too_long(job)


- def _create_nodes(descriptions):
+ def _create_nodes(descriptions, addl_avail_states=None):
nodes = set()
for descr in descriptions:
with suppress(JobSchedulerError):
- nodes.add(_SlurmNode(descr))
+ nodes.add(_SlurmNode(descr, addl_avail_states=addl_avail_states))

return nodes


class _SlurmNode(sched.Node):
'''Class representing a Slurm node.'''

- def __init__(self, node_descr):
+ def __init__(self, node_descr, addl_avail_states=None):
Contributor:
I would rather pass the flags and let the __init__() decide what to do with them, in which case you don't need addl_avail_states. Simply add the MAINTENANCE state to available_states if the flag is set.

Contributor Author:
Okay, just to make sure I understand: _create_nodes will get a flags argument (instead of addl_avail_states) and then pass it to the __init__() of each _SlurmNode, and MAINTENANCE will be added to the allowed states of the node?
My thinking was that eventually we may want to allow the user to pass even more additional "available states" through the CLI, for some reason. But I am okay with what you are suggesting too.

Contributor:
> Okay, just to make sure I understand: _create_nodes will get a flags argument (instead of addl_avail_states) and then pass it to the __init__() of each _SlurmNode, and MAINTENANCE will be added to the allowed states of the node?

Yes.

> My thinking was that eventually we may want to allow the user to pass even more additional "available states" through the CLI, for some reason. But I am okay with what you are suggesting too.

Let's keep it clean for the moment. If we want to define custom available states, maybe we should do it with a configuration parameter.
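A minimal, self-contained sketch of the design agreed above: thread the parsed reservation flags down and let the node itself decide whether MAINTENANCE counts as available. Class and attribute names here are hypothetical stand-ins, not ReFrame's:

class SlurmNodeSketch:
    '''Hypothetical stand-in for _SlurmNode, per the reviewer's proposal.'''

    BASE_AVAIL = {'ALLOCATED', 'COMPLETING', 'IDLE', 'PLANNED', 'RESERVED'}

    def __init__(self, states, flags=()):
        self._states = set(states)
        self.available_states = set(self.BASE_AVAIL)
        if 'MAINT' in flags:
            # A node in a MAINT reservation reports MAINTENANCE but is
            # still usable by jobs submitted to that reservation.
            self.available_states.add('MAINTENANCE')

    def is_avail(self):
        return self._states <= self.available_states


node = SlurmNodeSketch({'IDLE', 'MAINTENANCE'}, flags=('MAINT', 'SPEC_NODES'))
print(node.is_avail())  # True, because the MAINT flag was passed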

self._name = self._extract_attribute('NodeName', node_descr)
if not self._name:
raise JobSchedulerError(
@@ -718,6 +727,15 @@ def __init__(self, node_descr):
'State', node_descr, sep='+') or set()
self._descr = node_descr

+ self.addl_avail_states = addl_avail_states or set()
+ self.available_states = {
+     'ALLOCATED',
+     'COMPLETING',
+     'IDLE',
+     'PLANNED',
+     'RESERVED'
+ } | self.addl_avail_states

def __eq__(self, other):
if not isinstance(other, type(self)):
return NotImplemented
@@ -735,14 +753,7 @@ def in_statex(self, state):
return self._states == set(state.upper().split('+'))

def is_avail(self):
- available_states = {
-     'ALLOCATED',
-     'COMPLETING',
-     'IDLE',
-     'PLANNED',
-     'RESERVED'
- }
- return self._states <= available_states
+ return self._states <= self.available_states

def is_down(self):
return not self.is_avail()
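For clarity on the final form of is_avail(): <= on sets is Python's subset test, so a node counts as available only when every state it reports is in the allowed set. A quick illustration with made-up state sets:

avail = {'ALLOCATED', 'COMPLETING', 'IDLE', 'PLANNED', 'RESERVED', 'MAINTENANCE'}
print({'IDLE'} <= avail)                 # True
print({'IDLE', 'MAINTENANCE'} <= avail)  # True once MAINTENANCE is allowed
print({'IDLE', 'DRAIN'} <= avail)        # False -> is_down() is True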