diff --git a/README.md b/README.md index e877e6e3..b66a1713 100644 --- a/README.md +++ b/README.md @@ -8,16 +8,16 @@ pyslurm is the Python client library for the [Slurm Workload Manager](https://sl * [Python](https://www.python.org) - >= 3.6 * [Cython](https://cython.org) - >= 0.29.37 -This Version is for Slurm 25.05.x +This Version is for Slurm 25.11.x ## Versioning In pyslurm, the versioning scheme follows the official Slurm versioning. The first two numbers (`MAJOR.MINOR`) always correspond to Slurms Major-Release, -for example `25.05`. +for example `25.11`. The last number (`MICRO`) is however not tied in any way to Slurms `MICRO` version, but is instead PySlurm's internal Patch-Level. For example, any -pyslurm 25.05.X version should work with any Slurm 25.05.X release. +pyslurm 25.11.X version should work with any Slurm 25.11.X release. ## Installation @@ -29,8 +29,8 @@ the corresponding paths to the necessary files. You can specify those with environment variables (recommended), for example: ```shell -export SLURM_INCLUDE_DIR=/opt/slurm/25.05/include -export SLURM_LIB_DIR=/opt/slurm/25.05/lib +export SLURM_INCLUDE_DIR=/opt/slurm/25.11/include +export SLURM_LIB_DIR=/opt/slurm/25.11/lib ``` Then you can proceed to install pyslurm, for example by cloning the Repository: diff --git a/pyslurm.spec b/pyslurm.spec index f06ebb1e..5b78dd7a 100644 --- a/pyslurm.spec +++ b/pyslurm.spec @@ -1,7 +1,7 @@ %define python3_pkgversion 3.11 Name: python-pyslurm -Version: 25.5.0 +Version: 25.11.0 %define rel 1 Release: %{rel}%{?dist} Summary: Python interface to Slurm @@ -15,8 +15,8 @@ BuildRequires: python%{python3_pkgversion}-wheel BuildRequires: python%{python3_pkgversion}-Cython BuildRequires: python%{python3_pkgversion}-packaging BuildRequires: python-rpm-macros -BuildRequires: slurm-devel >= 25.05.0 -BuildRequires: slurm >= 25.05.0 +BuildRequires: slurm-devel >= 25.11.0 +BuildRequires: slurm >= 25.11.0 Requires: python%{python3_pkgversion} %description diff --git 
a/pyslurm/core/job/job.pyx b/pyslurm/core/job/job.pyx index 2f91d792..de124548 100644 --- a/pyslurm/core/job/job.pyx +++ b/pyslurm/core/job/job.pyx @@ -468,7 +468,7 @@ cdef class Job: >>> pyslurm.Job(9999).modify(changes) """ changes._create_job_submit_desc(is_update=True) - changes.ptr.job_id = self.id + changes.ptr.step_id.job_id = self.id verify_rpc(slurm_update_job(changes.ptr)) def hold(self, mode=None): @@ -654,9 +654,9 @@ cdef class Job: slurm_msg_t_init(&resp) memset(&msg, 0, sizeof(msg)) - msg.job_id = self.id + msg.step_id.job_id = self.id req.msg_type = slurm.REQUEST_BATCH_SCRIPT - req.data = &msg + req.data = &msg rc = slurm_send_recv_controller_msg(&req, &resp, working_cluster_rec) verify_rpc(rc) diff --git a/pyslurm/core/job/step.pxd b/pyslurm/core/job/step.pxd index e584c6d4..e573a823 100644 --- a/pyslurm/core/job/step.pxd +++ b/pyslurm/core/job/step.pxd @@ -27,6 +27,7 @@ from .job cimport Job from libc.string cimport memcpy, memset from pyslurm cimport slurm from pyslurm.slurm cimport ( + slurm_step_id_t, job_step_info_t, slurm_get_job_steps, job_step_info_response_msg_t, @@ -49,6 +50,7 @@ from pyslurm.utils.ctime cimport time_t from pyslurm.core.job.task_dist cimport TaskDistribution from pyslurm.db.stats cimport JobStepStatistics from pyslurm.core.job cimport stats +from pyslurm.utils.helpers cimport init_step_id cdef class JobSteps(dict): diff --git a/pyslurm/core/job/step.pyx b/pyslurm/core/job/step.pyx index ad6a8e9c..0663c710 100644 --- a/pyslurm/core/job/step.pyx +++ b/pyslurm/core/job/step.pyx @@ -107,10 +107,11 @@ cdef class JobSteps(dict): cdef: JobStep step uint32_t cnt = 0 + slurm_step_id_t step_id = init_step_id() dict steps = {} - rc = slurm_get_job_steps(0, job_id, slurm.NO_VAL, &self.info, - flags) + step_id.job_id = job_id + rc = slurm_get_job_steps(&step_id, &self.info, flags) verify_rpc(rc) # zero-out a dummy job_step_info_t @@ -235,10 +236,11 @@ cdef class JobStep: cdef: job_step_info_response_msg_t *info = NULL JobStep wrap = 
None + slurm_step_id_t _step_id = init_step_id() - job_id = job_id.id if isinstance(job_id, Job) else job_id - rc = slurm_get_job_steps(0, job_id, dehumanize_step_id(step_id), - &info, slurm.SHOW_ALL) + _step_id.job_id = job_id.id if isinstance(job_id, Job) else job_id + _step_id.step_id = dehumanize_step_id(step_id) + rc = slurm_get_job_steps(&_step_id, &info, slurm.SHOW_ALL) verify_rpc(rc) if info and info.job_step_count == 1: @@ -311,9 +313,8 @@ cdef class JobStep: >>> pyslurm.JobStep(9999, 1).send_signal(9) """ - step_id = self.ptr.step_id.step_id sig = signal_to_num(signal) - verify_rpc(slurm_signal_job_step(self.job_id, step_id, sig)) + verify_rpc(slurm_signal_job_step(&self.ptr.step_id, sig)) def cancel(self): """Cancel a Job step. @@ -327,8 +328,7 @@ cdef class JobStep: >>> import pyslurm >>> pyslurm.JobStep(9999, 1).cancel() """ - step_id = self.ptr.step_id.step_id - verify_rpc(slurm_kill_job_step(self.job_id, step_id, 9, 0)) + verify_rpc(slurm_kill_job_step(&self.ptr.step_id, 9, 0)) def modify(self, JobStep changes): """Modify a job step. 
@@ -353,8 +353,7 @@ cdef class JobStep: """ cdef JobStep js = changes js._alloc_umsg() - js.umsg.step_id = self.ptr.step_id.step_id - js.umsg.job_id = self.ptr.step_id.job_id + js.umsg.step_id = self.ptr.step_id verify_rpc(slurm_update_step(js.umsg)) def as_dict(self): diff --git a/pyslurm/core/job/submission.pyx b/pyslurm/core/job/submission.pyx index 7e47a153..fb6ac9f8 100644 --- a/pyslurm/core/job/submission.pyx +++ b/pyslurm/core/job/submission.pyx @@ -102,7 +102,7 @@ cdef class JobSubmitDescription: self._create_job_submit_desc() verify_rpc(slurm_submit_batch_job(self.ptr, &resp)) - job_id = resp.job_id + job_id = resp.step_id.job_id slurm_free_submit_response_response_msg(resp) return job_id diff --git a/pyslurm/core/partition.pyx b/pyslurm/core/partition.pyx index e54bc37b..b4b08fd6 100644 --- a/pyslurm/core/partition.pyx +++ b/pyslurm/core/partition.pyx @@ -748,20 +748,20 @@ def _split_oversubscribe_str(val): def _select_type_int_to_list(stype): - # The rest of the CR_* stuff are just some extra parameters to the select + # The rest of the SELECT_* stuff are just some extra parameters to the select # plugin out = _select_type_int_to_cons_res(stype) - if stype & slurm.CR_ONE_TASK_PER_CORE: + if stype & slurm.SELECT_ONE_TASK_PER_CORE: out.append("ONE_TASK_PER_CORE") - if stype & slurm.CR_PACK_NODES: + if stype & slurm.SELECT_PACK_NODES: out.append("PACK_NODES") - if stype & slurm.CR_CORE_DEFAULT_DIST_BLOCK: + if stype & slurm.SELECT_CORE_DEFAULT_DIST_BLOCK: out.append("CORE_DEFAULT_DIST_BLOCK") - if stype & slurm.CR_LLN: + if stype & slurm.SELECT_LLN: out.append("LLN") return out @@ -772,19 +772,19 @@ def _select_type_int_to_cons_res(stype): # The 3 main select types are mutually exclusive, and may be combined with # CR_MEMORY # CR_BOARD exists but doesn't show up in the documentation, so ignore it. 
- if stype & slurm.CR_CPU and stype & slurm.CR_MEMORY: + if stype & slurm.SELECT_CPU and stype & slurm.SELECT_MEMORY: return "CPU_MEMORY" - elif stype & slurm.CR_CORE and stype & slurm.CR_MEMORY: + elif stype & slurm.SELECT_CORE and stype & slurm.SELECT_MEMORY: return "CORE_MEMORY" - elif stype & slurm.CR_SOCKET and stype & slurm.CR_MEMORY: + elif stype & slurm.SELECT_SOCKET and stype & slurm.SELECT_MEMORY: return "SOCKET_MEMORY" - elif stype & slurm.CR_CPU: + elif stype & slurm.SELECT_CPU: return "CPU" - elif stype & slurm.CR_CORE: + elif stype & slurm.SELECT_CORE: return "CORE" - elif stype & slurm.CR_SOCKET: + elif stype & slurm.SELECT_SOCKET: return "SOCKET" - elif stype & slurm.CR_MEMORY: + elif stype & slurm.SELECT_MEMORY: return "MEMORY" else: return [] diff --git a/pyslurm/core/slurmctld/config.pxd b/pyslurm/core/slurmctld/config.pxd index f754ea33..156bc327 100644 --- a/pyslurm/core/slurmctld/config.pxd +++ b/pyslurm/core/slurmctld/config.pxd @@ -520,6 +520,10 @@ cdef class Config: Parameters for the MCS Plugin. {slurm.conf#OPT_MCSParameters} + metrics_type (str): + Name of the Metrics plugin used. + + {slurm.conf#OPT_MetricsType} min_job_age (int): Minimum age (in seconds) of a completed Job before its record is cleared from slurmctlds memory. 
diff --git a/pyslurm/core/slurmctld/config.pyx b/pyslurm/core/slurmctld/config.pyx index 6f9d2ed6..8b31fdf3 100644 --- a/pyslurm/core/slurmctld/config.pyx +++ b/pyslurm/core/slurmctld/config.pyx @@ -287,10 +287,6 @@ cdef class Config: def accounting_storage_type(self): return cstr.to_unicode(self.ptr.accounting_storage_type) - @property - def accounting_storage_user(self): - return cstr.to_unicode(self.ptr.accounting_storage_user) - @property def accounting_store_flags(self): out = [] @@ -569,8 +565,8 @@ cdef class Config: return cstr.to_unicode(self.ptr.job_comp_user) @property - def job_container_type(self): - return cstr.to_unicode(self.ptr.job_container_plugin) + def namespace_plugin(self): + return cstr.to_unicode(self.ptr.namespace_plugin) @property def job_defaults(self): @@ -686,6 +682,10 @@ cdef class Config: def mcs_parameters(self): return cstr.to_list(self.ptr.mcs_plugin_params) + @property + def metrics_type(self): + return cstr.to_unicode(self.ptr.metrics_type) + @property def min_job_age(self): return u32_parse(self.ptr.min_job_age) diff --git a/pyslurm/deprecated.pyx b/pyslurm/deprecated.pyx index 9236fac1..f613bcdc 100644 --- a/pyslurm/deprecated.pyx +++ b/pyslurm/deprecated.pyx @@ -741,28 +741,6 @@ cpdef int slurm_signal_job(uint32_t JobID=0, uint16_t Signal=0) except? -1: # -cpdef int slurm_signal_job_step(uint32_t JobID=0, uint32_t JobStep=0, - uint16_t Signal=0) except? -1: - """Send a signal to a slurm job step. - - Args: - JobID (int): The job id. - JobStep: The id of the job step. - Signal (int, optional): Signal to send. - - Returns: - int: 0 for success or -1 for error and set the slurm errno. - """ - cdef int apiError = 0 - cdef int errCode = slurm.slurm_signal_job_step(JobID, JobStep, Signal) - - if errCode != 0: - apiError = slurm_get_errno() - raise ValueError(stringOrNone(slurm.slurm_strerror(apiError), ''), apiError) - - return errCode - - cpdef int slurm_kill_job(uint32_t JobID=0, uint16_t Signal=0, uint16_t BatchFlag=0) except? 
-1: """Terminate a running slurm job step. @@ -785,28 +763,6 @@ cpdef int slurm_kill_job(uint32_t JobID=0, uint16_t Signal=0, return errCode -cpdef int slurm_kill_job_step(uint32_t JobID=0, uint32_t JobStep=0, - uint16_t Signal=0) except? -1: - """Terminate a running slurm job step. - - Args: - JobID (int): The job id. - JobStep (int): The id of the job step. - Signal (int, optional): Signal to send. - - Returns: - int: 0 for success or -1 for error, and slurm errno is set. - """ - cdef int apiError = 0 - cdef int errCode = slurm.slurm_kill_job_step(JobID, JobStep, Signal, 0) - - if errCode != 0: - apiError = slurm_get_errno() - raise ValueError(stringOrNone(slurm.slurm_strerror(apiError), ''), apiError) - - return errCode - - cpdef int slurm_kill_job2(const char *JobID='', uint16_t Signal=0, uint16_t BatchFlag=0, char* sibling=NULL) except? -1: """Terminate a running slurm job step. @@ -830,26 +786,6 @@ cpdef int slurm_kill_job2(const char *JobID='', uint16_t Signal=0, return errCode -cpdef int slurm_complete_job(uint32_t JobID=0, uint32_t JobCode=0) except? -1: - """Complete a running slurm job step. - - Args: - JobID (int): The job id. - JobCode (int, optional): Return code for the job. - - Returns: - int: 0 for success or -1 for error and set slurm errno - """ - cdef int apiError = 0 - cdef int errCode = slurm.slurm_complete_job(JobID, JobCode) - - if errCode != 0: - apiError = slurm_get_errno() - raise ValueError(stringOrNone(slurm.slurm_strerror(apiError), ''), apiError) - - return errCode - - cpdef int slurm_notify_job(uint32_t JobID=0, char* Msg='') except? -1: """Notify a message to a running slurm job step. @@ -870,25 +806,6 @@ cpdef int slurm_notify_job(uint32_t JobID=0, char* Msg='') except? -1: return errCode -cpdef int slurm_terminate_job_step(uint32_t JobID=0, uint32_t JobStep=0) except? -1: - """Terminate a running slurm job step. 
- - Args: - JobID (int): The job id - JobStep (int): The id of the job step - - Returns: - 0 for success or -1 for error, and the slurm error code is set - """ - cdef int apiError = 0 - cdef int errCode = slurm.slurm_terminate_job_step(JobID, JobStep) - - if errCode != 0: - apiError = slurm_get_errno() - raise ValueError(stringOrNone(slurm.slurm_strerror(apiError), ''), apiError) - - return errCode - # # Slurm Job Class to Control Configuration Read/Update # @@ -2031,7 +1948,7 @@ cdef class job: retries += 1 p_time.sleep(retries) - job_id = resp.job_id + job_id = resp.step_id.job_id slurm.slurm_free_submit_response_response_msg(resp) #return "Submitted batch job %s" % job_id @@ -2213,251 +2130,6 @@ def slurm_perror(char* Msg=''): slurm.slurm_perror(Msg) -# -# Jobstep Class -# - - -cdef class jobstep: - """Access/Modify Slurm Jobstep Information.""" - - cdef: - slurm.time_t _lastUpdate - uint32_t JobID, StepID - uint16_t _ShowFlags - dict _JobStepDict - - def __cinit__(self): - self._ShowFlags = 0 - self._lastUpdate = 0 - self.JobID = 4294967294 # 0xfffffffe - NOVAL - self.StepID = 4294967294 # 0xfffffffe - NOVAL - self._JobStepDict = {} - - def __dealloc__(self): - self.__destroy() - - cpdef __destroy(self): - """Free the slurm job memory allocated by load jobstep method.""" - self._lastUpdate = 0 - self._ShowFlags = 0 - self._JobStepDict = {} - - def lastUpdate(self): - """Get the time (epoch seconds) the jobstep data was updated. 
- - Returns: - (int): Epoch seconds - """ - return self._lastUpdate - - def ids(self): - cdef dict jobsteps = {} - - if not self._JobStepDict: - self.get() - - for key, value in self._JobStepDict.items(): - for new_key in value.keys(): - jobsteps.setdefault(key, []).append(new_key) - - return jobsteps - - def find(self, jobID=-1, stepID=-1): - cdef dict retDict = {} - - # retlist = [key for key, value in self.blockID.items() - # if self.blockID[key][name] == value ] - - for key, value in self._JobStepDict.items(): - if self._JobStepDict[key]['name'] == value: - retDict.append(key) - - return retDict - - def get(self): - """Get slurm jobstep information. - - Returns: - (dict): Data whose key is the jobstep ID. - """ - self.__get() - - return self._JobStepDict - - cpdef __get(self): - """Load details about job steps. - - This method loads details about job steps that satisfy the job_id - and/or step_id specifications provided if the data has been updated - since the update_time specified. 
- """ - cdef: - slurm.job_step_info_response_msg_t *job_step_info_ptr = NULL - - slurm.time_t last_time = 0 - dict Steps = {} - dict StepDict = {} - uint16_t ShowFlags = self._ShowFlags ^ slurm.SHOW_ALL - size_t i = 0 - int errCode = slurm.slurm_get_job_steps( - last_time, self.JobID, self.StepID, &job_step_info_ptr, ShowFlags - ) - - if errCode != 0: - self._JobStepDict = {} - return - - if job_step_info_ptr is not NULL: - - for i in range(job_step_info_ptr.job_step_count): - - #HVB - job_id = job_step_info_ptr.job_steps[i].step_id.job_id - step_id = job_step_info_ptr.job_steps[i].step_id.step_id - - Steps[job_id] = {} - Step_dict = {} - - if job_step_info_ptr.job_steps[i].array_job_id: - Step_dict['array_job_id'] = job_step_info_ptr.job_steps[i].array_job_id - Step_dict['array_task_id'] = job_step_info_ptr.job_steps[i].array_task_id - - if step_id == SLURM_PENDING_STEP: - Step_dict['step_id_str'] = "{0}_{1}.TBD".format(Step_dict['array_job_id'], Step_dict['array_task_id']) - elif step_id == SLURM_EXTERN_CONT: - Step_dict['step_id_str'] = "{0}_{1}.extern".format(Step_dict['array_job_id'], Step_dict['array_task_id']) - else: - Step_dict['step_id_str'] = "{0}_{1}.{2}".format(Step_dict['array_job_id'], Step_dict['array_task_id'], step_id) - else: - if step_id == SLURM_PENDING_STEP: - Step_dict['step_id_str'] = "{0}.TBD".format(job_id) - elif step_id == SLURM_EXTERN_CONT: - Step_dict['step_id_str'] = "{0}.extern".format(job_id) - else: - Step_dict['step_id_str'] = "{0}.{1}".format(job_id, step_id) - - Step_dict['cluster'] = stringOrNone(job_step_info_ptr.job_steps[i].cluster, '') - Step_dict['container'] = stringOrNone(job_step_info_ptr.job_steps[i].container, '') - Step_dict['cpus_per_tres'] = stringOrNone(job_step_info_ptr.job_steps[i].cpus_per_tres, '') - - Step_dict['dist'] = stringOrNone( - slurm.slurm_step_layout_type_name( - job_step_info_ptr.job_steps[i].task_dist - ), '' - ) - - Step_dict['mem_per_tres'] = 
stringOrNone(job_step_info_ptr.job_steps[i].mem_per_tres, '') - Step_dict['name'] = stringOrNone( job_step_info_ptr.job_steps[i].name, '') - Step_dict['network'] = stringOrNone( job_step_info_ptr.job_steps[i].network, '') - Step_dict['nodes'] = stringOrNone(job_step_info_ptr.job_steps[i].nodes, '') - Step_dict['num_cpus'] = job_step_info_ptr.job_steps[i].num_cpus - Step_dict['num_tasks'] = job_step_info_ptr.job_steps[i].num_tasks - Step_dict['partition'] = stringOrNone(job_step_info_ptr.job_steps[i].partition, '') - Step_dict['resv_ports'] = stringOrNone(job_step_info_ptr.job_steps[i].resv_ports, '') - Step_dict['run_time'] = job_step_info_ptr.job_steps[i].run_time - Step_dict['srun_host'] = stringOrNone(job_step_info_ptr.job_steps[i].srun_host, '') - Step_dict['srun_pid'] = job_step_info_ptr.job_steps[i].srun_pid - Step_dict['start_time'] = job_step_info_ptr.job_steps[i].start_time - - job_state = slurm.slurm_job_state_string(job_step_info_ptr.job_steps[i].state) - Step_dict['state'] = stringOrNone(job_state, '') - Step_dict['submit_line'] = stringOrNone(job_step_info_ptr.job_steps[i].submit_line, '') - - if job_step_info_ptr.job_steps[i].time_limit == slurm.INFINITE: - Step_dict['time_limit'] = "UNLIMITED" - Step_dict['time_limit_str'] = "UNLIMITED" - else: - Step_dict['time_limit'] = job_step_info_ptr.job_steps[i].time_limit - Step_dict['time_limit_str'] = secs2time_str(job_step_info_ptr.job_steps[i].time_limit) - - Step_dict['tres_bind'] = stringOrNone( - job_step_info_ptr.job_steps[i].tres_bind, '' - ) - - Step_dict['tres_freq'] = stringOrNone( - job_step_info_ptr.job_steps[i].tres_freq, '' - ) - - Step_dict['tres_per_step'] = stringOrNone( - job_step_info_ptr.job_steps[i].tres_per_step, '' - ) - - Step_dict['tres_per_node'] = stringOrNone( - job_step_info_ptr.job_steps[i].tres_per_node, '' - ) - - Step_dict['tres_per_socket'] = stringOrNone( - job_step_info_ptr.job_steps[i].tres_per_socket, '' - ) - - Step_dict['tres_per_task'] = stringOrNone( - 
job_step_info_ptr.job_steps[i].tres_per_task, '' - ) - - Step_dict['user_id'] = job_step_info_ptr.job_steps[i].user_id - - Steps[job_id][step_id] = Step_dict - - slurm.slurm_free_job_step_info_response_msg(job_step_info_ptr) - - self._JobStepDict = Steps - - def layout(self, uint32_t JobID=0, uint32_t StepID=0): - """Get the slurm job step layout from a given job and step id. - - Args: - JobID (int): The job id. - StepID (int): The id of the job step. - - Returns: - (list): List of job step layout. - """ - cdef: - slurm.slurm_step_id_t step_id - slurm.slurm_step_layout_t *old_job_step_ptr = NULL - int i = 0, j = 0, Node_cnt = 0 - - dict Layout = {} - list Nodes = [], Node_list = [], Tids_list = [] - - self.step_id.job_id = JobID - self.step_id.step_id = StepID - self.step_id_step_het_comp = slurm.NO_VAL - - old_job_step_ptr = slurm.slurm_job_step_layout_get(&step_id) - if old_job_step_ptr is not NULL: - - Node_cnt = old_job_step_ptr.node_cnt - - Layout['node_cnt'] = Node_cnt - Layout['node_list'] = stringOrNone(old_job_step_ptr.node_list, '') - Layout['plane_size'] = old_job_step_ptr.plane_size - Layout['task_cnt'] = old_job_step_ptr.task_cnt - Layout['task_dist'] = old_job_step_ptr.task_dist - Layout['task_dist'] = stringOrNone( - slurm.slurm_step_layout_type_name(old_job_step_ptr.task_dist), '' - ) - - hl = hostlist() - node_list = stringOrNone(old_job_step_ptr.node_list, '') - hl.create(node_list) - Nodes = hl.get_list() - hl.destroy() - - for i, node in enumerate(Nodes): - Tids_list = [] - for j in range(old_job_step_ptr.tasks[i]): - Tids_list.append(old_job_step_ptr.tids[i][j]) - Node_list.append([stringOrNone(node, ''), Tids_list]) - - Layout['tasks'] = Node_list - - slurm.slurm_job_step_layout_free(old_job_step_ptr) - - return Layout - - # # Hostlist Class # @@ -3159,398 +2831,6 @@ def create_reservation_dict(): } -# -# Statistics -# - - -cdef class statistics: - """Slurm Controller statistics.""" - - cdef: - slurm.stats_info_request_msg_t _req - 
slurm.stats_info_response_msg_t *_buf - dict _StatsDict - - def __cinit__(self): - self._buf = NULL - self._StatsDict = {} - - def __dealloc__(self): - pass - - def get(self): - """Get slurm statistics information. - - Returns: - (dict): Slurm Controller statistics - """ - cdef: - int errCode - int apiError - uint32_t i - dict rpc_type_stats - dict rpc_user_stats - - self._req.command_id = STAT_COMMAND_GET - - errCode = slurm.slurm_get_statistics(&self._buf, - &self._req) - - if errCode == slurm.SLURM_SUCCESS: - self._StatsDict['parts_packed'] = self._buf.parts_packed - self._StatsDict['req_time'] = self._buf.req_time - self._StatsDict['req_time_start'] = self._buf.req_time_start - self._StatsDict['server_thread_count'] = self._buf.server_thread_count - self._StatsDict['agent_queue_size'] = self._buf.agent_queue_size - - self._StatsDict['schedule_cycle_max'] = self._buf.schedule_cycle_max - self._StatsDict['schedule_cycle_last'] = self._buf.schedule_cycle_last - self._StatsDict['schedule_cycle_sum'] = self._buf.schedule_cycle_sum - self._StatsDict['schedule_cycle_counter'] = self._buf.schedule_cycle_counter - self._StatsDict['schedule_cycle_depth'] = self._buf.schedule_cycle_depth - self._StatsDict['schedule_queue_len'] = self._buf.schedule_queue_len - - self._StatsDict['jobs_submitted'] = self._buf.jobs_submitted - self._StatsDict['jobs_started'] = self._buf.jobs_started - self._StatsDict['jobs_completed'] = self._buf.jobs_completed - self._StatsDict['jobs_canceled'] = self._buf.jobs_canceled - self._StatsDict['jobs_failed'] = self._buf.jobs_failed - - self._StatsDict['jobs_pending'] = self._buf.jobs_pending - self._StatsDict['jobs_running'] = self._buf.jobs_running - self._StatsDict['job_states_ts'] = self._buf.job_states_ts - - self._StatsDict['bf_backfilled_jobs'] = self._buf.bf_backfilled_jobs - self._StatsDict['bf_last_backfilled_jobs'] = self._buf.bf_last_backfilled_jobs - self._StatsDict['bf_cycle_counter'] = self._buf.bf_cycle_counter - 
self._StatsDict['bf_cycle_sum'] = self._buf.bf_cycle_sum - self._StatsDict['bf_cycle_last'] = self._buf.bf_cycle_last - self._StatsDict['bf_cycle_max'] = self._buf.bf_cycle_max - self._StatsDict['bf_last_depth'] = self._buf.bf_last_depth - self._StatsDict['bf_last_depth_try'] = self._buf.bf_last_depth_try - self._StatsDict['bf_depth_sum'] = self._buf.bf_depth_sum - self._StatsDict['bf_depth_try_sum'] = self._buf.bf_depth_try_sum - self._StatsDict['bf_queue_len'] = self._buf.bf_queue_len - self._StatsDict['bf_queue_len_sum'] = self._buf.bf_queue_len_sum - self._StatsDict['bf_when_last_cycle'] = self._buf.bf_when_last_cycle - self._StatsDict['bf_active'] = self._buf.bf_active - - rpc_type_stats = {} - - for i in range(self._buf.rpc_type_size): - try: - rpc_type = self.__rpc_num2string(self._buf.rpc_type_id[i]) - except KeyError: - rpc_type = "UNKNOWN" - rpc_type_stats[rpc_type] = {} - rpc_type_stats[rpc_type]['id'] = self._buf.rpc_type_id[i] - rpc_type_stats[rpc_type]['count'] = self._buf.rpc_type_cnt[i] - if self._buf.rpc_type_cnt[i] == 0: - rpc_type_stats[rpc_type]['ave_time'] = 0 - else: - rpc_type_stats[rpc_type]['ave_time'] = int(self._buf.rpc_type_time[i] / - self._buf.rpc_type_cnt[i]) - rpc_type_stats[rpc_type]['total_time'] = int(self._buf.rpc_type_time[i]) - self._StatsDict['rpc_type_stats'] = rpc_type_stats - - rpc_user_stats = {} - - for i in range(self._buf.rpc_user_size): - try: - rpc_user = getpwuid(self._buf.rpc_user_id[i])[0] - except KeyError: - rpc_user = str(self._buf.rpc_user_id[i]) - rpc_user_stats[rpc_user] = {} - rpc_user_stats[rpc_user]["id"] = self._buf.rpc_user_id[i] - rpc_user_stats[rpc_user]["count"] = self._buf.rpc_user_cnt[i] - if self._buf.rpc_user_cnt[i] == 0: - rpc_user_stats[rpc_user]["ave_time"] = 0 - else: - rpc_user_stats[rpc_user]["ave_time"] = int(self._buf.rpc_user_time[i] / - self._buf.rpc_user_cnt[i]) - rpc_user_stats[rpc_user]["total_time"] = int(self._buf.rpc_user_time[i]) - self._StatsDict['rpc_user_stats'] = 
rpc_user_stats - - slurm.slurm_free_stats_response_msg(self._buf) - self._buf = NULL - return self._StatsDict - else: - apiError = slurm_get_errno() - raise ValueError(stringOrNone(slurm.slurm_strerror(apiError), ''), apiError) - - def reset(self): - """Reset scheduling statistics - - This method requires root privileges. - """ - cdef: - int apiError - int errCode - - self._req.command_id = STAT_COMMAND_RESET - errCode = slurm.slurm_reset_statistics(&self._req) - - if errCode == slurm.SLURM_SUCCESS: - return errCode - else: - apiError = slurm_get_errno() - raise ValueError(stringOrNone(slurm.slurm_strerror(apiError), ''), apiError) - - cpdef __rpc_num2string(self, uint16_t opcode): - cdef dict num2string - - num2string = { - 1001: "REQUEST_NODE_REGISTRATION_STATUS", - 1002: "MESSAGE_NODE_REGISTRATION_STATUS", - 1003: "REQUEST_RECONFIGURE", - 1004: "REQUEST_RECONFIGURE_WITH_CONFIG", - 1005: "REQUEST_SHUTDOWN", - 1006: "DEFUNCT_RPC_1006", - 1007: "DEFUNCT_RPC_1007", - 1008: "REQUEST_PING", - 1009: "REQUEST_CONTROL", - 1010: "REQUEST_SET_DEBUG_LEVEL", - 1011: "REQUEST_HEALTH_CHECK", - 1012: "REQUEST_TAKEOVER", - 1013: "REQUEST_SET_SCHEDLOG_LEVEL", - 1014: "REQUEST_SET_DEBUG_FLAGS", - 1015: "REQUEST_REBOOT_NODES", - 1016: "RESPONSE_PING_SLURMD", - 1017: "REQUEST_ACCT_GATHER_UPDATE", - 1018: "RESPONSE_ACCT_GATHER_UPDATE", - 1019: "REQUEST_ACCT_GATHER_ENERGY", - 1020: "RESPONSE_ACCT_GATHER_ENERGY", - 1021: "REQUEST_LICENSE_INFO", - 1022: "RESPONSE_LICENSE_INFO", - 1023: "REQUEST_SET_FS_DAMPENING_FACTOR", - 1024: "RESPONSE_NODE_REGISTRATION", - - 1433: "PERSIST_RC", - - 2000: "DBD_MESSAGES_END", - - 2001: "REQUEST_BUILD_INFO", - 2002: "RESPONSE_BUILD_INFO", - 2003: "REQUEST_JOB_INFO", - 2004: "RESPONSE_JOB_INFO", - 2005: "REQUEST_JOB_STEP_INFO", - 2006: "RESPONSE_JOB_STEP_INFO", - 2007: "REQUEST_NODE_INFO", - 2008: "RESPONSE_NODE_INFO", - 2009: "REQUEST_PARTITION_INFO", - 2010: "RESPONSE_PARTITION_INFO", - 2011: "DEFUNCT_RPC_2011", - 2012: "DEFUNCT_RPC_2012", - 2013: 
"REQUEST_JOB_ID", - 2014: "RESPONSE_JOB_ID", - 2015: "REQUEST_CONFIG", - 2016: "RESPONSE_CONFIG", - 2017: "REQUEST_TRIGGER_SET", - 2018: "REQUEST_TRIGGER_GET", - 2019: "REQUEST_TRIGGER_CLEAR", - 2020: "RESPONSE_TRIGGER_GET", - 2021: "REQUEST_JOB_INFO_SINGLE", - 2022: "REQUEST_SHARE_INFO", - 2023: "RESPONSE_SHARE_INFO", - 2024: "REQUEST_RESERVATION_INFO", - 2025: "RESPONSE_RESERVATION_INFO", - 2026: "REQUEST_PRIORITY_FACTORS", - 2027: "RESPONSE_PRIORITY_FACTORS", - 2028: "REQUEST_TOPO_INFO", - 2029: "RESPONSE_TOPO_INFO", - 2030: "REQUEST_TRIGGER_PULL", - 2031: "REQUEST_FRONT_END_INFO", - 2032: "RESPONSE_FRONT_END_INFO", - 2033: "DEFUNCT_RPC_2033", - 2034: "DEFUNCT_RPC_2034", - 2035: "REQUEST_STATS_INFO", - 2036: "RESPONSE_STATS_INFO", - 2037: "REQUEST_BURST_BUFFER_INFO", - 2038: "RESPONSE_BURST_BUFFER_INFO", - 2039: "REQUEST_JOB_USER_INFO", - 2040: "REQUEST_NODE_INFO_SINGLE", - 2041: "DEFUNCT_RPC_2041", - 2042: "DEFUNCT_RPC_2042", - 2043: "REQUEST_ASSOC_MGR_INFO", - 2044: "RESPONSE_ASSOC_MGR_INFO", - 2045: "DEFUNCT_RPC_2045", - 2046: "DEFUNCT_RPC_2046", - 2047: "DEFUNCT_RPC_2047", - 2048: "DEFUNCT_RPC_2048", - 2049: "REQUEST_FED_INFO", - 2050: "RESPONSE_FED_INFO", - 2051: "REQUEST_BATCH_SCRIPT", - 2052: "RESPONSE_BATCH_SCRIPT", - 2053: "REQUEST_CONTROL_STATUS", - 2054: "RESPONSE_CONTROL_STATUS", - 2055: "REQUEST_BURST_BUFFER_STATUS", - 2056: "RESPONSE_BURST_BUFFER_STATUS", - - 2200: "REQUEST_CRONTAB", - 2201: "RESPONSE_CRONTAB", - 2202: "REQUEST_UPDATE_CRONTAB", - 2203: "RESPONSE_UPDATE_CRONTAB", - - 3001: "REQUEST_UPDATE_JOB", - 3002: "REQUEST_UPDATE_NODE", - 3003: "REQUEST_CREATE_PARTITION", - 3004: "REQUEST_DELETE_PARTITION", - 3005: "REQUEST_UPDATE_PARTITION", - 3006: "REQUEST_CREATE_RESERVATION", - 3007: "RESPONSE_CREATE_RESERVATION", - 3008: "REQUEST_DELETE_RESERVATION", - 3009: "REQUEST_UPDATE_RESERVATION", - 3010: "DEFUNCT_RPC_3010", - 3011: "REQUEST_UPDATE_FRONT_END", - 3012: "DEFUNCT_RPC_3012", - 3013: "DEFUNCT_RPC_3013", - 3014: "REQUEST_DELETE_NODE", - 
3015: "REQUEST_CREATE_NODE", - - 4001: "REQUEST_RESOURCE_ALLOCATION", - 4002: "RESPONSE_RESOURCE_ALLOCATION", - 4003: "REQUEST_SUBMIT_BATCH_JOB", - 4004: "RESPONSE_SUBMIT_BATCH_JOB", - 4005: "REQUEST_BATCH_JOB_LAUNCH", - 4006: "REQUEST_CANCEL_JOB", - 4007: "DEFUNCT_RPC_4007", - 4008: "DEFUNCT_RPC_4008", - 4009: "DEFUNCT_RPC_4009", - 4010: "DEFUNCT_RPC_4010", - 4011: "DEFUNCT_RPC_4011", - 4012: "REQUEST_JOB_WILL_RUN", - 4013: "RESPONSE_JOB_WILL_RUN", - 4014: "REQUEST_JOB_ALLOCATION_INFO", - 4015: "RESPONSE_JOB_ALLOCATION_INFO", - 4016: "DEFUNCT_RPC_4017", - 4017: "DEFUNCT_RPC_4018", - 4018: "DEFUNCT_RPC_4019", - 4019: "REQUEST_JOB_READY", - 4020: "RESPONSE_JOB_READY", - 4021: "REQUEST_JOB_END_TIME", - 4022: "REQUEST_JOB_NOTIFY", - 4023: "REQUEST_JOB_SBCAST_CRED", - 4024: "RESPONSE_JOB_SBCAST_CRED", - 4025: "REQUEST_HET_JOB_ALLOCATION", - 4026: "RESPONSE_HET_JOB_ALLOCATION", - 4027: "REQUEST_HET_JOB_ALLOC_INFO", - 4028: "REQUEST_SUBMIT_BATCH_HET_JOB", - - 4500: "REQUEST_CTLD_MULT_MSG", - 4501: "RESPONSE_CTLD_MULT_MSG", - 4502: "REQUEST_SIB_MSG", - 4503: "REQUEST_SIB_JOB_LOCK", - 4504: "REQUEST_SIB_JOB_UNLOCK", - 4505: "REQUEST_SEND_DEP", - 4506: "REQUEST_UPDATE_ORIGIN_DEP", - - 5001: "REQUEST_JOB_STEP_CREATE", - 5002: "RESPONSE_JOB_STEP_CREATE", - 5003: "DEFUNCT_RPC_5003", - 5004: "DEFUNCT_RPC_5004", - 5005: "REQUEST_CANCEL_JOB_STEP", - 5006: "DEFUNCT_RPC_5006", - 5007: "REQUEST_UPDATE_JOB_STEP", - 5008: "DEFUNCT_RPC_5008", - 5009: "DEFUNCT_RPC_5009", - 5010: "DEFUNCT_RPC_5010", - 5011: "DEFUNCT_RPC_5011", - 5012: "DEFUNCT_RPC_5012", - 5013: "DEFUNCT_RPC_5013", - 5014: "REQUEST_SUSPEND", - 5015: "DEFUNCT_RPC_5015", - 5016: "REQUEST_STEP_COMPLETE", - 5017: "REQUEST_COMPLETE_JOB_ALLOCATION", - 5018: "REQUEST_COMPLETE_BATCH_SCRIPT", - 5019: "REQUEST_JOB_STEP_STAT", - 5020: "RESPONSE_JOB_STEP_STAT", - 5021: "REQUEST_STEP_LAYOUT", - 5022: "RESPONSE_STEP_LAYOUT", - 5023: "REQUEST_JOB_REQUEUE", - 5024: "REQUEST_DAEMON_STATUS", - 5025: "RESPONSE_SLURMD_STATUS", - 5026: 
"DEFUNCT_RPC_5026", - 5027: "REQUEST_JOB_STEP_PIDS", - 5028: "RESPONSE_JOB_STEP_PIDS", - 5029: "REQUEST_FORWARD_DATA", - 5030: "DEFUNCT_RPC_5030", - 5031: "REQUEST_SUSPEND_INT", - 5032: "REQUEST_KILL_JOB", - 5033: "DEFUNCT_RPC_5033", - 5034: "RESPONSE_JOB_ARRAY_ERRORS", - 5035: "REQUEST_NETWORK_CALLERID", - 5036: "RESPONSE_NETWORK_CALLERID", - 5037: "DEFUNCT_RPC_5037", - 5038: "REQUEST_TOP_JOB", - 5039: "REQUEST_AUTH_TOKEN", - 5040: "RESPONSE_AUTH_TOKEN", - - 6001: "REQUEST_LAUNCH_TASKS", - 6002: "RESPONSE_LAUNCH_TASKS", - 6003: "MESSAGE_TASK_EXIT", - 6004: "REQUEST_SIGNAL_TASKS", - 6005: "DEFUNCT_RPC_6005", - 6006: "REQUEST_TERMINATE_TASKS", - 6007: "REQUEST_REATTACH_TASKS", - 6008: "RESPONSE_REATTACH_TASKS", - 6009: "REQUEST_KILL_TIMELIMIT", - 6010: "DEFUNCT_RPC_6010", - 6011: "REQUEST_TERMINATE_JOB", - 6012: "MESSAGE_EPILOG_COMPLETE", - 6013: "REQUEST_ABORT_JOB", - - 6014: "REQUEST_FILE_BCAST", - 6015: "DEFUNCT_RPC_6015", - 6016: "REQUEST_KILL_PREEMPTED", - - 6017: "REQUEST_LAUNCH_PROLOG", - 6018: "REQUEST_COMPLETE_PROLOG", - 6019: "RESPONSE_PROLOG_EXECUTING", - - 6500: "REQUEST_PERSIST_INIT", - - 7001: "SRUN_PING", - 7002: "SRUN_TIMEOUT", - 7003: "SRUN_NODE_FAIL", - 7004: "SRUN_JOB_COMPLETE", - 7005: "SRUN_USER_MSG", - 7006: "DEFUNCT_RPC_7006", - 7007: "SRUN_STEP_MISSING", - 7008: "SRUN_REQUEST_SUSPEND", - 7009: "SRUN_STEP_SIGNAL", - - 7010: "SRUN_NET_FORWARD", - - 7201: "PMI_KVS_PUT_REQ", - 7202: "DEFUNCT_RPC_7202", - 7203: "PMI_KVS_GET_REQ", - 7204: "PMI_KVS_GET_RESP", - - 8001: "RESPONSE_SLURM_RC", - 8002: "RESPONSE_SLURM_RC_MSG", - 8003: "RESPONSE_SLURM_REROUTE_MSG", - - 9001: "RESPONSE_FORWARD_FAILED", - - 10001: "ACCOUNTING_UPDATE_MSG", - 10002: "ACCOUNTING_FIRST_REG", - 10003: "ACCOUNTING_REGISTER_CTLD", - 10004: "ACCOUNTING_TRES_CHANGE_DB", - 10005: "ACCOUNTING_NODES_CHANGE_DB", - - 11001: "SLURMSCRIPTD_REQUEST_FLUSH", - 11002: "SLURMSCRIPTD_REQUEST_FLUSH_JOB", - 11003: "SLURMSCRIPTD_REQUEST_RECONFIG", - 11004: "SLURMSCRIPTD_REQUEST_RUN_SCRIPT", - 
11005: "SLURMSCRIPTD_REQUEST_SCRIPT_COMPLETE", - 11006: "SLURMSCRIPTD_REQUEST_UPDATE_DEBUG_FLAGS", - 11007: "SLURMSCRIPTD_REQUEST_UPDATE_LOG", - 11008: "SLURMSCRIPTD_SHUTDOWN", - } - return num2string[opcode] - - # # QOS Class # @@ -4660,9 +3940,6 @@ cdef inline list debug_flags2str(uint64_t debug_flags): if (debug_flags & DEBUG_FLAG_JAG): debugFlags.append('Jag') - if (debug_flags & DEBUG_FLAG_JOB_CONT): - debugFlags.append('JobContainer') - if (debug_flags & DEBUG_FLAG_NODE_FEATURES): debugFlags.append('NodeFeatures') diff --git a/pyslurm/pydefines/slurm_defines.pxi b/pyslurm/pydefines/slurm_defines.pxi index 509eac9b..85a044ff 100644 --- a/pyslurm/pydefines/slurm_defines.pxi +++ b/pyslurm/pydefines/slurm_defines.pxi @@ -128,16 +128,6 @@ SHOW_SIBLING = slurm.SHOW_SIBLING SHOW_FEDERATION = slurm.SHOW_FEDERATION SHOW_FUTURE = slurm.SHOW_FUTURE -CR_CPU = slurm.CR_CPU -CR_SOCKET = slurm.CR_SOCKET -CR_CORE = slurm.CR_CORE -CR_BOARD = slurm.CR_BOARD -CR_MEMORY = slurm.CR_MEMORY -CR_ONE_TASK_PER_CORE = slurm.CR_ONE_TASK_PER_CORE -CR_PACK_NODES = slurm.CR_PACK_NODES -CR_CORE_DEFAULT_DIST_BLOCK = slurm.CR_CORE_DEFAULT_DIST_BLOCK -CR_LLN = slurm.CR_LLN - MEM_PER_CPU = slurm.MEM_PER_CPU SHARED_FORCE = slurm.SHARED_FORCE @@ -292,7 +282,6 @@ DEBUG_FLAG_ENERGY = slurm.DEBUG_FLAG_ENERGY DEBUG_FLAG_LICENSE = slurm.DEBUG_FLAG_LICENSE DEBUG_FLAG_PROFILE = slurm.DEBUG_FLAG_PROFILE DEBUG_FLAG_INTERCONNECT = slurm.DEBUG_FLAG_INTERCONNECT -DEBUG_FLAG_JOB_CONT = slurm.DEBUG_FLAG_JOB_CONT DEBUG_FLAG_PROTOCOL = slurm.DEBUG_FLAG_PROTOCOL DEBUG_FLAG_BACKFILL_MAP = slurm.DEBUG_FLAG_BACKFILL_MAP DEBUG_FLAG_TRACE_JOBS = slurm.DEBUG_FLAG_TRACE_JOBS diff --git a/pyslurm/pydefines/slurm_errno_enums.pxi b/pyslurm/pydefines/slurm_errno_enums.pxi index faa9c63f..0c282f5e 100644 --- a/pyslurm/pydefines/slurm_errno_enums.pxi +++ b/pyslurm/pydefines/slurm_errno_enums.pxi @@ -90,7 +90,6 @@ ESLURM_PARTITION_IN_USE = slurm.ESLURM_PARTITION_IN_USE ESLURM_STEP_LIMIT = slurm.ESLURM_STEP_LIMIT 
ESLURM_JOB_SUSPENDED = slurm.ESLURM_JOB_SUSPENDED ESLURM_CAN_NOT_START_IMMEDIATELY = slurm.ESLURM_CAN_NOT_START_IMMEDIATELY -ESLURM_INTERCONNECT_BUSY = slurm.ESLURM_INTERCONNECT_BUSY ESLURM_RESERVATION_EMPTY = slurm.ESLURM_RESERVATION_EMPTY ESLURM_INVALID_ARRAY = slurm.ESLURM_INVALID_ARRAY ESLURM_RESERVATION_NAME_DUP = slurm.ESLURM_RESERVATION_NAME_DUP @@ -176,4 +175,3 @@ ESLURM_INVALID_CLUSTER_NAME = slurm.ESLURM_INVALID_CLUSTER_NAME ESLURM_FED_JOB_LOCK = slurm.ESLURM_FED_JOB_LOCK ESLURM_FED_NO_VALID_CLUSTERS = slurm.ESLURM_FED_NO_VALID_CLUSTERS ESLURM_MISSING_TIME_LIMIT = slurm.ESLURM_MISSING_TIME_LIMIT -ESLURM_INVALID_KNL = slurm.ESLURM_INVALID_KNL diff --git a/pyslurm/slurm/extra.pxi b/pyslurm/slurm/extra.pxi index 49183cf6..6d8436b0 100644 --- a/pyslurm/slurm/extra.pxi +++ b/pyslurm/slurm/extra.pxi @@ -63,7 +63,7 @@ ctypedef struct return_code_msg_t: # https://github.com/SchedMD/slurm/blob/slurm-24-11-0-1/src/common/slurm_protocol_defs.h#L432 ctypedef struct job_id_msg_t: - uint32_t job_id + slurm_step_id_t step_id uint16_t show_flags # https://github.com/SchedMD/slurm/blob/slurm-24-05-3-1/src/common/msg_type.h#L45 diff --git a/pyslurm/slurm/slurm.h.pxi b/pyslurm/slurm/slurm.h.pxi index e6c8629d..cebd292c 100644 --- a/pyslurm/slurm/slurm.h.pxi +++ b/pyslurm/slurm/slurm.h.pxi @@ -9,7 +9,7 @@ # * C-Macros are listed with their appropriate uint type # * Any definitions that cannot be translated are not included in this file # -# Generated on 2025-09-28T15:03:24.037044 +# Generated on 2025-11-06T18:06:33.691815 # # The Original Copyright notice from slurm.h has been included # below: @@ -63,6 +63,10 @@ cdef extern from "slurm/slurm.h": uint32_t NO_VAL uint64_t NO_VAL64 uint64_t NO_CONSUME_VAL64 + uint8_t MAX_VAL8 + uint16_t MAX_VAL16 + uint32_t MAX_VAL + uint64_t MAX_VAL64 uint16_t MAX_TASKS_PER_NODE uint32_t MAX_JOB_ID uint32_t MAX_FED_JOB_ID @@ -94,6 +98,7 @@ cdef extern from "slurm/slurm.h": uint32_t JOB_RESV_DEL_HOLD uint32_t JOB_SIGNALING uint32_t 
JOB_STAGE_OUT + uint32_t JOB_EXPEDITING int8_t READY_JOB_FATAL int8_t READY_JOB_ERROR uint8_t READY_NODE_STATE @@ -192,20 +197,6 @@ cdef extern from "slurm/slurm.h": uint8_t SHOW_SIBLING uint8_t SHOW_FEDERATION uint8_t SHOW_FUTURE - uint8_t CR_CPU - uint8_t CR_SOCKET - uint8_t CR_CORE - uint8_t CR_BOARD - uint8_t CR_MEMORY - uint8_t ENFORCE_BINDING_GRES - uint8_t ONE_TASK_PER_SHARING_GRES - uint16_t CR_ONE_TASK_PER_CORE - uint16_t CR_PACK_NODES - uint16_t LL_SHARED_GRES - uint16_t CR_CORE_DEFAULT_DIST_BLOCK - uint16_t CR_LLN - uint16_t MULTIPLE_SHARING_GRES_PJ - uint16_t CR_LINEAR uint64_t MEM_PER_CPU uint16_t SHARED_FORCE uint8_t PRIVATE_DATA_JOBS @@ -281,6 +272,9 @@ cdef extern from "slurm/slurm.h": uint64_t GRES_ALLOW_TASK_SHARING uint64_t STEPMGR_ENABLED uint64_t HETJOB_PURGE + uint64_t SPREAD_SEGMENTS + uint64_t CONSOLIDATE_SEGMENTS + uint64_t EXPEDITED_REQUEUE uint8_t X11_FORWARD_ALL uint8_t X11_FORWARD_BATCH uint8_t X11_FORWARD_FIRST @@ -381,7 +375,7 @@ cdef extern from "slurm/slurm.h": uint32_t DEBUG_FLAG_PROFILE uint32_t DEBUG_FLAG_INTERCONNECT uint32_t DEBUG_FLAG_GLOB_SILENCE - uint32_t DEBUG_FLAG_JOB_CONT + uint32_t DEBUG_FLAG_NAMESPACE uint32_t DEBUG_FLAG_AUDIT_RPCS uint32_t DEBUG_FLAG_PROTOCOL uint32_t DEBUG_FLAG_BACKFILL_MAP @@ -413,6 +407,7 @@ cdef extern from "slurm/slurm.h": uint64_t DEBUG_FLAG_JAG uint64_t DEBUG_FLAG_CGROUP uint64_t DEBUG_FLAG_SCRIPT + uint64_t DEBUG_FLAG_METRICS uint8_t PREEMPT_MODE_OFF uint8_t PREEMPT_MODE_SUSPEND uint8_t PREEMPT_MODE_REQUEUE @@ -428,8 +423,9 @@ cdef extern from "slurm/slurm.h": uint8_t HEALTH_CHECK_NODE_ALLOC uint8_t HEALTH_CHECK_NODE_MIXED uint8_t HEALTH_CHECK_NODE_NONDRAINED_IDLE - uint16_t HEALTH_CHECK_CYCLE uint8_t HEALTH_CHECK_NODE_ANY + uint16_t HEALTH_CHECK_CYCLE + uint16_t HEALTH_CHECK_START_ONLY uint8_t PROLOG_FLAG_ALLOC uint8_t PROLOG_FLAG_NOHOLD uint8_t PROLOG_FLAG_CONTAIN @@ -455,6 +451,7 @@ cdef extern from "slurm/slurm.h": uint16_t CONF_FLAG_SHR uint16_t CONF_FLAG_CONTAIN_SPANK uint32_t 
CONF_FLAG_NO_STDIO + uint32_t CONF_FLAG_DISABLE_HTTP uint8_t LOG_FMT_ISO8601_MS uint8_t LOG_FMT_ISO8601 uint8_t LOG_FMT_RFC5424_MS @@ -506,7 +503,6 @@ cdef extern from "slurm/slurm.h": uint8_t KILL_OOM uint8_t KILL_NO_SIBS uint16_t KILL_JOB_RESV - uint16_t KILL_NO_CRON uint16_t KILL_NO_SIG_FAIL uint16_t KILL_JOBS_VERBOSE uint16_t KILL_CRON @@ -556,6 +552,12 @@ cdef extern from "slurm/slurm.h": unsigned char type unsigned char hash[32] + ctypedef struct slurm_step_id_t: + sluid_t sluid + uint32_t job_id + uint32_t step_het_comp + uint32_t step_id + cdef enum job_states: JOB_PENDING JOB_RUNNING @@ -589,7 +591,7 @@ cdef extern from "slurm/slurm.h": WAIT_RESERVATION WAIT_NODE_NOT_AVAIL WAIT_HELD_USER - DEFUNCT_WAIT_17 + WAIT_NVIDIA_IMEX_CHANNELS FAIL_DEFER FAIL_DOWN_PARTITION FAIL_DOWN_NODE @@ -820,7 +822,6 @@ cdef extern from "slurm/slurm.h": TLS_PLUGIN_S2N cdef enum select_plugin_type: - SELECT_PLUGIN_CONS_RES SELECT_PLUGIN_LINEAR SELECT_PLUGIN_CONS_TRES @@ -980,6 +981,22 @@ cdef extern from "slurm/slurm.h": NODE_STATE_FUTURE NODE_STATE_END + ctypedef enum select_type_flags_t: + SELECT_CPU + SELECT_SOCKET + SELECT_CORE + SELECT_BOARD + SELECT_MEMORY + SELECT_LINEAR + SELECT_ENFORCE_BINDING_GRES + SELECT_ONE_TASK_PER_SHARING_GRES + SELECT_ONE_TASK_PER_CORE + SELECT_PACK_NODES + SELECT_LL_SHARED_GRES + SELECT_CORE_DEFAULT_DIST_BLOCK + SELECT_LLN + SELECT_MULTIPLE_SHARING_GRES_PJ + ctypedef enum step_spec_flags_t: SSF_NONE SSF_EXCLUSIVE @@ -1067,12 +1084,6 @@ cdef extern from "slurm/slurm.h": ctypedef bitstr_t bitoff_t - cdef struct dynamic_plugin_data: - void* data - uint32_t plugin_id - - ctypedef dynamic_plugin_data dynamic_plugin_data_t - cdef struct acct_gather_energy: uint32_t ave_watts uint64_t base_consumed_energy @@ -1138,7 +1149,6 @@ cdef extern from "slurm/slurm.h": uint32_t het_job_offset void* id uint16_t immediate - uint32_t job_id char* job_id_str char* job_size_str uint16_t kill_on_node_fail @@ -1179,6 +1189,7 @@ cdef extern from "slurm/slurm.h": 
uint32_t site_factor char** spank_job_env uint32_t spank_job_env_size + slurm_step_id_t step_id char* submit_line uint32_t task_dist uint32_t time_limit @@ -1289,7 +1300,6 @@ cdef extern from "slurm/slurm.h": uint32_t het_job_id char* het_job_id_set uint32_t het_job_offset - uint32_t job_id job_resources_t* job_resrcs char* job_size_str uint32_t job_state @@ -1342,6 +1352,7 @@ cdef extern from "slurm/slurm.h": char* selinux_context uint16_t shared uint32_t site_factor + slurm_step_id_t step_id uint16_t sockets_per_board uint16_t sockets_per_node time_t start_time @@ -1352,6 +1363,7 @@ cdef extern from "slurm/slurm.h": char* std_in char* std_out uint16_t segment_size + char* submit_line time_t submit_time time_t suspend_time char* system_comment @@ -1371,6 +1383,7 @@ cdef extern from "slurm/slurm.h": uint32_t wait4switch char* wckey char* work_dir + uint32_t job_id ctypedef job_info slurm_job_info_t @@ -1448,8 +1461,7 @@ cdef extern from "slurm/slurm.h": job_state_response_job_t* jobs cdef struct step_update_request_msg: - uint32_t job_id - uint32_t step_id + slurm_step_id_t step_id uint32_t time_limit ctypedef step_update_request_msg step_update_request_msg_t @@ -1487,14 +1499,6 @@ cdef extern from "slurm/slurm.h": ctypedef slurm_step_layout slurm_step_layout_t - cdef struct slurm_step_id_msg: - sluid_t sluid - uint32_t job_id - uint32_t step_het_comp - uint32_t step_id - - ctypedef slurm_step_id_msg slurm_step_id_t - cdef struct _slurm_step_io_fds_t_slurm_step_io_fds_t_slurm_step_io_fds_input_s: int fd uint32_t taskid @@ -1537,15 +1541,12 @@ cdef extern from "slurm/slurm.h": ctypedef task_ext_msg task_exit_msg_t ctypedef struct net_forward_msg_t: - uint32_t job_id + slurm_step_id_t step_id uint32_t flags uint16_t port char* target - cdef struct srun_ping_msg: - uint32_t job_id - - ctypedef srun_ping_msg srun_ping_msg_t + ctypedef slurm_step_id_t srun_ping_msg_t ctypedef slurm_step_id_t srun_job_complete_msg_t @@ -1556,7 +1557,7 @@ cdef extern from "slurm/slurm.h": 
ctypedef srun_timeout_msg srun_timeout_msg_t cdef struct srun_user_msg: - uint32_t job_id + slurm_step_id_t step_id char* msg ctypedef srun_user_msg srun_user_msg_t @@ -1579,14 +1580,13 @@ cdef extern from "slurm/slurm.h": cdef struct suspend_msg: uint16_t op - uint32_t job_id char* job_id_str + slurm_step_id_t step_id ctypedef suspend_msg suspend_msg_t cdef struct top_job_msg: uint16_t op - uint32_t job_id char* job_id_str ctypedef top_job_msg top_job_msg_t @@ -1752,7 +1752,7 @@ cdef extern from "slurm/slurm.h": uint32_t user_id ctypedef struct stepmgr_job_info_t: - uint32_t job_id + slurm_step_id_t step_id char* stepmgr cdef struct job_step_info_response_msg: @@ -1820,6 +1820,7 @@ cdef extern from "slurm/slurm.h": uint32_t node_state char* os uint32_t owner + char* parameters char* partitions uint16_t port uint64_t real_memory @@ -1864,7 +1865,7 @@ cdef extern from "slurm/slurm.h": ctypedef topo_info_request_msg topo_info_request_msg_t cdef struct topo_info_response_msg: - dynamic_plugin_data_t* topo_info + void* topo_info ctypedef topo_info_response_msg topo_info_response_msg_t @@ -1874,7 +1875,7 @@ cdef extern from "slurm/slurm.h": ctypedef topo_config_response_msg topo_config_response_msg_t cdef struct job_alloc_info_msg: - uint32_t job_id + slurm_step_id_t step_id char* req_cluster ctypedef job_alloc_info_msg job_alloc_info_msg_t @@ -1958,7 +1959,6 @@ cdef extern from "slurm/slurm.h": cdef struct resource_allocation_response_msg: char* account - uint32_t job_id char* batch_host uint32_t cpu_freq_min uint32_t cpu_freq_max @@ -1982,6 +1982,9 @@ cdef extern from "slurm/slurm.h": uint64_t pn_min_memory char* qos char* resv_name + uint16_t segment_size + uint16_t start_protocol_ver + slurm_step_id_t step_id char* tres_per_node char* tres_per_task uid_t uid @@ -1999,16 +2002,20 @@ cdef extern from "slurm/slurm.h": cdef struct will_run_response_msg: char* cluster_name - uint32_t job_id char* job_submit_user_msg char* node_list char* part_name list_t* 
preemptee_job_id uint32_t proc_cnt + slurm_step_id_t step_id time_t start_time ctypedef will_run_response_msg will_run_response_msg_t + ctypedef struct ns_fd_map_t: + int type + int fd + cdef struct resv_core_spec: char* node_name char* core_id @@ -2017,6 +2024,7 @@ cdef extern from "slurm/slurm.h": cdef struct reserve_info: char* accounts + char* allowed_parts char* burst_buffer char* comment uint32_t core_cnt @@ -2034,6 +2042,7 @@ cdef extern from "slurm/slurm.h": char* node_list char* partition uint32_t purge_comp_time + char* qos time_t start_time char* tres_str char* users @@ -2049,6 +2058,7 @@ cdef extern from "slurm/slurm.h": cdef struct resv_desc_msg: char* accounts + char* allowed_parts char* burst_buffer char* comment uint32_t core_cnt @@ -2065,6 +2075,7 @@ cdef extern from "slurm/slurm.h": char* node_list char* partition uint32_t purge_comp_time + char* qos time_t start_time time_t time_force char* tres_str @@ -2093,7 +2104,6 @@ cdef extern from "slurm/slurm.h": char* accounting_storage_pass uint16_t accounting_storage_port char* accounting_storage_type - char* accounting_storage_user void* acct_gather_conf char* acct_gather_energy_type char* acct_gather_profile_type @@ -2114,6 +2124,7 @@ cdef extern from "slurm/slurm.h": char* certmgr_params char* certmgr_type void* cgroup_conf + char* cli_filter_params char* cli_filter_plugins uint16_t cluster_id char* cluster_name @@ -2151,6 +2162,7 @@ cdef extern from "slurm/slurm.h": uint16_t health_check_interval uint16_t health_check_node_state char* health_check_program + char* http_parser_type uint32_t host_unreach_retry_count uint16_t inactive_limit char* interactive_step_opts @@ -2162,10 +2174,11 @@ cdef extern from "slurm/slurm.h": char* job_comp_loc char* job_comp_params char* job_comp_pass + char* job_comp_pass_script uint32_t job_comp_port char* job_comp_type char* job_comp_user - char* job_container_plugin + char* namespace_plugin list_t* job_defaults_list uint16_t job_file_append uint16_t job_requeue @@ 
-2191,6 +2204,7 @@ cdef extern from "slurm/slurm.h": uint16_t max_tasks_per_node char* mcs_plugin char* mcs_plugin_params + char* metrics_type uint32_t min_job_age void* mpi_conf char* mpi_default @@ -2307,6 +2321,7 @@ cdef extern from "slurm/slurm.h": uint16_t tree_width char* unkillable_program uint16_t unkillable_timeout + char* url_parser_type char* version uint16_t vsize_factor uint16_t wait_time @@ -2332,8 +2347,7 @@ cdef extern from "slurm/slurm.h": ctypedef slurmd_status_msg slurmd_status_t cdef struct submit_response_msg: - uint32_t job_id - uint32_t step_id + slurm_step_id_t step_id uint32_t error_code char* job_submit_user_msg @@ -2364,7 +2378,6 @@ cdef extern from "slurm/slurm.h": ctypedef partition_info update_part_msg_t cdef struct job_sbcast_cred_msg: - uint32_t job_id char* node_list void* sbcast_cred @@ -2390,7 +2403,6 @@ cdef extern from "slurm/slurm.h": ctypedef stats_info_request_msg stats_info_request_msg_t cdef struct stats_info_response_msg: - uint32_t parts_packed time_t req_time time_t req_time_start uint32_t server_thread_count @@ -2401,7 +2413,7 @@ cdef extern from "slurm/slurm.h": uint32_t gettimeofday_latency uint32_t schedule_cycle_max uint32_t schedule_cycle_last - uint32_t schedule_cycle_sum + uint64_t schedule_cycle_sum uint32_t schedule_cycle_counter uint32_t schedule_cycle_depth uint32_t* schedule_exit @@ -2531,13 +2543,13 @@ cdef extern from "slurm/slurm.h": int slurm_allocate_resources(job_desc_msg_t* job_desc_msg, resource_allocation_response_msg_t** job_alloc_resp_msg) - ctypedef void (*_slurm_allocate_resources_blocking_pending_callback_ft)(uint32_t job_id) + ctypedef void (*_slurm_allocate_resources_blocking_pending_callback_ft)(slurm_step_id_t* step_id) resource_allocation_response_msg_t* slurm_allocate_resources_blocking(const job_desc_msg_t* user_req, time_t timeout, _slurm_allocate_resources_blocking_pending_callback_ft pending_callback) void slurm_free_resource_allocation_response_msg(resource_allocation_response_msg_t* 
msg) - ctypedef void (*_slurm_allocate_het_job_blocking_pending_callback_ft)(uint32_t job_id) + ctypedef void (*_slurm_allocate_het_job_blocking_pending_callback_ft)(slurm_step_id_t* step_id) list_t* slurm_allocate_het_job_blocking(list_t* job_req_list, time_t timeout, _slurm_allocate_het_job_blocking_pending_callback_ft pending_callback) @@ -2594,6 +2606,7 @@ cdef extern from "slurm/slurm.h": ctypedef struct kill_jobs_msg_t: char* account + char* admin_comment uint16_t flags char* job_name char** jobs_array @@ -2621,7 +2634,7 @@ cdef extern from "slurm/slurm.h": int slurm_kill_job(uint32_t job_id, uint16_t signal, uint16_t flags) - int slurm_kill_job_step(uint32_t job_id, uint32_t step_id, uint16_t signal, uint16_t flags) + int slurm_kill_job_step(slurm_step_id_t* step_id, uint16_t signal, uint16_t flags) int slurm_kill_job2(const char* job_id, uint16_t signal, uint16_t flags, const char* sibling) @@ -2629,11 +2642,11 @@ cdef extern from "slurm/slurm.h": int slurm_signal_job(uint32_t job_id, uint16_t signal) - int slurm_signal_job_step(uint32_t job_id, uint32_t step_id, uint32_t signal) + int slurm_signal_job_step(slurm_step_id_t* step_id, uint32_t signal) - int slurm_complete_job(uint32_t job_id, uint32_t job_return_code) + int slurm_complete_job(slurm_step_id_t* step_id, uint32_t job_return_code) - int slurm_terminate_job_step(uint32_t job_id, uint32_t step_id) + int slurm_terminate_job_step(slurm_step_id_t* step_id) void slurm_step_launch_params_t_init(slurm_step_launch_params_t* ptr) @@ -2677,6 +2690,8 @@ cdef extern from "slurm/slurm.h": int slurm_reset_statistics(stats_info_request_msg_t* req) + int slurm_get_resource_layout(slurm_step_id_t* step_id, void** response) + void slurm_free_job_info_msg(job_info_msg_t* job_buffer_ptr) void slurm_free_job_state_response_msg(job_state_response_msg_t* msg) @@ -2701,6 +2716,8 @@ cdef extern from "slurm/slurm.h": int slurm_load_job(job_info_msg_t** resp, uint32_t job_id, uint16_t show_flags) + int 
slurm_load_job_sluid(job_info_msg_t** resp, sluid_t sluid, uint16_t show_flags) + int slurm_load_job_prio(priority_factors_response_msg_t** factors_resp, uint16_t show_flags) int slurm_load_job_user(job_info_msg_t** job_info_msg_pptr, uint32_t user_id, uint16_t show_flags) @@ -2717,20 +2734,14 @@ cdef extern from "slurm/slurm.h": int slurm_update_job2(job_desc_msg_t* job_msg, job_array_resp_msg_t** resp) - int slurm_get_job_steps(time_t update_time, uint32_t job_id, uint32_t step_id, job_step_info_response_msg_t** step_response_pptr, uint16_t show_flags) + int slurm_get_job_steps(slurm_step_id_t* step_id, job_step_info_response_msg_t** resp, uint16_t show_flags) int slurm_find_step_ids_by_container_id(uint16_t show_flags, uid_t uid, const char* container_id, list_t* steps) void slurm_free_job_step_info_response_msg(job_step_info_response_msg_t* msg) - void slurm_print_job_step_info_msg(FILE* out, job_step_info_response_msg_t* job_step_info_msg_ptr, int one_liner) - - void slurm_print_job_step_info(FILE* out, job_step_info_t* step_ptr, int one_liner) - slurm_step_layout_t* slurm_job_step_layout_get(slurm_step_id_t* step_id) - char* slurm_sprint_job_step_info(job_step_info_t* step_ptr, int one_liner) - int slurm_job_step_stat(slurm_step_id_t* step_id, char* node_list, uint16_t use_protocol_ver, job_step_stat_response_msg_t** resp) int slurm_job_step_get_pids(slurm_step_id_t* step_id, char* node_list, job_step_pids_response_msg_t** resp) diff --git a/pyslurm/slurm/slurm_errno.h.pxi b/pyslurm/slurm/slurm_errno.h.pxi index 96453d9f..011e1cb7 100644 --- a/pyslurm/slurm/slurm_errno.h.pxi +++ b/pyslurm/slurm/slurm_errno.h.pxi @@ -9,7 +9,7 @@ # * C-Macros are listed with their appropriate uint type # * Any definitions that cannot be translated are not included in this file # -# Generated on 2025-09-28T15:03:23.665186 +# Generated on 2025-11-06T18:06:33.417836 # # The Original Copyright notice from slurm_errno.h has been included # below: @@ -159,7 +159,6 @@ cdef extern from 
"slurm/slurm_errno.h": ESLURM_STEP_LIMIT ESLURM_JOB_SUSPENDED ESLURM_CAN_NOT_START_IMMEDIATELY - ESLURM_INTERCONNECT_BUSY ESLURM_RESERVATION_EMPTY ESLURM_INVALID_ARRAY ESLURM_RESERVATION_NAME_DUP @@ -177,7 +176,7 @@ cdef extern from "slurm/slurm_errno.h": ESLURM_INVALID_BURST_BUFFER_REQUEST ESLURM_PRIO_RESET_FAIL ESLURM_CANNOT_MODIFY_CRON_JOB - ESLURM_INVALID_JOB_CONTAINER_CHANGE + ESLURM_INVALID_NAMESPACE_CHANGE ESLURM_CANNOT_CANCEL_CRON_JOB ESLURM_INVALID_MCS_LABEL ESLURM_BURST_BUFFER_WAIT @@ -259,6 +258,12 @@ cdef extern from "slurm/slurm_errno.h": ESLURM_MAX_POWERED_NODES ESLURM_REQUESTED_TOPO_CONFIG_UNAVAILABLE ESLURM_PREEMPTION_REQUIRED + ESLURM_INVALID_NODE_STATE_TRANSITION + ESLURM_INVALID_JOB_STATE + ESLURM_BREAK_EVAL + ESLURM_RETRY_EVAL + ESLURM_RETRY_EVAL_HINT + ESLURM_INVALID_SLUID ESPANK_ERROR ESPANK_BAD_ARG ESPANK_NOT_TASK @@ -306,6 +311,8 @@ cdef extern from "slurm/slurm_errno.h": ESLURM_AUTH_UNPACK ESLURM_AUTH_SKIP ESLURM_AUTH_UNABLE_TO_GENERATE_TOKEN + ESLURM_AUTH_NOBODY + ESLURM_AUTH_SOCKET_INVALID_PEER ESLURM_DB_CONNECTION ESLURM_JOBS_RUNNING_ON_ASSOC ESLURM_CLUSTER_DELETED @@ -327,8 +334,16 @@ cdef extern from "slurm/slurm_errno.h": ESLURM_INVALID_CLUSTER_NAME ESLURM_FED_JOB_LOCK ESLURM_FED_NO_VALID_CLUSTERS + ESLURM_LUA_INVALID_STATE + ESLURM_LUA_INVALID_SYNTAX + ESLURM_LUA_FUNC_NOT_FOUND + ESLURM_LUA_FUNC_INVALID_RC + ESLURM_LUA_FUNC_FAILED + ESLURM_LUA_FUNC_FAILED_RUNTIME_ERROR + ESLURM_LUA_FUNC_FAILED_ENOMEM + ESLURM_LUA_FUNC_FAILED_GARBAGE_COLLECTOR + ESLURM_LUA_INVALID_CONVERSION_TYPE ESLURM_MISSING_TIME_LIMIT - ESLURM_INVALID_KNL ESLURM_PLUGIN_INVALID ESLURM_PLUGIN_INCOMPLETE ESLURM_PLUGIN_NOT_LOADED @@ -344,6 +359,8 @@ cdef extern from "slurm/slurm_errno.h": ESLURM_REST_EMPTY_RESULT ESLURM_REST_MISSING_UID ESLURM_REST_MISSING_GID + ESLURM_REST_UNKNOWN_URL + ESLURM_REST_UNKNOWN_URL_METHOD ESLURM_DATA_PATH_NOT_FOUND ESLURM_DATA_PTR_NULL ESLURM_DATA_CONV_FAILED @@ -360,9 +377,41 @@ cdef extern from "slurm/slurm_errno.h": 
ESLURM_DATA_INVALID_PARSER ESLURM_DATA_PARSING_DEPTH ESLURM_DATA_PARSER_INVALID_STATE + ESLURM_DATA_PARSE_BAD_INPUT ESLURM_CONTAINER_NOT_CONFIGURED ESLURM_URL_UNKNOWN_SCHEME ESLURM_URL_EMPTY + ESLURM_URL_NON_NULL_TERMINATOR + ESLURM_URL_INVALID_FORMATING + ESLURM_URL_INVALID_SCHEME + ESLURM_URL_UNSUPPORTED_SCHEME + ESLURM_URL_INVALID_HOST + ESLURM_URL_INVALID_PORT + ESLURM_URL_INVALID_PATH + ESLURM_URL_INVALID_QUERY + ESLURM_URL_INVALID_FRAGMENT + ESLURM_URL_UNSUPPORTED_FORMAT + ESLURM_HTTP_PARSING_FAILURE + ESLURM_HTTP_UNEXPECTED_URL + ESLURM_HTTP_EMPTY_HEADER + ESLURM_HTTP_INVALID_STATUS_CODE + ESLURM_HTTP_INVALID_METHOD + ESLURM_HTTP_MISSING_LF + ESLURM_HTTP_INVALID_CHARACTER + ESLURM_HTTP_INVALID_CONTENT_LENGTH + ESLURM_HTTP_INVALID_TRANSFER_ENCODING + ESLURM_HTTP_UNSUPPORTED_CHUNK_ENCODING + ESLURM_HTTP_UNEXPECTED_REQUEST + ESLURM_HTTP_UNSUPPORTED_VERSION + ESLURM_HTTP_UNSUPPORTED_UPGRADE + ESLURM_HTTP_INVALID_CONTENT_ENCODING + ESLURM_HTTP_UNSUPPORTED_EXPECT + ESLURM_HTTP_UNSUPPORTED_KEEP_ALIVE + ESLURM_HTTP_CONTENT_LENGTH_TOO_LARGE + ESLURM_HTTP_POST_MISSING_CONTENT_LENGTH + ESLURM_HTTP_UNEXPECTED_BODY + ESLURM_HTTP_UNKNOWN_ACCEPT_MIME_TYPE + ESLURM_TLS_REQUIRED ctypedef struct slurm_errtab_t: int xe_number diff --git a/pyslurm/slurm/slurmdb.h.pxi b/pyslurm/slurm/slurmdb.h.pxi index c10001e2..8a796998 100644 --- a/pyslurm/slurm/slurmdb.h.pxi +++ b/pyslurm/slurm/slurmdb.h.pxi @@ -9,7 +9,7 @@ # * C-Macros are listed with their appropriate uint type # * Any definitions that cannot be translated are not included in this file # -# Generated on 2025-09-28T15:03:24.516012 +# Generated on 2025-11-06T18:06:33.934775 # # The Original Copyright notice from slurmdb.h has been included # below: @@ -70,6 +70,7 @@ cdef extern from "slurm/slurmdb.h": uint8_t SLURMDB_JOB_FLAG_SCHED uint8_t SLURMDB_JOB_FLAG_BACKFILL uint8_t SLURMDB_JOB_FLAG_START_R + uint8_t SLURMDB_JOB_FLAG_ALTERED uint8_t JOBCOND_FLAG_DUP uint8_t JOBCOND_FLAG_NO_STEP uint8_t JOBCOND_FLAG_NO_TRUNC @@ -195,7 
+196,6 @@ cdef extern from "slurm/slurmdb.h": QOS_FLAG_NOTSET QOS_FLAG_ADD QOS_FLAG_REMOVE - QOS_FLAG_INVALID cdef enum cluster_fed_states: CLUSTER_FED_STATE_NA @@ -209,7 +209,6 @@ cdef extern from "slurm/slurmdb.h": CLUSTER_FLAG_MULTSD CLUSTER_FLAG_FED CLUSTER_FLAG_EXT - CLUSTER_FLAG_INVALID ctypedef enum slurmdb_assoc_flags_t: ASSOC_FLAG_NONE @@ -220,13 +219,13 @@ cdef extern from "slurm/slurmdb.h": ASSOC_FLAG_BASE ASSOC_FLAG_USER_COORD ASSOC_FLAG_BLOCK_ADD - ASSOC_FLAG_INVALID ctypedef struct slurmdb_tres_rec_t: uint64_t alloc_secs uint32_t rec_count uint64_t count uint32_t id + char modifier char* name char* type @@ -302,7 +301,6 @@ cdef extern from "slurm/slurmdb.h": SLURMDB_ACCT_FLAG_USER_COORD_NO SLURMDB_ACCT_FLAG_BASE SLURMDB_ACCT_FLAG_USER_COORD - SLURMDB_ACCT_FLAG_INVALID ctypedef struct slurmdb_account_cond_t: slurmdb_assoc_cond_t* assoc_cond @@ -858,6 +856,7 @@ cdef extern from "slurm/slurmdb.h": list_t* assoc_list slurmdb_bf_usage_t* bf_usage list_t* coord_accts + uint32_t def_qos_id char* default_acct char* default_wckey uint32_t flags diff --git a/pyslurm/utils/helpers.pxd b/pyslurm/utils/helpers.pxd index 3f73c375..60b817e8 100644 --- a/pyslurm/utils/helpers.pxd +++ b/pyslurm/utils/helpers.pxd @@ -23,11 +23,13 @@ # cython: language_level=3 from pyslurm cimport slurm -from pyslurm.slurm cimport xfree, try_xmalloc, xmalloc +from pyslurm.slurm cimport xfree, try_xmalloc, xmalloc, slurm_step_id_t from libc.stdint cimport uint8_t, uint16_t, uint32_t, uint64_t from pyslurm.utils cimport cstr from libc.stdlib cimport free +from libc.string cimport memset cpdef uid_to_name(uint32_t uid, err_on_invalid=*, dict lookup=*) cpdef gid_to_name(uint32_t gid, err_on_invalid=*, dict lookup=*) cpdef gres_from_tres_dict(dict tres_dict) +cdef slurm_step_id_t init_step_id() diff --git a/pyslurm/utils/helpers.pyx b/pyslurm/utils/helpers.pyx index bac5f5f7..0f0eeca3 100644 --- a/pyslurm/utils/helpers.pyx +++ b/pyslurm/utils/helpers.pyx @@ -418,3 +418,12 @@ def 
cpu_freq_int_to_str(freq): else: # This is in kHz return freq + +cdef slurm_step_id_t init_step_id(): + cdef slurm_step_id_t _s + memset(&_s, 0, sizeof(slurm_step_id_t)) + _s.sluid = 0 + _s.job_id = slurm.NO_VAL + _s.step_het_comp = slurm.NO_VAL + _s.step_id = slurm.NO_VAL + return _s diff --git a/pyslurm/version.py b/pyslurm/version.py index e88cddeb..81c07469 100644 --- a/pyslurm/version.py +++ b/pyslurm/version.py @@ -5,4 +5,4 @@ # The last Number "Z" is the current Pyslurm patch version, which should be # incremented each time a new release is made (except when migrating to a new # Slurm Major release, then set it back to 0) -__version__ = "25.5.0" +__version__ = "25.11.0" diff --git a/setup.cfg b/setup.cfg index 6c8a65a5..b75cbb33 100644 --- a/setup.cfg +++ b/setup.cfg @@ -4,6 +4,6 @@ packager = Giovanni Torres doc_files = README.md examples/ build_requires = python3-devel >= 3.6 - slurm-devel >= 25.05.0 + slurm-devel >= 25.11.0 requires = slurm use_bzip2 = 1