@@ -5534,16 +5534,13 @@ cdef class slurmdb_jobs:
55345534 u """ Class to access Slurmdbd Jobs information."""
55355535
55365536 cdef:
5537- pass
5537+ void * db_conn
5538+ slurm.slurmdb_job_cond_t * job_cond
55385539
55395540 def __cinit__ (self ):
5540- pass
5541+ self .job_cond = < slurm.slurmdb_job_cond_t * > slurm.xmalloc(sizeof(slurm.slurmdb_job_cond_t))
55415542
55425543 def __dealloc__ (self ):
5543- self .__destroy()
5544-
5545- cpdef __destroy(self ):
5546- u """ Destructor method."""
55475544 pass
55485545
55495546 def get (self , jobids = [], starttime = 0 , endtime = 0 ):
@@ -5559,61 +5556,19 @@ cdef class slurmdb_jobs:
 
     cpdef __get(self, list jobids, time_t starttime, time_t endtime):
         cdef:
-            slurm.ListIterator iters = NULL
             int i = 0
             int listNum = 0
             dict J_dict = {}
             int apiError = 0
             slurm.List JOBSList
-            void *dbconn
-            slurm.slurmdb_job_cond_t query
-            slurm.List query_step_list = slurm.slurm_list_create(slurm.slurmdb_destroy_selected_step)
-            slurm.slurmdb_selected_step_t *selstep
-        for j in jobids:
-            selstep = <slurm.slurmdb_selected_step_t *>slurm.xmalloc(sizeof(slurm.slurmdb_selected_step_t))
-            selstep.array_task_id = slurm.NO_VAL
-            selstep.stepid = slurm.NO_VAL
-            selstep.jobid = j
-            slurm.slurm_list_append(query_step_list, selstep);
-
-        query.acct_list = NULL
-        query.associd_list = NULL
-        query.cluster_list = NULL
-        query.cpus_max = 0
-        query.cpus_min = 0
-        query.duplicates = 0
-        query.exitcode = 0
-        query.groupid_list = NULL
-        query.jobname_list = NULL
-        query.nodes_max = 0
-        query.nodes_min = 0
-        query.partition_list = NULL
-        query.qos_list = NULL
-        query.resv_list = NULL
-        query.resvid_list = NULL
-        query.state_list = NULL
-        query.step_list = query_step_list
-        query.timelimit_max = 0
-        query.timelimit_min = 0
-        query.usage_end = endtime
-        query.usage_start = starttime
-        query.used_nodes = NULL
-        query.userid_list = NULL
-        query.wckey_list = NULL
-        query.without_steps = 0
-        query.without_usage_truncation = 1
-
-        dbconn = slurm.slurmdb_connection_get()
-        JOBSList = slurm.slurmdb_jobs_get(dbconn, <slurm.slurmdb_job_cond_t *>&query)
-        slurm.slurm_list_destroy(query_step_list)
+            slurm.ListIterator iters = NULL
+
+        JOBSList = slurm.slurmdb_jobs_get(self.db_conn, self.job_cond)
 
         if JOBSList is NULL:
             apiError = slurm.slurm_get_errno()
             raise ValueError(slurm.slurm_strerror(apiError), apiError)
 
-        slurm.slurmdb_connection_close(&dbconn)
-
-
         listNum = slurm.slurm_list_count(JOBSList)
         iters = slurm.slurm_list_iterator_create(JOBSList)
 
@@ -5656,33 +5611,9 @@ cdef class slurmdb_jobs:
             JOBS_info[u'show_full'] = job.show_full
             JOBS_info[u'start'] = job.start
             JOBS_info[u'state'] = job.state
-            job_statistics = <slurm.slurmdb_stats_t>job.stats
-            JOBS_info[u'stat_actual_cpufreq'] = job_statistics.act_cpufreq
-            JOBS_info[u'stat_cpu_ave'] = job_statistics.cpu_ave
-            JOBS_info[u'stat_consumed_energy'] = job_statistics.consumed_energy
-            JOBS_info[u'stat_cpu_min'] = job_statistics.cpu_min
-            JOBS_info[u'stat_cpu_min_nodeid'] = job_statistics.cpu_min_nodeid
-            JOBS_info[u'stat_cpu_min_taskid'] = job_statistics.cpu_min_taskid
-            JOBS_info[u'stat_disk_read_ave'] = job_statistics.disk_read_ave
-            JOBS_info[u'stat_disk_read_max'] = job_statistics.disk_read_max
-            JOBS_info[u'stat_disk_read_max_nodeid'] = job_statistics.disk_read_max_nodeid
-            JOBS_info[u'stat_disk_read_max_taskid'] = job_statistics.disk_read_max_taskid
-            JOBS_info[u'stat_disk_write_ave'] = job_statistics.disk_write_ave
-            JOBS_info[u'stat_disk_write_max'] = job_statistics.disk_write_max
-            JOBS_info[u'stat_disk_write_max_nodeid'] = job_statistics.disk_write_max_nodeid
-            JOBS_info[u'stat_disk_write_max_taskid'] = job_statistics.disk_write_max_taskid
-            JOBS_info[u'stat_pages_ave'] = job_statistics.pages_ave
-            JOBS_info[u'stat_pages_max'] = job_statistics.pages_max
-            JOBS_info[u'stat_pages_max_nodeid'] = job_statistics.pages_max_nodeid
-            JOBS_info[u'stat_pages_max_taskid'] = job_statistics.pages_max_taskid
-            JOBS_info[u'stat_rss_ave'] = job_statistics.rss_ave
-            JOBS_info[u'stat_rss_max'] = job_statistics.rss_max
-            JOBS_info[u'stat_rss_max_nodeid'] = job_statistics.rss_max_nodeid
-            JOBS_info[u'stat_rss_max_taskid'] = job_statistics.rss_max_taskid
-            JOBS_info[u'stat_vsize_ave'] = job_statistics.vsize_ave
-            JOBS_info[u'stat_vsize_max'] = job_statistics.vsize_max
-            JOBS_info[u'stat_vize_max_nodeid'] = job_statistics.vsize_max_nodeid
-            JOBS_info[u'stat_vsize_max_taskid'] = job_statistics.vsize_max_taskid
+
+            JOBS_info[u'stat_actual_cpufreq'] = job.stats.act_cpufreq
+
             JOBS_info[u'steps'] = "Not filled, string should be handled"
             JOBS_info[u'submit'] = job.submit
             JOBS_info[u'suspended'] = job.suspended
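For reference, after this change the accounting query state lives on the instance: __cinit__ allocates the slurmdb_job_cond_t, and __get() passes self.db_conn and self.job_cond straight to slurmdb_jobs_get(). A minimal usage sketch of the public get() wrapper follows, assuming the module is imported as pyslurm, that the hypothetical job ID 1234 exists in slurmdbd, and that __get() ultimately returns the J_dict dictionary keyed by job ID (its return statement is outside the hunks shown):

    import pyslurm

    # Query slurmdbd accounting records for specific job IDs; starttime and
    # endtime are epoch seconds, left here at the defaults from the signature.
    db_jobs = pyslurm.slurmdb_jobs()
    jobs = db_jobs.get(jobids=[1234], starttime=0, endtime=0)

    # Each value is the per-job dictionary built in __get(), e.g. the
    # 'state' and 'stat_actual_cpufreq' keys shown in the diff above.
    for jobid, info in jobs.items():
        print(jobid, info[u'state'], info[u'stat_actual_cpufreq'])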