[IMP] queue_job_batch: Improve perf of counters
With a batch of ~2k jobs carrying large task definitions, computing the counters could easily run into a memory error.
Using read() saves memory by avoiding the prefetching of every field.
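
For illustration, a minimal sketch of the two access patterns (not part of the commit; `batch` stands for any `queue.job.batch` record):

```python
# Old pattern: touching `r.state` record by record pulls the default
# prefetch set of every job in the batch into the ORM cache, including
# large fields such as the serialized task definition.
failed = len(batch.job_ids.filtered(lambda r: r.state == "failed"))

# New pattern: read() issues one query for just the requested column, so
# the jobs' other, potentially huge, fields are never loaded.
states = [row["state"] for row in batch.job_ids.read(["state"])]
failed = states.count("failed")
```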
yvaucher committed Aug 16, 2023
1 parent ef2ab0a commit 93c82da
Showing 1 changed file with 13 additions and 13 deletions.
queue_job_batch/models/queue_job_batch.py
```diff
@@ -116,18 +116,18 @@ def get_new_batch(self, name, **kwargs):
         })
         return self.sudo().create(vals).sudo(self.env.uid)
 
-    @api.depends('job_ids')
+    @api.depends("job_ids", "job_ids.state")
     def _compute_job_count(self):
         for record in self:
-            job_count = len(record.job_ids)
-            failed_job_count = len(record.job_ids.filtered(
-                lambda r: r.state == 'failed'
-            ))
-            done_job_count = len(record.job_ids.filtered(
-                lambda r: r.state == 'done'
-            ))
-            record.job_count = job_count
-            record.finished_job_count = done_job_count
-            record.failed_job_count = failed_job_count
-            record.completeness = done_job_count / max(1, job_count)
-            record.failed_percentage = failed_job_count / max(1, job_count)
+            jobs = record.job_ids
+            states = [r["state"] for r in jobs.read(["state"])]
+
+            total = len(jobs)
+            failed = states.count("failed")
+            done = states.count("done")
+
+            record.job_count = total
+            record.finished_job_count = done
+            record.failed_job_count = failed
+            record.completeness = done / max(1, total)
+            record.failed_percentage = failed / max(1, total)
```
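
A possible further step would be to push the counting into SQL with the ORM's read_group(); the commit instead keeps the per-record loop and only trims what it fetches. Sketched here under the assumption that queue.job exposes the batch relation as a field named `job_batch_id`:

```python
# Hedged sketch, not part of this commit: aggregate per-state job counts in
# the database instead of reading one row per job into Python.
groups = self.env["queue.job"].read_group(
    [("job_batch_id", "in", self.ids)],  # field name assumed
    ["state"],
    ["job_batch_id", "state"],
    lazy=False,
)
# With lazy=False each group carries its row count under "__count";
# a many2one groupby value comes back as an (id, display_name) pair.
counts = {(g["job_batch_id"][0], g["state"]): g["__count"] for g in groups}
```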
