/openbmc/qemu/tests/qemu-iotests/ |
264
     66: jobs = self.vm.qmp('query-block-jobs')['return']
     67: if jobs and jobs[0]['offset'] > 0:
     72: self.assertTrue(jobs and jobs[0]['offset'] > 0)  # job started
     74: jobs = self.vm.qmp('query-block-jobs')['return']
     76: self.assertTrue(jobs)
     77: self.assertTrue(jobs[0]['offset'] < jobs[0]['len'])
|
testrunner.py
    126: test_field_width: int, jobs: int) -> List[TestResult]:
    133: with Pool(jobs) as p:
    372: def run_tests(self, tests: List[str], jobs: int = 1) -> bool:
    387: if jobs > 1:
    388: results = self.run_tests_pool(tests, test_field_width, jobs)
    393: if jobs > 1:
|
109.out
    23, 39, 74, 90, 125, 141, 176, 192, 227, 243: {"execute":"query-block-jobs"}
    …
|
/openbmc/linux/scripts/ |
jobserver-exec
     15: jobs = b""  (variable)
     48: jobs += slot
     54: if len(jobs):
     55: os.write(writer, jobs)
     59: claim = len(jobs) + 1
     74: if len(jobs):
     75: os.write(writer, jobs)
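The jobserver-exec hits above are fragments of the GNU make jobserver handshake: drain whatever job-slot tokens are currently free, claim them plus the implicit slot, run the wrapped command, then write the tokens back. A condensed, illustrative Python sketch of that flow (not the real script: fd discovery, fifo-style jobservers and error handling are simplified, and appending "-j<claim>" to the command is just one invented way to pass the claim on):

    # Illustrative sketch of the GNU make jobserver handshake; the real
    # scripts/jobserver-exec handles more cases than this condensed version.
    import os
    import re
    import subprocess
    import sys

    jobs = b""
    reader = writer = None

    # MAKEFLAGS carries "--jobserver-auth=R,W", the inherited pipe fds.
    match = re.search(r"--jobserver-auth=(\d+),(\d+)",
                      os.environ.get("MAKEFLAGS", ""))
    if match:
        reader, writer = int(match.group(1)), int(match.group(2))
        os.set_blocking(reader, False)
        try:
            while True:
                slot = os.read(reader, 8)   # each free job slot is one byte
                if not slot:
                    break
                jobs += slot
        except BlockingIOError:
            pass                            # no more free slots right now

    # One implicit job plus one per token we are holding.
    claim = len(jobs) + 1

    try:
        # Hypothetical way of handing the claim to the wrapped command.
        ret = subprocess.call(sys.argv[1:] + ["-j%d" % claim])
    finally:
        if writer is not None and len(jobs):
            os.write(writer, jobs)          # return the borrowed slots

    sys.exit(ret)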
|
generate_initcall_order.pl
     18: my $jobs = {};  # child process pid -> file handle
    169: if (!exists($jobs->{$pid})) {
    173: my $fh = $jobs->{$pid};
    181: delete($jobs->{$pid});
    202: $jobs->{$pid} = $fh;
    213: if (scalar(keys(%{$jobs})) >= $njobs) {
    219: while (scalar(keys(%{$jobs})) > 0) {
|
/openbmc/linux/drivers/gpu/drm/panfrost/ |
panfrost_job.c
    159: struct panfrost_job *job = pfdev->jobs[slot][0];  (in panfrost_dequeue_job())
    162: pfdev->jobs[slot][0] = pfdev->jobs[slot][1];  (in panfrost_dequeue_job())
    163: pfdev->jobs[slot][1] = NULL;  (in panfrost_dequeue_job())
    175: if (!pfdev->jobs[slot][0]) {  (in panfrost_enqueue_job())
    176: pfdev->jobs[slot][0] = job;  (in panfrost_enqueue_job())
    180: WARN_ON(pfdev->jobs[slot][1]);  (in panfrost_enqueue_job())
    181: pfdev->jobs[slot][1] = job;  (in panfrost_enqueue_job())
    183: panfrost_get_job_chain_flag(pfdev->jobs[slot][0]));  (in panfrost_enqueue_job())
    525: } else if (pfdev->jobs[j][0] && !(js_state & MK_JS_MASK(j))) {  (in panfrost_job_handle_irq())
    552: if (!failed[j] || !pfdev->jobs[j][0])  (in panfrost_job_handle_irq())
    …
|
/openbmc/u-boot/tools/ |
genboardscfg.py
    226: def scan_defconfigs(jobs=1):  (argument)
    244: for i in range(jobs):
    245: defconfigs = all_defconfigs[total_boards * i / jobs :
    246:                             total_boards * (i + 1) / jobs]
    410: def gen_boards_cfg(output, jobs=1, force=False):  (argument)
    424: params_list = scan_defconfigs(jobs)
    444: gen_boards_cfg(options.output, jobs=options.jobs, force=options.force)
|
/openbmc/qemu/qapi/ |
job.json
      5: # = Background jobs
     56: # completion. This is used for long-running jobs like mirror that
     62: # @waiting: The job is waiting for other jobs in the transaction to
    199: # needs to be run explicitly for jobs that don't have automatic
    203: # reached its terminal state, JOB_STATUS_CONCLUDED. For jobs that
    216: # Instructs all jobs in a transaction (or a single job if it is not
    218: # necessary cleanup. This command requires that all involved jobs are
    221: # For jobs in a transaction, instructing one job to finalize will
    222: # force ALL jobs in the transaction to finalize, so it is only
    268: # @query-jobs:
    …
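Taken together, these job.json fragments describe the explicit finalize/dismiss steps for jobs created without automatic completion. As a rough illustration of that QMP flow (the job id, progress numbers and abridged replies below are made up for the example):

    {"execute": "query-jobs"}
    {"return": [{"id": "job0", "type": "backup", "status": "pending",
                 "current-progress": 1048576, "total-progress": 1048576}]}

    {"execute": "job-finalize", "arguments": {"id": "job0"}}
    {"return": {}}

    {"execute": "job-dismiss", "arguments": {"id": "job0"}}
    {"return": {}}

With @auto-finalize and @auto-dismiss left at their defaults, the last two steps happen implicitly once the job reaches its terminal state.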
|
/openbmc/openbmc-test-automation/redfish/extended/ |
test_basic_ci.robot
     36: # root@witherspoon:~# systemctl list-jobs --no-pager | cat
     39: # 1 jobs listed.
     42: # root@witherspoon:~# systemctl list-jobs --no-pager | cat
     43: # No jobs running.
     47: ...  systemctl list-jobs --no-pager | cat
     48: Should Be Equal As Strings  ${stdout}  No jobs running.
|
/openbmc/linux/drivers/md/ |
dm-kcopyd.c
    417: static struct kcopyd_job *pop_io_job(struct list_head *jobs,  (in pop_io_job(), argument)
    426: list_for_each_entry(job, jobs, list) {  (in pop_io_job())
    443: static struct kcopyd_job *pop(struct list_head *jobs,  (in pop(), argument)
    450: if (!list_empty(jobs)) {  (in pop())
    451: if (jobs == &kc->io_jobs)  (in pop())
    452: job = pop_io_job(jobs, kc);  (in pop())
    454: job = list_entry(jobs->next, struct kcopyd_job, list);  (in pop())
    463: static void push(struct list_head *jobs, struct kcopyd_job *job)  (in push(), argument)
    469: list_add_tail(&job->list, jobs);  (in push())
    474: static void push_head(struct list_head *jobs, struct kcopyd_job *job)  (in push_head(), argument)
    …
|
/openbmc/linux/Documentation/core-api/ |
padata.rst
      9: Padata is a mechanism by which the kernel can farm jobs out to be done in
     16: Padata also supports multithreaded jobs, splitting up the job evenly while load
     25: The first step in using padata to run serialized jobs is to set up a
     26: padata_instance structure for overall control of how jobs are to be run::
     39: jobs to be serialized independently. A padata_instance may have one or more
     40: padata_shells associated with it, each allowing a separate series of jobs.
     45: The CPUs used to run jobs can be changed in two ways, programmatically with
     52: parallel cpumask describes which processors will be used to execute jobs
    116: true parallelism is achieved by submitting multiple jobs. parallel() runs with
    141: pains to ensure that jobs are completed in the order in which they were
    …
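The padata.rst fragments outline the setup sequence: allocate a padata_instance, attach one or more padata_shells, then submit jobs whose parallel() callbacks run on the parallel cpumask and whose serial() callbacks complete in submission order. A compressed C sketch of that sequence (error paths trimmed; the padata API has changed across kernel versions, so treat the exact names and signatures as illustrative, not authoritative):

    #include <linux/padata.h>

    struct example_job {
        struct padata_priv padata;      /* embedded padata bookkeeping */
        /* job-specific data ... */
    };

    static void example_parallel(struct padata_priv *padata)
    {
        /* Runs on one CPU of the parallel cpumask; when the parallel part
         * is done, hand the job back for in-order serialization. */
        padata_do_serial(padata);
    }

    static void example_serial(struct padata_priv *padata)
    {
        /* Runs in the order the jobs were originally submitted. */
    }

    static int example_submit(struct padata_shell *ps, struct example_job *job)
    {
        int cb_cpu = 0;                 /* callback CPU choice simplified */

        job->padata.parallel = example_parallel;
        job->padata.serial   = example_serial;
        return padata_do_parallel(ps, &job->padata, &cb_cpu);
    }

    /* Setup: one instance for overall control, one shell per independent
     * series of jobs (torn down with padata_free_shell()/padata_free()). */
    static struct padata_instance *pinst;
    static struct padata_shell *ps;

    static int example_init(void)
    {
        pinst = padata_alloc("example");
        if (!pinst)
            return -ENOMEM;
        ps = padata_alloc_shell(pinst);
        return ps ? 0 : -ENOMEM;
    }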
|
/openbmc/qemu/docs/devel/testing/ |
ci-runners.rst.inc
      4: Besides the jobs run under the various CI systems listed before, there
      5: are a number additional jobs that will run before an actual merge.
      7: other GitLab based CI jobs, but rely on additional systems, not the
     12: care of running jobs created by events such as a push to a branch.
     16: The GitLab CI jobs definition for the custom runners are located under::
     67: will run jobs. The association between a machine and a GitLab project
     97: Tags are very important as they are used to route specific jobs to
|
ci-jobs.rst.inc
     65: * QEMU_JOB_nnnn - variables to be defined in individual jobs
     71: which jobs get run in a pipeline
     74: in stage 1, for use by build jobs in stage 2. Defaults to
    147: the jobs to be manually started from the UI
    150: the jobs immediately, as was the historical behaviour
    156: these artifacts are not already cached, downloading them make the jobs
    163: These variables are primarily to control execution of jobs on
    187: The jobs are configured to use "ccache" by default since this typically
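These fragments describe how per-job QEMU_JOB_nnnn variables and pipeline-level variables decide which jobs run and whether they start manually or immediately. A purely hypothetical .gitlab-ci.yml fragment in that spirit (the job name, variable names and values are invented for illustration and are not copied from QEMU's CI definitions):

    build-example:                      # hypothetical job name
      stage: build
      variables:
        QEMU_JOB_EXAMPLE: 1             # per-job marker, QEMU_JOB_nnnn style
      rules:
        - if: '$EXAMPLE_SKIP_BUILDS'    # pipeline-level switch (invented name)
          when: never
        - if: '$EXAMPLE_MANUAL_START'   # let the job be started from the UI
          when: manual
        - when: on_success              # otherwise run immediately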
|
/openbmc/qemu/ui/ |
vnc-jobs.c
     61: QTAILQ_HEAD(, VncJob) jobs;
    117: QTAILQ_INSERT_TAIL(&queue->jobs, job, next);  (in vnc_job_push())
    127: QTAILQ_FOREACH(job, &queue->jobs, next) {  (in vnc_has_job_locked())
    247: while (QTAILQ_EMPTY(&queue->jobs) && !queue->exit) {  (in vnc_worker_thread_loop())
    251: job = QTAILQ_FIRST(&queue->jobs);  (in vnc_worker_thread_loop())
    330: QTAILQ_REMOVE(&queue->jobs, job, next);  (in vnc_worker_thread_loop())
    344: QTAILQ_INIT(&queue->jobs);  (in vnc_queue_init())
|
/openbmc/linux/drivers/gpu/drm/amd/amdgpu/ |
amdgpu_cs.c
    296: num_ibs[i], &p->jobs[i]);  (in amdgpu_cs_pass1())
    300: p->gang_leader = p->jobs[p->gang_leader_idx];  (in amdgpu_cs_pass1())
    347: job = p->jobs[r];  (in amdgpu_cs_p2_ib())
    582: p->jobs[i]->shadow_va = shadow->shadow_va;  (in amdgpu_cs_p2_shadow())
    583: p->jobs[i]->csa_va = shadow->csa_va;  (in amdgpu_cs_p2_shadow())
    584: p->jobs[i]->gds_va = shadow->gds_va;  (in amdgpu_cs_p2_shadow())
    585: p->jobs[i]->init_shadow =  (in amdgpu_cs_p2_shadow())
    984: amdgpu_job_set_resources(p->jobs[i], p->bo_list->gds_obj,  (in amdgpu_cs_parser_bos())
   1012: struct amdgpu_job *job = p->jobs[i];  (in trace_amdgpu_cs_ibs())
   1085: r = amdgpu_cs_patch_ibs(p, p->jobs[i]);  (in amdgpu_cs_patch_jobs())
    …
|
/openbmc/qemu/ |
job.c
     56: static QLIST_HEAD(, Job) jobs = QLIST_HEAD_INITIALIZER(jobs);
     93: QLIST_HEAD(, Job) jobs;
    117: QLIST_INIT(&txn->jobs);  (in job_txn_new())
    162: QLIST_INSERT_HEAD(&txn->jobs, job, txn_list);  (in job_txn_add_job_locked())
    191: QLIST_FOREACH_SAFE(other_job, &txn->jobs, txn_list, next) {  (in job_txn_apply_locked())
    342: return QLIST_FIRST(&jobs);  (in job_next_locked())
    357: QLIST_FOREACH(job, &jobs, job_list) {  (in job_get_locked())
    437: QLIST_INSERT_HEAD(&jobs, job, job_list);  (in job_create())
    942: QLIST_FOREACH(other_job, &txn->jobs, txn_list) {  (in job_completed_txn_abort_locked())
    952: while (!QLIST_EMPTY(&txn->jobs)) {  (in job_completed_txn_abort_locked())
    …
|
/openbmc/u-boot/tools/buildman/ |
control.py
     41: GetPlural(options.threads), options.jobs, GetPlural(options.jobs))
    269: if not options.jobs:
    270: options.jobs = max(1, (multiprocessing.cpu_count() +
    293: options.threads, options.jobs, gnu_make=gnu_make, checkout=True,
|
/openbmc/openbmc/poky/meta/recipes-extended/bash/bash/ |
0001-changes-to-SIGINT-handler-while-waiting-for-a-child-.patch
     15: jobs.c | 24 ++++++++++++++++--------
     38: diff --git a/jobs.c b/jobs.c
     40: --- a/jobs.c
     41: +++ b/jobs.c
|
/openbmc/openbmc/meta-openembedded/meta-oe/recipes-extended/parallel/ |
parallel_20241222.bb
      1: SUMMARY = "GNU Parallel - A shell tool for executing jobs in parallel using one or more computers"
      2: DESCRIPTION = "GNU Parallel is a command-line tool for executing jobs in parallel on one or more co…
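For context on what the recipe packages, a typical invocation looks like this (example commands only, not part of the recipe):

    # Compress every .log file, one gzip job per file, across the CPU cores:
    parallel gzip ::: *.log

    # Run at most 8 jobs at a time, substituting each input line for {}:
    parallel -j8 ping -c1 {} < hosts.txt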
|
/openbmc/linux/tools/testing/kunit/ |
kunit.py
     46: jobs: int
     92: success = linux.build_kernel(request.jobs,
    448: jobs=cli_args.jobs,
    484: jobs=cli_args.jobs)
|
/openbmc/openbmc-build-scripts/ |
README.md
      3: Build script for CI jobs in Jenkins.
|
/openbmc/linux/Documentation/admin-guide/device-mapper/ |
kcopyd.rst
     10: to set aside for their copy jobs. This is done with a call to
     43: When a user is done with all their copy jobs, they should call
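The two kcopyd.rst fragments refer to creating a kcopyd client before submitting copy jobs and destroying it once they are done. A compressed sketch of that usage (prototypes abbreviated from include/linux/dm-kcopyd.h and not verified against any particular kernel version, so treat them as assumptions):

    #include <linux/dm-kcopyd.h>

    static void copy_done(int read_err, unsigned long write_err, void *context)
    {
        /* Called once this copy job has finished (or failed). */
    }

    static int example_copy(struct dm_io_region *from, struct dm_io_region *to)
    {
        struct dm_kcopyd_client *kc;

        /* "Set aside" resources for this user's copy jobs. */
        kc = dm_kcopyd_client_create(NULL);
        if (IS_ERR(kc))
            return PTR_ERR(kc);

        /* One destination region, no flags; completion reported via copy_done(). */
        dm_kcopyd_copy(kc, from, 1, to, 0, copy_done, NULL);

        /* Real code waits for copy_done() on every outstanding job before
         * tearing the client down; that synchronization is omitted here. */
        dm_kcopyd_client_destroy(kc);
        return 0;
    }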
|
/openbmc/qemu/tests/vm/ |
basevm.py
    105: mem = max(4, args.jobs)
    151: if args.jobs and args.jobs > 1:
    152: self._args += ["-smp", "%d" % args.jobs]
    657: jobs=int(args.jobs),
|
/openbmc/qemu/roms/ |
edk2-build.py
    179: def build_one(cfg, build, jobs = None, silent = False, nologs = False):  (argument)
    191: if jobs:
    192: cmdline += [ '-n', jobs ]
    429: build_one(cfg, build, options.jobs, options.silent, options.nologs)
|
/openbmc/linux/Documentation/gpu/ |
automated_testing.rst
     92: 4. The various jobs will be run and when the pipeline is finished, all jobs
    131: jobs from a branch in the target tree that is named as
    142: otherwise pass, one can disable all jobs that would be submitted to that farm
|