
Searched for refs:workers (results 1–25 of 28), sorted by relevance.


/openbmc/qemu/tests/qemu-iotests/
257.out
33 … "job-id": "ref_backup_0", "sync": "full", "target": "ref_target_0", "x-perf": {"max-workers": 1}}}
80 … "job-id": "ref_backup_1", "sync": "full", "target": "ref_target_1", "x-perf": {"max-workers": 1}}}
94 …"job-id": "backup_1", "sync": "bitmap", "target": "backup_target_1", "x-perf": {"max-workers": 1}}}
218 … "job-id": "ref_backup_2", "sync": "full", "target": "ref_target_2", "x-perf": {"max-workers": 1}}}
232 …"job-id": "backup_2", "sync": "bitmap", "target": "backup_target_2", "x-perf": {"max-workers": 1}}}
302 … "job-id": "ref_backup_0", "sync": "full", "target": "ref_target_0", "x-perf": {"max-workers": 1}}}
349 … "job-id": "ref_backup_1", "sync": "full", "target": "ref_target_1", "x-perf": {"max-workers": 1}}}
365 …"job-id": "backup_1", "sync": "bitmap", "target": "backup_target_1", "x-perf": {"max-workers": 1}}}
425 … "job-id": "ref_backup_2", "sync": "full", "target": "ref_target_2", "x-perf": {"max-workers": 1}}}
439 …"job-id": "backup_2", "sync": "bitmap", "target": "backup_target_2", "x-perf": {"max-workers": 1}}}
[all …]
/openbmc/linux/Documentation/core-api/
workqueue.rst
34 number of workers as the number of CPUs. The kernel grew a lot of MT
118 number of the currently runnable workers. Generally, work items are
122 workers on the CPU, the worker-pool doesn't start execution of a new
125 are pending work items. This allows using a minimal number of workers
128 Keeping idle workers around doesn't cost other than the memory space
140 Forward progress guarantee relies on that workers can be created when
142 through the use of rescue workers. All work items which might be used
169 worker-pools which host workers which are not bound to any
178 of mostly unused workers across different CPUs as the issuer
200 each other. Each maintains its separate pool of workers and
[all …]
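
The workqueue.rst excerpt above describes the kernel's concurrency-managed worker pools. As a minimal sketch of the client side of that API (the handler name and module boilerplate are illustrative, not from the file; the workqueue calls themselves are the real interface):

    /* Queue one work item on the system workqueue; the concurrency-
     * managed pool wakes or creates a worker only when no worker on
     * that CPU is already runnable.
     */
    #include <linux/module.h>
    #include <linux/workqueue.h>

    static void my_work_fn(struct work_struct *work)
    {
            pr_info("ran on a pool worker\n");      /* process context */
    }

    static DECLARE_WORK(my_work, my_work_fn);

    static int __init demo_init(void)
    {
            schedule_work(&my_work);                /* hand off to the pool */
            return 0;
    }

    static void __exit demo_exit(void)
    {
            flush_work(&my_work);                   /* wait until it has run */
    }

    module_init(demo_init);
    module_exit(demo_exit);
    MODULE_LICENSE("GPL");
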
/openbmc/linux/tools/testing/selftests/bpf/
test_progs.c
434 if (verbose() && !env.workers) in test__end_subtest()
844 env->workers = atoi(arg); in parse_arg()
845 if (!env->workers) { in parse_arg()
850 env->workers = get_nprocs(); in parse_arg()
1022 for (i = 0; i < env.workers; i++) in sigint_handler()
1384 dispatcher_threads = calloc(sizeof(pthread_t), env.workers); in server_main()
1385 data = calloc(sizeof(struct dispatch_data), env.workers); in server_main()
1387 env.worker_current_test = calloc(sizeof(int), env.workers); in server_main()
1388 for (i = 0; i < env.workers; i++) { in server_main()
1401 for (i = 0; i < env.workers; i++) { in server_main()
[all …]
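
The test_progs.c fragments above size per-worker arrays by env.workers and spawn one dispatcher thread per worker. A self-contained sketch of that pattern (run_worker, spawn_dispatchers and nworkers are illustrative names, not from the file):

    /* Spawn one dispatcher thread per worker process, then join them. */
    #include <pthread.h>
    #include <stdlib.h>

    static void *run_worker(void *arg)
    {
            /* dispatch tests to one worker process here */
            return NULL;
    }

    static int spawn_dispatchers(int nworkers)
    {
            pthread_t *threads = calloc(nworkers, sizeof(pthread_t));
            int i;

            if (!threads)
                    return -1;
            for (i = 0; i < nworkers; i++)
                    if (pthread_create(&threads[i], NULL, run_worker, NULL))
                            return -1;      /* sketch: no partial cleanup */
            for (i = 0; i < nworkers; i++)
                    pthread_join(threads[i], NULL);
            free(threads);
            return 0;
    }
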
test_progs.h
125 int workers; /* number of worker processes */ member
/openbmc/linux/fs/erofs/
Kconfig
129 bool "EROFS per-cpu decompression kthread workers"
132 Saying Y here enables per-CPU kthread workers pool to carry out
138 bool "EROFS high priority per-CPU kthread workers"
142 This permits EROFS to configure per-CPU kthread workers to run
/openbmc/openbmc/poky/meta-selftest/recipes-devtools/python/
python-async-test.inc
1 SUMMARY = "Python framework to process interdependent tasks in a pool of workers"
/openbmc/openbmc/poky/documentation/ref-manual/
release-process.rst
213 takes the Autobuilder workers several hours.
217 The Autobuilder workers are non-homogeneous, which means regular
/openbmc/linux/net/l2tp/
Kconfig
23 with home workers to connect to their offices.
/openbmc/linux/drivers/md/
raid5.h
514 struct r5worker *workers; member
raid5.c
203 group->workers[0].working = true; in raid5_wakeup_stripe_thread()
205 queue_work_on(sh->cpu, raid5_wq, &group->workers[0].work); in raid5_wakeup_stripe_thread()
210 if (group->workers[i].working == false) { in raid5_wakeup_stripe_thread()
211 group->workers[i].working = true; in raid5_wakeup_stripe_thread()
213 &group->workers[i].work); in raid5_wakeup_stripe_thread()
7271 kfree(old_groups[0].workers); in raid5_store_group_thread_cnt()
7308 struct r5worker *workers; in alloc_thread_groups() local
7317 workers = kcalloc(size, *group_cnt, GFP_NOIO); in alloc_thread_groups()
7320 if (!*worker_groups || !workers) { in alloc_thread_groups()
7321 kfree(workers); in alloc_thread_groups()
[all …]
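
The raid5.c fragments above pick an idle worker from a group before queuing more work. A hedged reconstruction of that pattern (struct layout abridged and renamed; queue_work_on() is the real kernel call):

    #include <linux/workqueue.h>

    struct demo_worker {
            struct work_struct work;
            bool working;
    };

    struct demo_worker_group {
            struct demo_worker *workers;
            int count;
    };

    /* Mark the first idle worker busy and queue its work item on the
     * given CPU; if every worker is busy, the pending work waits.
     */
    static void wake_one_worker(struct demo_worker_group *group, int cpu,
                                struct workqueue_struct *wq)
    {
            int i;

            for (i = 0; i < group->count; i++) {
                    if (!group->workers[i].working) {
                            group->workers[i].working = true;
                            queue_work_on(cpu, wq, &group->workers[i].work);
                            break;
                    }
            }
    }
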
/openbmc/qemu/docs/devel/testing/
fuzzing.rst
69 * ``-jobs=4 -workers=4`` : These arguments configure libFuzzer to run 4 fuzzers in
71 ``-jobs=N``, libFuzzer automatically spawns a number of workers less than or equal
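
For illustration, a hypothetical invocation such as ./qemu-fuzz-i386 --fuzz-target=generic-fuzz -jobs=4 -workers=4 would let libFuzzer keep up to four fuzzing processes running in parallel; the exact binary and target names depend on the build.
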
/openbmc/openbmc/poky/meta/classes-global/
uninative.bbclass
133 This event handler is called in the workers and is responsible for setting
/openbmc/linux/fs/btrfs/
fs.h
545 struct btrfs_workqueue *workers; member
bio.c
641 btrfs_queue_work(fs_info->workers, &async->work); in btrfs_wq_submit_bio()
disk-io.c
1759 btrfs_destroy_workqueue(fs_info->workers); in btrfs_stop_all_workers()
1952 fs_info->workers = in btrfs_init_workqueues()
1991 if (!(fs_info->workers && in btrfs_init_workqueues()
super.c
1637 btrfs_workqueue_set_max(fs_info->workers, new_pool_size); in btrfs_resize_thread_pool()
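
Taken together, the four btrfs fragments above trace the lifecycle of fs_info->workers. A hedged outline using only the helper names visible in the snippets (the allocation call itself is truncated above, so its arguments are elided rather than guessed):

    /* mount   (btrfs_init_workqueues):    fs_info->workers = ...allocated...
     * submit  (btrfs_wq_submit_bio):      btrfs_queue_work(fs_info->workers, &async->work)
     * remount (btrfs_resize_thread_pool): btrfs_workqueue_set_max(fs_info->workers, new_pool_size)
     * unmount (btrfs_stop_all_workers):   btrfs_destroy_workqueue(fs_info->workers)
     */
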
/openbmc/linux/drivers/block/mtip32xx/
mtip32xx.c
733 int do_irq_enable = 1, i, workers; in mtip_handle_irq() local
754 for (i = 0, workers = 0; i < MTIP_MAX_SLOT_GROUPS; in mtip_handle_irq()
759 workers++; in mtip_handle_irq()
762 atomic_set(&dd->irq_workers_active, workers); in mtip_handle_irq()
763 if (workers) { in mtip_handle_irq()
/openbmc/qemu/docs/specs/
rapl-msr.rst
74 energy spent by the QEMU workers.
/openbmc/linux/kernel/
workqueue.c
187 struct list_head workers; /* A: attached workers */ member
490 list_for_each_entry((worker), &(pool)->workers, node) \
2118 list_add_tail(&worker->node, &pool->workers); in worker_attach_to_pool()
2143 if (list_empty(&pool->workers) && list_empty(&pool->dying_workers)) in worker_detach_from_pool()
3895 INIT_LIST_HEAD(&pool->workers); in init_worker_pool()
4037 if (!list_empty(&pool->workers) || !list_empty(&pool->dying_workers)) in put_unbound_pool()
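
The workqueue.c fragments above show each pool tracking its attached workers on a list and refusing to be released while either list is non-empty. A hedged sketch of that bookkeeping (names abridged; the pool lock held in the real code is elided):

    #include <linux/list.h>
    #include <linux/types.h>

    struct demo_pool_worker {
            struct list_head node;
    };

    struct demo_pool {
            struct list_head workers;          /* attached workers */
            struct list_head dying_workers;
    };

    /* attach: join the pool's list of live workers */
    static void demo_attach(struct demo_pool *pool, struct demo_pool_worker *w)
    {
            list_add_tail(&w->node, &pool->workers);
    }

    /* the pool may be released only once both lists have drained */
    static bool demo_pool_unused(struct demo_pool *pool)
    {
            return list_empty(&pool->workers) &&
                   list_empty(&pool->dying_workers);
    }
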
/openbmc/linux/Documentation/admin-guide/
workload-tracing.rst
126 starts specified number (N) of workers that exercise various netdevice
264 The netdev stressor starts N workers that exercise various netdevice ioctl
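
As an illustrative invocation (see the stress-ng manual for the authoritative flags), stress-ng --netdev 4 --timeout 60s would start four such netdev workers for one minute.
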
kernel-per-CPU-kthreads.rst
262 d. As of v3.18, Christoph Lameter's on-demand vmstat workers
/openbmc/openbmc/poky/documentation/test-manual/
intro.rst
79 topology that includes a controller and a cluster of workers:
513 workers, consider the following:
/openbmc/linux/Documentation/dev-tools/
kcov.rst
248 exits (e.g. vhost workers).
/openbmc/openbmc/poky/bitbake/lib/bb/
runqueue.py
3324 for workers, name in [(self.rq.worker, "Worker"), (self.rq.fakeworker, "Fakeroot")]:
3325 for worker in workers.values():
/openbmc/qemu/qapi/
block-core.json
1544 # @max-workers: Maximum number of parallel requests for the sustained
1562 'data': { '*use-copy-range': 'bool', '*max-workers': 'int',
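
Tying this back to the 257.out transcripts earlier in this listing, a hypothetical QMP command using the knob could look like {"execute": "blockdev-backup", "arguments": {"job-id": "backup0", "device": "drv0", "target": "tgt0", "sync": "full", "x-perf": {"max-workers": 4}}}; the device and target node names here are illustrative.
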
