/openbmc/linux/tools/testing/selftests/cgroup/ |
memcg_protection.m
     6  % hierarchy to illustrate how overcommitted protection spreads among siblings
     9  % Simulation assumes siblings consumed the initial amount of memory (w/out
    16  % n vector nominal protection of siblings set at the given level (memory.low)
    48  siblings = sum(u);   variable
    52  e = protected * min(1, E / siblings); % normalize overcommit
    55  unclaimed = max(0, E - siblings);
    56  parent_overuse = sum(c) - siblings;
    79  % XXX here I do parallel reclaim of all siblings
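The key step in the script is the overcommit normalization on line 52: when the siblings' sum (computed on line 48) exceeds the parent's effective protection E, every child's protection is scaled down by the same min(1, E / siblings) factor. A minimal C sketch of just that step; the names protected, E, and siblings come from the script, everything else is illustrative:

#include <stdio.h>

/*
 * Mirror of line 52 above: e = protected * min(1, E / siblings);
 * when the siblings' sum exceeds the parent's effective protection E,
 * each child's protection is scaled down proportionally.
 */
static double effective_protection(double protected, double E,
                                   double siblings)
{
    double scale = (siblings > 0.0 && E / siblings < 1.0)
                       ? E / siblings : 1.0;   /* min(1, E / siblings) */
    return protected * scale;
}

int main(void)
{
    /* two children each asking for 60 under a parent with E = 100 */
    printf("%.1f\n", effective_protection(60.0, 100.0, 120.0)); /* 50.0 */
    return 0;
}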
|
/openbmc/linux/drivers/infiniband/hw/irdma/ |
ws.c
   133  list_for_each_entry(node, &parent->child_list_head, siblings) {   in ws_find_node()
   139  list_for_each_entry(node, &parent->child_list_head, siblings) {   in ws_find_node()
   214  list_del(&tc_node->siblings);   in irdma_remove_leaf()
   219  list_del(&vsi_node->siblings);   in irdma_remove_leaf()
   295  list_add(&vsi_node->siblings, &ws_tree_root->child_list_head);   in irdma_ws_add()
   322  list_add(&tc_node->siblings, &vsi_node->child_list_head);   in irdma_ws_add()
   356  list_del(&tc_node->siblings);   in irdma_ws_add()
   362  list_del(&vsi_node->siblings);   in irdma_ws_add()
|
ws.h
    19  struct list_head siblings;   member
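The ws.h hit shows the kernel idiom behind all the ws.c calls above: each tree node embeds a struct list_head named siblings that links it onto its parent's child_list_head. A self-contained userspace sketch of that intrusive-list pattern, with a minimal stand-in for <linux/list.h>; the field names mirror the hits, but nothing here is the actual irdma code:

#include <stdio.h>
#include <stddef.h>

/* tiny stand-in for the kernel's <linux/list.h> */
struct list_head { struct list_head *next, *prev; };

static void INIT_LIST_HEAD(struct list_head *h) { h->next = h->prev = h; }

static void list_add(struct list_head *n, struct list_head *head)
{
    n->next = head->next; n->prev = head;
    head->next->prev = n; head->next = n;
}

static void list_del(struct list_head *n)
{
    n->prev->next = n->next; n->next->prev = n->prev;
    n->next = n->prev = NULL;   /* the kernel poisons instead */
}

#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

/* a node embeds 'siblings' to sit on its parent's child list */
struct ws_node {
    int id;
    struct list_head siblings;        /* links into parent's children */
    struct list_head child_list_head; /* this node's own children */
};

int main(void)
{
    struct ws_node parent = { .id = 0 }, a = { .id = 1 }, b = { .id = 2 };
    struct list_head *pos;

    INIT_LIST_HEAD(&parent.child_list_head);
    list_add(&a.siblings, &parent.child_list_head);
    list_add(&b.siblings, &parent.child_list_head);

    /* open-coded list_for_each_entry(node, &parent.child_list_head, siblings) */
    for (pos = parent.child_list_head.next; pos != &parent.child_list_head;
         pos = pos->next) {
        struct ws_node *node = container_of(pos, struct ws_node, siblings);
        printf("child %d\n", node->id);
    }

    list_del(&a.siblings);   /* unlink one child, as irdma_remove_leaf() does */
    return 0;
}

list_for_each_entry() in the kernel expands to essentially the open-coded loop above, using container_of() to get from the embedded list_head back to the containing object.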
|
/openbmc/linux/drivers/gpu/drm/i915/gt/uc/ |
selftest_guc_multi_lrc.c
    34  struct intel_engine_cs *siblings[MAX_ENGINE_INSTANCE + 1];   in multi_lrc_create_parent()   local
    43  siblings[i++] = engine;   in multi_lrc_create_parent()
    49  logical_sort(siblings, i);   in multi_lrc_create_parent()
    51  return intel_engine_create_parallel(siblings, 1, i);   in multi_lrc_create_parent()
|
intel_guc_submission.c
   142  guc_create_virtual(struct intel_engine_cs **siblings, unsigned int count,
  3991  struct intel_engine_cs **siblings = NULL;   in guc_create_parallel()   local
  3995  siblings = kmalloc_array(num_siblings,   in guc_create_parallel()
  3996          sizeof(*siblings),   in guc_create_parallel()
  3998  if (!siblings)   in guc_create_parallel()
  4003  siblings[j] = engines[i * num_siblings + j];   in guc_create_parallel()
  4005  ce = intel_engine_create_virtual(siblings, num_siblings,   in guc_create_parallel()
  4037  kfree(siblings);   in guc_create_parallel()
  4043  kfree(siblings);   in guc_create_parallel()
  5513  guc_create_virtual(struct intel_engine_cs **siblings, unsigned int count,   in guc_create_virtual()   argument
  [all …]
|
/openbmc/linux/drivers/gpu/drm/i915/gem/ |
i915_gem_context.c
   205  kfree(pc->user_engines[i].siblings);   in proto_context_close()
   404  struct intel_engine_cs **siblings;   in set_proto_ctx_engines_balance()   local
   442  siblings = kmalloc_array(num_siblings, sizeof(*siblings), GFP_KERNEL);   in set_proto_ctx_engines_balance()
   443  if (!siblings)   in set_proto_ctx_engines_balance()
   454  siblings[n] = intel_engine_lookup_user(i915,   in set_proto_ctx_engines_balance()
   457  if (!siblings[n]) {   in set_proto_ctx_engines_balance()
   468  set->engines[idx].engine = siblings[0];   in set_proto_ctx_engines_balance()
   469  kfree(siblings);   in set_proto_ctx_engines_balance()
   473  set->engines[idx].siblings = siblings;   in set_proto_ctx_engines_balance()
   479  kfree(siblings);   in set_proto_ctx_engines_balance()
  [all …]
|
i915_gem_context_types.h
   122  struct intel_engine_cs **siblings;   member
|
/openbmc/linux/drivers/gpu/drm/i915/gt/ |
selftest_execlists.c
  3712  struct intel_engine_cs **siblings,   in nop_virtual_engine()   argument
  3729  ve[n] = intel_engine_create_virtual(siblings, nsibling, 0);   in nop_virtual_engine()
  3840  struct intel_engine_cs **siblings,   in __select_siblings()   argument
  3853  siblings[n++] = gt->engine_class[class][inst];   in __select_siblings()
  3862  struct intel_engine_cs **siblings)   in select_siblings()   argument
  3864  return __select_siblings(gt, class, siblings, NULL);   in select_siblings()
  3870  struct intel_engine_cs *siblings[MAX_ENGINE_INSTANCE + 1];   in live_virtual_engine()   local
  3891  nsibling = select_siblings(gt, class, siblings);   in live_virtual_engine()
  3896  err = nop_virtual_engine(gt, siblings, nsibling,   in live_virtual_engine()
  3902  err = nop_virtual_engine(gt, siblings, nsibling, n, CHAIN);   in live_virtual_engine()
  [all …]
|
intel_execlists_submission.c
   198  struct intel_engine_cs *siblings[];   member
   208  execlists_create_virtual(struct intel_engine_cs **siblings, unsigned int count,
  1054  if (likely(engine == ve->siblings[0]))   in virtual_xfer_context()
  1068  if (ve->siblings[n] == engine) {   in virtual_xfer_context()
  1069  swap(ve->siblings[n], ve->siblings[0]);   in virtual_xfer_context()
  1443  str_yes_no(engine != ve->siblings[0]));   in execlists_dequeue()
  1470  GEM_BUG_ON(ve->siblings[0] != engine);   in execlists_dequeue()
  3636  struct intel_engine_cs *sibling = ve->siblings[n];   in rcu_virtual_context_destroy()
  3705  swap(ve->siblings[swp], ve->siblings[0]);   in virtual_engine_initial_hint()
  3712  return lrc_alloc(ce, ve->siblings[0]);   in virtual_context_alloc()
  [all …]
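A pattern worth noting in these hits: the virtual engine keeps the engine that last ran the context at siblings[0] (the likely() test in virtual_xfer_context()) and swaps a different engine to the front when it takes over, so the common case is a single pointer compare. A hedged sketch of that move-to-front step, using neutral pointer types since the real code operates on struct intel_engine_cs:

#include <stdio.h>

/* Keep the most recently used sibling at index 0 so the common case
 * is one compare; otherwise swap the new one to the front -- the
 * shape of virtual_xfer_context() above, names aside. */
static void xfer_to_front(const void *engine, const void **siblings,
                          unsigned int count)
{
    unsigned int n;

    if (engine == siblings[0])      /* likely() in the real code */
        return;

    for (n = 1; n < count; n++) {
        if (siblings[n] == engine) {
            const void *tmp = siblings[0];

            siblings[0] = siblings[n];
            siblings[n] = tmp;      /* swap() in the real code */
            break;
        }
    }
}

int main(void)
{
    int a, b, c;
    const void *siblings[] = { &a, &b, &c };

    xfer_to_front(&c, siblings, 3);
    printf("%s\n", siblings[0] == &c ? "c now first" : "unchanged");
    return 0;
}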
|
/openbmc/linux/drivers/gpio/ |
gpio-sim.c
   566  struct list_head siblings;   member
   600  struct list_head siblings;   member
   694  list_for_each_entry(line, &bank->line_list, siblings) {   in gpio_sim_get_line_names_size()
   709  list_for_each_entry(line, &bank->line_list, siblings) {   in gpio_sim_set_line_names()
   742  list_for_each_entry(bank, &dev->bank_list, siblings) {   in gpio_sim_add_hogs()
   743  list_for_each_entry(line, &bank->line_list, siblings) {   in gpio_sim_add_hogs()
   760  list_for_each_entry(bank, &dev->bank_list, siblings) {   in gpio_sim_add_hogs()
   761  list_for_each_entry(line, &bank->line_list, siblings) {   in gpio_sim_add_hogs()
   860  list_for_each_entry(this, &dev->bank_list, siblings) {   in gpio_sim_bank_labels_non_unique()
   861  list_for_each_entry(pos, &dev->bank_list, siblings) {   in gpio_sim_bank_labels_non_unique()
  [all …]
|
/openbmc/linux/Documentation/admin-guide/hw-vuln/ |
core-scheduling.rst
   100  siblings of a core such that all the selected tasks running on a core are
   107  the sibling has the task enqueued. For rest of the siblings in the core,
   112  Once a task has been selected for all the siblings in the core, an IPI is sent to
   113  siblings for whom a new task was selected. Siblings on receiving the IPI will
   125  siblings could be forced to select a lower priority task if the highest
   157  and are considered system-wide trusted. The forced-idling of siblings running
   174  the siblings to switch to the new task. But there could be hardware delays in
   176  cause an attacker task to start running on a CPU before its siblings receive the
   177  IPI. Even though cache is flushed on entry to user mode, victim tasks on siblings
   185  Core scheduling cannot protect against MDS attacks between the siblings
  [all …]
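The document describes the user-facing knob only by name; for orientation, a minimal sketch of creating a core-scheduling cookie for the calling task with prctl(2). PR_SCHED_CORE and PR_SCHED_CORE_CREATE come from <linux/prctl.h>; the fallback defines are only for older headers:

#include <stdio.h>
#include <sys/prctl.h>

#ifndef PR_SCHED_CORE               /* fallback for pre-5.14 headers */
#define PR_SCHED_CORE        62
#define PR_SCHED_CORE_CREATE  1
#endif

int main(void)
{
    /* pid 0 == the calling task; 0 as the fourth arg is PIDTYPE_PID */
    if (prctl(PR_SCHED_CORE, PR_SCHED_CORE_CREATE, 0, 0, 0))
        perror("prctl(PR_SCHED_CORE)");
    return 0;
}

Only tasks sharing the resulting cookie (e.g. propagated with PR_SCHED_CORE_SHARE_TO) are allowed to run concurrently on SMT siblings of a core.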
|
/openbmc/linux/arch/x86/kernel/cpu/microcode/ |
core.c
   541  unsigned int cpu, updated = 0, failed = 0, timedout = 0, siblings = 0;   in load_late_stop_cpus()   local
   574  case UCODE_OK: siblings++; break;   in load_late_stop_cpus()
   602  pr_info("load: updated on %u primary CPUs with %u siblings\n", updated, siblings);   in load_late_stop_cpus()
   605  num_online_cpus() - (updated + siblings));   in load_late_stop_cpus()
   610  return updated + siblings == num_online_cpus() ? 0 : -EIO;   in load_late_stop_cpus()
|
/openbmc/u-boot/fs/yaffs2/ |
yaffs_allocator.c
   273  list_add(&new_objs[i].siblings, &allocator->free_objs);   in yaffs_create_free_objs()
   304  obj = list_entry(lh, struct yaffs_obj, siblings);   in yaffs_alloc_raw_obj()
   323  list_add(&obj->siblings, &allocator->free_objs);   in yaffs_free_raw_obj()
|
yaffs_verify.c
   449  list_obj = list_entry(lh, struct yaffs_obj, siblings);   in yaffs_verify_obj_in_dir()
   486  list_obj = list_entry(lh, struct yaffs_obj, siblings);   in yaffs_verify_dir()
|
yaffs_guts.c
  1261  list_del_init(&obj->siblings);   in yaffs_remove_obj_from_dir()
  1283  if (obj->siblings.prev == NULL) {   in yaffs_add_obj_to_dir()
  1293  list_add(&obj->siblings, &directory->variant.dir_variant.children);   in yaffs_add_obj_to_dir()
  1623  if (!list_empty(&obj->siblings))   in yaffs_free_obj()
  1814  INIT_LIST_HEAD(&obj->siblings);   in yaffs_alloc_empty_obj()
  1819  list_add(&(obj->siblings),   in yaffs_alloc_empty_obj()
  4185  l = list_entry(i, struct yaffs_obj, siblings);   in yaffs_strip_deleted_objs()
  4190  l = list_entry(i, struct yaffs_obj, siblings);   in yaffs_strip_deleted_objs()
  4293  obj = list_entry(lh, struct yaffs_obj, siblings);   in yaffs_del_dir_contents()
  4337  l = list_entry(i, struct yaffs_obj, siblings);   in yaffs_find_by_name()
|
/openbmc/linux/drivers/nvme/host/ |
multipath.c
   229  list_for_each_entry_srcu(ns, &head->list, siblings,   in nvme_mpath_revalidate_paths()
   262  list_for_each_entry_srcu(ns, &head->list, siblings,   in __nvme_find_path()
   301  ns = list_next_or_null_rcu(&head->list, &ns->siblings, struct nvme_ns,   in nvme_next_ns()
   302          siblings);   in nvme_next_ns()
   305  return list_first_or_null_rcu(&head->list, struct nvme_ns, siblings);   in nvme_next_ns()
   362  list_for_each_entry_srcu(ns, &head->list, siblings,   in nvme_queue_depth_path()
   431  list_for_each_entry_srcu(ns, &head->list, siblings,   in nvme_available_path()
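nvme_next_ns() above is the wrap-around step of round-robin path selection: take the namespace after the current one on head->list, and fall back to the first entry when the end is reached. Reduced to an index sketch; this is illustrative only, the nvme code walks an RCU-protected list:

#include <stdio.h>

/* Round-robin advance with wrap-around: the array analogue of
 * list_next_or_null_rcu() falling back to list_first_or_null_rcu()
 * in nvme_next_ns(). */
static unsigned int next_path(unsigned int prev, unsigned int npaths)
{
    return (prev + 1 < npaths) ? prev + 1 : 0;
}

int main(void)
{
    printf("%u %u %u\n", next_path(0, 3), next_path(1, 3),
           next_path(2, 3));   /* 1 2 0 */
    return 0;
}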
|
/openbmc/linux/include/scsi/ |
scsi_device.h
   108  struct list_head siblings;   /* list of all devices on this host */   member
   336  struct list_head siblings;   member
   444  list_for_each_entry((sdev), &((shost)->__devices), siblings)
|
/openbmc/linux/drivers/scsi/ |
scsi_scan.c
   302  INIT_LIST_HEAD(&sdev->siblings);   in scsi_alloc_sdev()
   401  list_del_init(&starget->siblings);   in scsi_target_destroy()
   434  list_for_each_entry(starget, &shost->__targets, siblings) {   in __scsi_find_target()
   519  INIT_LIST_HEAD(&starget->siblings);   in scsi_alloc_target()
   531  list_add_tail(&starget->siblings, &shost->__targets);   in scsi_alloc_target()
  2034  list_for_each_entry(sdev, &shost->__devices, siblings) {   in scsi_forget_host()
|
scsi.c
   799  struct list_head *list = (prev ? &prev->siblings : &shost->__devices);   in __scsi_iterate_devices()
   805  next = list_entry(list->next, struct scsi_device, siblings);   in __scsi_iterate_devices()
   950  list_for_each_entry(sdev, &shost->__devices, siblings) {   in __scsi_device_lookup()
|
/openbmc/linux/kernel/ |
Kconfig.preempt
   123  selection across SMT siblings. When enabled -- see
   124  prctl(PR_SCHED_CORE) -- task selection ensures that all SMT siblings
|
/openbmc/linux/Documentation/admin-guide/pm/ |
intel_epb.rst
    40  example, SMT siblings or cores in one package). For this reason, updating the
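intel_epb.rst documents the per-CPU energy_perf_bias sysfs attribute this hit belongs to. As a concrete illustration, a hedged C sketch that writes a new EPB value for one CPU via that attribute (0 = maximum performance preference, 15 = maximum energy saving; requires root):

#include <stdio.h>

/* Set the energy/performance bias hint for one CPU via the sysfs
 * attribute described in intel_epb.rst. Sketch only. */
static int set_epb(int cpu, int epb)
{
    char path[64];
    FILE *f;

    snprintf(path, sizeof(path),
             "/sys/devices/system/cpu/cpu%d/power/energy_perf_bias", cpu);
    f = fopen(path, "w");
    if (!f)
        return -1;
    fprintf(f, "%d\n", epb);
    return fclose(f);
}

int main(void)
{
    return set_epb(0, 6) ? 1 : 0;   /* 6 is the "normal" hint */
}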
|
/openbmc/linux/drivers/scsi/libsas/ |
sas_expander.c
   834  list_add_tail(&child->siblings, &parent_ex->children);   in sas_ex_discover_end_dev()
   944  list_add_tail(&child->siblings, &parent->ex_dev.children);   in sas_ex_discover_expander()
  1072  list_for_each_entry(child, &ex->children, siblings) {   in sas_check_level_subtractive_boundary()
  1800  list_for_each_entry(ch, &ex->children, siblings) {   in sas_find_bcast_dev()
  1816  list_for_each_entry_safe(child, n, &ex->children, siblings) {   in sas_unregister_ex_tree()
  1834  &ex_dev->children, siblings) {   in sas_unregister_devs_sas_addr()
  1865  list_for_each_entry(child, &ex_root->children, siblings) {   in sas_discover_bfs_by_root_level()
  1916  list_for_each_entry(child, &dev->ex_dev.children, siblings) {   in sas_discover_new()
|
sas_internal.h
   214  INIT_LIST_HEAD(&dev->siblings);   in sas_alloc_device()
|
/openbmc/linux/Documentation/networking/devlink/ |
devlink-port.rst
   369  Allows for usage of strict priority arbiter among siblings. This
   377  siblings. This arbitration scheme can be used simultaneously with the
   379  BW relative to its siblings. Values are relative like a percentage
   381  its siblings.
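The devlink text says the bandwidth values "are relative like a percentage": a sibling's share of the parent's bandwidth is its value divided by the sum across all siblings. A small worked sketch of that arithmetic; the weights are made up for illustration and are not part of the devlink API:

#include <stdio.h>

/* Relative bandwidth shares among siblings: sibling i gets
 * weight[i] / sum(weight) of the parent's bandwidth. */
int main(void)
{
    const unsigned int weight[] = { 20, 30, 50 };   /* three siblings */
    const int n = 3;
    unsigned int sum = 0;
    int i;

    for (i = 0; i < n; i++)
        sum += weight[i];
    for (i = 0; i < n; i++)
        printf("sibling %d: %.0f%% of parent BW\n",
               i, 100.0 * weight[i] / sum);   /* 20% 30% 50% */
    return 0;
}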
|
/openbmc/linux/net/sched/ |
sch_hfsc.c
   123  struct list_head siblings;   /* sibling classes */   member
   854  list_for_each_entry(p, &cl->children, siblings) {   in hfsc_adjust_levels()
  1079  list_add_tail(&cl->siblings, &parent->children);   in hfsc_change_class()
  1118  list_del(&cl->siblings);   in hfsc_delete_class()
|