/openbmc/linux/net/netfilter/ipvs/
  ip_vs_sched.c
     41  struct ip_vs_scheduler *scheduler)  in ip_vs_bind_scheduler() argument
     45  if (scheduler->init_service) {  in ip_vs_bind_scheduler()
     46  ret = scheduler->init_service(svc);  in ip_vs_bind_scheduler()
     52  rcu_assign_pointer(svc->scheduler, scheduler);  in ip_vs_bind_scheduler()
    135  if (scheduler)  in ip_vs_scheduler_put()
    136  module_put(scheduler->module);  in ip_vs_scheduler_put()
    171  if (!scheduler) {  in register_ip_vs_scheduler()
    176  if (!scheduler->name) {  in register_ip_vs_scheduler()
    191  __func__, scheduler->name);  in register_ip_vs_scheduler()
    225  if (!scheduler) {  in unregister_ip_vs_scheduler()
    [all …]
/openbmc/linux/drivers/gpu/drm/i915/gvt/
  sched_policy.c
    134  struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;  in try_to_schedule_next_vgpu() local
    143  if (scheduler->next_vgpu == scheduler->current_vgpu) {  in try_to_schedule_next_vgpu()
    144  scheduler->next_vgpu = NULL;  in try_to_schedule_next_vgpu()
    166  scheduler->current_vgpu = scheduler->next_vgpu;  in try_to_schedule_next_vgpu()
    167  scheduler->next_vgpu = NULL;  in try_to_schedule_next_vgpu()
    214  struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;  in tbs_sched_func() local
    224  scheduler->next_vgpu = vgpu;  in tbs_sched_func()
    236  if (scheduler->next_vgpu)  in tbs_sched_func()
    280  &gvt->scheduler;  in tbs_sched_init()
    302  &gvt->scheduler;  in tbs_sched_clean()
    [all …]
  scheduler.c
    292  struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;  in shadow_context_status_change() local
    850  struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;  in pick_next_workload() local
    859  if (!scheduler->current_vgpu) {  in pick_next_workload()
    864  if (scheduler->need_reschedule) {  in pick_next_workload()
   1068  struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;  in complete_current_workload() local
   1143  if (gvt->scheduler.need_reschedule)  in complete_current_workload()
   1155  struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;  in workload_thread() local
   1232  struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;  in intel_gvt_wait_vgpu_idle() local
   1244  struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;  in intel_gvt_clean_workload_scheduler() local
   1254  kthread_stop(scheduler->thread[i]);  in intel_gvt_clean_workload_scheduler()
    [all …]
/openbmc/linux/Documentation/block/
  switching-sched.rst
      5  Each io queue has a set of io scheduler tunables associated with it. These
      6  tunables control how the io scheduler works. You can find these entries
     16  It is possible to change the IO scheduler for a given block device on
     20  To set a specific scheduler, simply do this::
     22      echo SCHEDNAME > /sys/block/DEV/queue/scheduler
     24  where SCHEDNAME is the name of a defined IO scheduler, and DEV is the
     28  a "cat /sys/block/DEV/queue/scheduler" - the list of valid names
     29  will be displayed, with the currently selected scheduler in brackets::
     31      # cat /sys/block/sda/queue/scheduler
     33      # echo none >/sys/block/sda/queue/scheduler
    [all …]
  deadline-iosched.rst
      2  Deadline IO scheduler tunables
      5  This little file attempts to document how the deadline io scheduler works.
     12  selecting an io scheduler on a per-device basis.
     19  The goal of the deadline io scheduler is to attempt to guarantee a start
     21  tunable. When a read request first enters the io scheduler, it is assigned
     49  When we have to move requests from the io scheduler queue to the block
     60  Sometimes it happens that a request enters the io scheduler that is contiguous
     69  rbtree front sector lookup when the io scheduler merge function is called.
  kyber-iosched.rst
      2  Kyber I/O scheduler tunables
      5  The only two tunables for the Kyber scheduler are the target latencies for
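
The switching-sched.rst lines above describe the sysfs file through which the active I/O scheduler of a block device is read and changed. Below is a minimal C sketch of the same operation; the device name "sda", the target scheduler "mq-deadline", and the assumption of root privileges are illustrative, not taken from the excerpt.

    /* Sketch: read and switch the I/O scheduler of one block device via the
     * sysfs file described in switching-sched.rst. The device ("sda") and the
     * target scheduler ("mq-deadline") are example values; root is required.
     */
    #include <stdio.h>

    int main(void)
    {
        const char *path = "/sys/block/sda/queue/scheduler";
        char line[256];

        /* List the available schedulers; the active one is shown in brackets. */
        FILE *f = fopen(path, "r");
        if (!f) {
            perror("open scheduler (read)");
            return 1;
        }
        if (fgets(line, sizeof(line), f))
            printf("available: %s", line);
        fclose(f);

        /* Equivalent of "echo mq-deadline > /sys/block/sda/queue/scheduler". */
        f = fopen(path, "w");
        if (!f) {
            perror("open scheduler (write)");
            return 1;
        }
        if (fputs("mq-deadline\n", f) == EOF)
            perror("write scheduler");
        fclose(f);
        return 0;
    }
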
/openbmc/linux/block/
  Kconfig.iosched
      5  tristate "MQ deadline I/O scheduler"
      8  MQ version of the deadline IO scheduler.
     11  tristate "Kyber I/O scheduler"
     14  The Kyber I/O scheduler is a low-overhead scheduler suitable for
     20  tristate "BFQ I/O scheduler"
     23  BFQ I/O scheduler for BLK-MQ. BFQ distributes the bandwidth of
/openbmc/openbmc/poky/meta/recipes-extended/cups/cups/
  0001-use-echo-only-in-init.patch
      9  scheduler/cups.sh.in | 2 +-
     12  diff --git a/scheduler/cups.sh.in b/scheduler/cups.sh.in
     14  --- a/scheduler/cups.sh.in
     15  +++ b/scheduler/cups.sh.in
/openbmc/linux/Documentation/gpu/rfc/
  i915_scheduler.rst
      8  i915 with the DRM scheduler is:
     32  * Convert the i915 to use the DRM scheduler
     33  * GuC submission backend fully integrated with DRM scheduler
     35  handled in DRM scheduler)
     36  * Resets / cancels hook in DRM scheduler
     37  * Watchdog hooks into DRM scheduler
     39  integrated with DRM scheduler (e.g. state machine gets
     47  * ROI low on fully integrating into DRM scheduler
     49  scheduler
     53  * Will be an optional feature in the DRM scheduler
    [all …]
/openbmc/sdbusplus/include/sdbusplus/async/stdexec/__detail/
  __schedulers.hpp
     83  concept scheduler = //
     89  template <scheduler _Scheduler>
     95  { get_scheduler(__sp) } -> scheduler;
    106  static_assert(scheduler<tag_invoke_result_t<get_scheduler_t, const _Env&>>);  in operator ()()
    118  scheduler<tag_invoke_result_t<get_delegatee_scheduler_t, const _Env&>>);  in operator ()()
    132  scheduler<tag_invoke_result_t<get_completion_scheduler_t<_Tag>,  in operator ()()
  __on.hpp
    108  template <scheduler _Scheduler, sender _Sender>
    118  template <sender _Sender, scheduler _Scheduler,
    131  template <scheduler _Scheduler, __sender_adaptor_closure _Closure>
    148  if constexpr (scheduler<_Data>)  in __transform_env_fn()
    166  if constexpr (scheduler<_Data>)  in __transform_sender_fn()
  __continue_on.hpp
     54  template <sender _Sender, scheduler _Scheduler>
     66  template <scheduler _Scheduler>
/openbmc/linux/Documentation/scheduler/
  sched-design-CFS.rst
     10  scheduler implemented by Ingo Molnar and merged in Linux 2.6.23. It is the
     11  replacement for the previous vanilla scheduler's SCHED_OTHER interactivity
     59  previous vanilla scheduler and RSDL/SD are affected).
     79  schedules (or a scheduler tick happens) the task's CPU usage is "accounted
     94  way the previous scheduler had, and has no heuristics whatsoever. There is
    133  idle timer scheduler in order to avoid to get into priority
    148  Classes," an extensible hierarchy of scheduler modules. These modules
    149  encapsulate scheduling policy details and are handled by the scheduler core
    152  sched/fair.c implements the CFS scheduler described above.
    155  the previous vanilla scheduler did. It uses 100 runqueues (for all 100 RT
    [all …]
  sched-nice-design.rst
      6  nice-levels implementation in the new Linux scheduler.
     12  scheduler, (otherwise we'd have done it long ago) because nice level
     16  In the O(1) scheduler (in 2003) we changed negative nice levels to be
     77  With the old scheduler, if you for example started a niced task with +1
     88  The new scheduler in v2.6.23 addresses all three types of complaints:
     91  enough), the scheduler was decoupled from 'time slice' and HZ concepts
     94  support: with the new scheduler nice +19 tasks get a HZ-independent
     96  scheduler.
     99  the new scheduler makes nice(1) have the same CPU utilization effect on
    101  scheduler, running a nice +10 and a nice 11 task has the same CPU
    [all …]
  sched-energy.rst
      8  Energy Aware Scheduling (or EAS) gives the scheduler the ability to predict
     23  The actual EM used by EAS is _not_ maintained by the scheduler, but by a
     50  scheduler. This alternative considers two objectives: energy-efficiency and
     53  The idea behind introducing an EM is to allow the scheduler to evaluate the
     56  time, the EM must be as simple as possible to minimize the scheduler latency
     60  for the scheduler to decide where a task should run (during wake-up), the EM
     71  EAS (as well as the rest of the scheduler) uses the notion of 'capacity' to
     87  The scheduler manages references to the EM objects in the topology code when the
    115  Please note that the scheduler will create two duplicate list nodes for
    121  manipulated by the scheduler.
    [all …]
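
The sched-design-CFS.rst lines above touch on the scheduler's class hierarchy: CFS in sched/fair.c serves SCHED_OTHER, while the real-time policies sit in their own class with per-priority runqueues. As a small, hedged illustration of how a task selects between those classes from userspace, the sketch below uses the standard sched_getscheduler()/sched_setscheduler() calls; the SCHED_FIFO priority of 10 is an arbitrary example, and switching to a real-time policy normally needs CAP_SYS_NICE or root.

    /* Sketch: show the current scheduling policy, move the calling process to
     * SCHED_FIFO, then return it to SCHED_OTHER (the class served by CFS).
     * Priority 10 is only an example; real-time policies need CAP_SYS_NICE.
     */
    #include <sched.h>
    #include <stdio.h>

    static const char *policy_name(int policy)
    {
        switch (policy) {
        case SCHED_OTHER: return "SCHED_OTHER (CFS)";
        case SCHED_FIFO:  return "SCHED_FIFO";
        case SCHED_RR:    return "SCHED_RR";
        default:          return "other";
        }
    }

    int main(void)
    {
        printf("current policy: %s\n", policy_name(sched_getscheduler(0)));

        struct sched_param rt = { .sched_priority = 10 };
        if (sched_setscheduler(0, SCHED_FIFO, &rt) != 0)
            perror("sched_setscheduler(SCHED_FIFO)");
        else
            printf("now running as: %s\n", policy_name(sched_getscheduler(0)));

        struct sched_param other = { .sched_priority = 0 };
        sched_setscheduler(0, SCHED_OTHER, &other);
        return 0;
    }
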
/openbmc/u-boot/doc/
  README.sched
      1  Notes on the scheduler in sched.c:
      4  'sched.c' provides an very simplistic multi-threading scheduler.
     23  - The scheduler is NOT transparent to the user. The user
     25  scheduler.
     30  - There are NO capabilities to collect thread CPU usage, scheduler
/openbmc/linux/net/mptcp/
  ctrl.c
     35  char scheduler[MPTCP_SCHED_NAME_MAX];  member
     75  return mptcp_get_pernet(net)->scheduler;  in mptcp_get_scheduler()
     86  strcpy(pernet->scheduler, "default");  in mptcp_pernet_set_defaults()
     99  strscpy(pernet->scheduler, name, MPTCP_SCHED_NAME_MAX);  in mptcp_set_scheduler()
    202  table[6].data = &pernet->scheduler;  in mptcp_pernet_new_table()
/openbmc/linux/sound/pci/mixart/
  mixart_core.h
    218  u64 scheduler;  member
    231  u64 scheduler;  member
    240  u64 scheduler;  member
    388  u64 scheduler;  member
    438  u64 scheduler;  member
    498  u64 scheduler;  member
    543  u64 scheduler;  member
/openbmc/linux/tools/testing/kunit/test_data/
  test_is_test_passed-no_tests_run_no_header.log
     33  io scheduler noop registered
     34  io scheduler deadline registered
     35  io scheduler cfq registered (default)
     36  io scheduler mq-deadline registered
     37  io scheduler kyber registered
/openbmc/linux/Documentation/networking/device_drivers/ethernet/mellanox/mlx5/
  tracepoints.rst
    110  - mlx5_esw_vport_qos_create: trace creation of transmit scheduler arbiter for vport::
    117  - mlx5_esw_vport_qos_config: trace configuration of transmit scheduler arbiter for vport::
    124  - mlx5_esw_vport_qos_destroy: trace deletion of transmit scheduler arbiter for vport::
    131  - mlx5_esw_group_qos_create: trace creation of transmit scheduler arbiter for rate group::
    138  - mlx5_esw_group_qos_config: trace configuration of transmit scheduler arbiter for rate group::
    145  - mlx5_esw_group_qos_destroy: trace deletion of transmit scheduler arbiter for group::
/openbmc/linux/Documentation/networking/
  mptcp-sysctl.rst
     69  The packet scheduler ignores stale subflows.
     78  scheduler - STRING
     79  Select the scheduler of your choice.
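
The mptcp-sysctl.rst lines above document a per-namespace "scheduler" string sysctl, and the earlier ctrl.c hits show it being initialised to "default" and updated with strscpy(). A hedged sketch of reading and setting it from C follows; the /proc/sys/net/mptcp/scheduler path is the usual procfs mapping of the sysctl name, and writing it requires root (and, for anything other than "default", a registered scheduler of that name).

    /* Sketch: read and set the MPTCP packet scheduler sysctl
     * (net.mptcp.scheduler, exposed at /proc/sys/net/mptcp/scheduler).
     * "default" is the built-in name seen in net/mptcp/ctrl.c; writing
     * requires root.
     */
    #include <stdio.h>

    int main(void)
    {
        const char *path = "/proc/sys/net/mptcp/scheduler";
        char name[64];

        FILE *f = fopen(path, "r");
        if (!f) {
            perror("open mptcp scheduler sysctl");
            return 1;
        }
        if (fgets(name, sizeof(name), f))
            printf("current MPTCP scheduler: %s", name);
        fclose(f);

        /* Select the built-in "default" scheduler. */
        f = fopen(path, "w");
        if (f) {
            fputs("default\n", f);
            fclose(f);
        }
        return 0;
    }
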
/openbmc/linux/Documentation/virt/kvm/
  halt-polling.rst
     12  before giving up the cpu to the scheduler in order to let something else run.
     15  very quickly by at least saving us a trip through the scheduler, normally on
     18  interval or some other task on the runqueue is runnable the scheduler is
     21  savings of not invoking the scheduler are distinguishable.
     34  The maximum time for which to poll before invoking the scheduler, referred to
     77  whether the scheduler is invoked within that function).
/openbmc/linux/Documentation/translations/zh_CN/scheduler/
  schedutil.rst
      4  :Original: Documentation/scheduler/schedutil.rst
     89  …- Documentation/translations/zh_CN/scheduler/sched-capacity.rst:"1. CPU Capacity + 2. Task utiliza…
/openbmc/linux/drivers/gpu/drm/i915/
  i915_getparam.c
     69  value = !!(i915->caps.scheduler & I915_SCHEDULER_CAP_SEMAPHORES);  in i915_getparam_ioctl()
    124  value = i915->caps.scheduler;  in i915_getparam_ioctl()
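
The i915_getparam.c hits above are the kernel side of the GETPARAM ioctl through which userspace learns the driver's scheduler capabilities. A hedged userspace sketch follows; the uapi names (struct drm_i915_getparam, DRM_IOCTL_I915_GETPARAM, I915_PARAM_HAS_SCHEDULER, I915_SCHEDULER_CAP_SEMAPHORES) come from the kernel's i915_drm.h, while the device node /dev/dri/card0 and the header include path are assumptions.

    /* Sketch: query the i915 scheduler capability bitmask from userspace.
     * Assumes /dev/dri/card0 is the i915 device and that the uapi header
     * i915_drm.h is reachable as <drm/i915_drm.h> (path may differ).
     */
    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <drm/i915_drm.h>

    int main(void)
    {
        int fd = open("/dev/dri/card0", O_RDWR);
        if (fd < 0) {
            perror("open /dev/dri/card0");
            return 1;
        }

        int caps = 0;
        struct drm_i915_getparam gp = {
            .param = I915_PARAM_HAS_SCHEDULER,
            .value = &caps,
        };
        if (ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp) == 0)
            printf("scheduler caps: 0x%x (semaphores: %s)\n", caps,
                   (caps & I915_SCHEDULER_CAP_SEMAPHORES) ? "yes" : "no");
        else
            perror("DRM_IOCTL_I915_GETPARAM");

        close(fd);
        return 0;
    }
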
/openbmc/linux/Documentation/devicetree/bindings/usb/
  da8xx-usb.txt
     35  - reg-names: "controller", "scheduler", "queuemgr"
     75  reg-names = "controller", "scheduler", "queuemgr";