deadline.c (27988c96687667e74df1a9a3b8662519bc1c29c9) deadline.c (f9a25f776d780bfa3279f0b6e5f5cf3224997976)
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Deadline Scheduling Class (SCHED_DEADLINE)
4 *
5 * Earliest Deadline First (EDF) + Constant Bandwidth Server (CBS).
6 *
7 * Tasks that periodically execute their instances for less than their
8 * runtime won't miss any of their deadlines.

--- 2269 unchanged lines hidden (view full) ---

2278{
2279 unsigned int i;
2280
2281 for_each_possible_cpu(i)
2282 zalloc_cpumask_var_node(&per_cpu(local_cpu_mask_dl, i),
2283 GFP_KERNEL, cpu_to_node(i));
2284}
2285
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Deadline Scheduling Class (SCHED_DEADLINE)
4 *
5 * Earliest Deadline First (EDF) + Constant Bandwidth Server (CBS).
6 *
7 * Tasks that periodically execute their instances for less than their
8 * runtime won't miss any of their deadlines.

--- 2269 unchanged lines hidden (view full) ---

2278{
2279 unsigned int i;
2280
2281 for_each_possible_cpu(i)
2282 zalloc_cpumask_var_node(&per_cpu(local_cpu_mask_dl, i),
2283 GFP_KERNEL, cpu_to_node(i));
2284}
2285
/*
 * dl_add_task_root_domain - account @p's deadline bandwidth (p->dl.dl_bw)
 * into the dl_bw of the root domain of the runqueue @p is attached to.
 *
 * Lock order: the task's rq lock is taken first (via task_rq_lock(), which
 * also pins the rq->rd association), then the root domain's dl_bw lock.
 * Tasks that are not (or no longer) SCHED_DEADLINE are skipped.
 */
2286void dl_add_task_root_domain(struct task_struct *p)
2287{
2288	struct rq_flags rf;
2289	struct rq *rq;
2290	struct dl_bw *dl_b;
2291
2292	rq = task_rq_lock(p, &rf);	/* stabilizes p's rq and root domain */
2293	if (!dl_task(p))
2294		goto unlock;		/* nothing to account for non-DL tasks */
2295
2296	dl_b = &rq->rd->dl_bw;
2297	raw_spin_lock(&dl_b->lock);
2298
2299	/* Charge the task's bandwidth across all CPUs spanned by the rd. */
2300	__dl_add(dl_b, p->dl.dl_bw, cpumask_weight(rq->rd->span));
2301
2302	raw_spin_unlock(&dl_b->lock);
2303
2304unlock:
2305	task_rq_unlock(rq, p, &rf);
2306}
2306
2307void dl_clear_root_domain(struct root_domain *rd)
2308{
2309 unsigned long flags;
2310
2311 raw_spin_lock_irqsave(&rd->dl_bw.lock, flags);
2312 rd->dl_bw.total_bw = 0;
2313 raw_spin_unlock_irqrestore(&rd->dl_bw.lock, flags);
2314}
2315
2286#endif /* CONFIG_SMP */
2287
2288static void switched_from_dl(struct rq *rq, struct task_struct *p)
2289{
2290 /*
2291 * task_non_contending() can start the "inactive timer" (if the 0-lag
2292 * time is in the future). If the task switches back to dl before
2293 * the "inactive timer" fires, it can continue to consume its current

--- 462 unchanged lines hidden ---
2316#endif /* CONFIG_SMP */
2317
2318static void switched_from_dl(struct rq *rq, struct task_struct *p)
2319{
2320 /*
2321 * task_non_contending() can start the "inactive timer" (if the 0-lag
2322 * time is in the future). If the task switches back to dl before
2323 * the "inactive timer" fires, it can continue to consume its current

--- 462 unchanged lines hidden ---