xref: /openbmc/linux/kernel/sched/fair.c (revision 6dfcd296)
1 /*
2  * Completely Fair Scheduling (CFS) Class (SCHED_NORMAL/SCHED_BATCH)
3  *
4  *  Copyright (C) 2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
5  *
6  *  Interactivity improvements by Mike Galbraith
7  *  (C) 2007 Mike Galbraith <efault@gmx.de>
8  *
9  *  Various enhancements by Dmitry Adamushko.
10  *  (C) 2007 Dmitry Adamushko <dmitry.adamushko@gmail.com>
11  *
12  *  Group scheduling enhancements by Srivatsa Vaddagiri
13  *  Copyright IBM Corporation, 2007
14  *  Author: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>
15  *
16  *  Scaled math optimizations by Thomas Gleixner
17  *  Copyright (C) 2007, Thomas Gleixner <tglx@linutronix.de>
18  *
19  *  Adaptive scheduling granularity, math enhancements by Peter Zijlstra
20  *  Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
21  */
22 
23 #include <linux/sched.h>
24 #include <linux/latencytop.h>
25 #include <linux/cpumask.h>
26 #include <linux/cpuidle.h>
27 #include <linux/slab.h>
28 #include <linux/profile.h>
29 #include <linux/interrupt.h>
30 #include <linux/mempolicy.h>
31 #include <linux/migrate.h>
32 #include <linux/task_work.h>
33 
34 #include <trace/events/sched.h>
35 
36 #include "sched.h"
37 
38 /*
39  * Targeted preemption latency for CPU-bound tasks:
40  * (default: 6ms * (1 + ilog(ncpus)), units: nanoseconds)
41  *
42  * NOTE: this latency value is not the same as the concept of
43  * 'timeslice length' - timeslices in CFS are of variable length
44  * and there is no persistent notion of a timeslice as in traditional,
45  * time-slice based scheduling concepts.
46  *
47  * (to see the precise effective timeslice length of your workload,
48  *  run vmstat and monitor the context-switches (cs) field)
49  */
50 unsigned int sysctl_sched_latency = 6000000ULL;
51 unsigned int normalized_sysctl_sched_latency = 6000000ULL;
52 
53 /*
54  * The initial- and re-scaling of tunables is configurable
55  * (default SCHED_TUNABLESCALING_LOG = *(1+ilog(ncpus)))
56  *
57  * Options are:
58  * SCHED_TUNABLESCALING_NONE - unscaled, always *1
59  * SCHED_TUNABLESCALING_LOG - scaled logarithmically, *1+ilog(ncpus)
60  * SCHED_TUNABLESCALING_LINEAR - scaled linear, *ncpus
61  */
62 enum sched_tunable_scaling sysctl_sched_tunable_scaling
63 	= SCHED_TUNABLESCALING_LOG;
64 
65 /*
66  * Minimal preemption granularity for CPU-bound tasks:
67  * (default: 0.75 msec * (1 + ilog(ncpus)), units: nanoseconds)
68  */
69 unsigned int sysctl_sched_min_granularity = 750000ULL;
70 unsigned int normalized_sysctl_sched_min_granularity = 750000ULL;
71 
72 /*
73  * This value is kept at sysctl_sched_latency / sysctl_sched_min_granularity
74  */
75 static unsigned int sched_nr_latency = 8;
76 
77 /*
78  * After fork, child runs first. If set to 0 (default) then
79  * parent will (try to) run first.
80  */
81 unsigned int sysctl_sched_child_runs_first __read_mostly;
82 
83 /*
84  * SCHED_OTHER wake-up granularity.
85  * (default: 1 msec * (1 + ilog(ncpus)), units: nanoseconds)
86  *
87  * This option delays the preemption effects of decoupled workloads
88  * and reduces their over-scheduling. Synchronous workloads will still
89  * have immediate wakeup/sleep latencies.
90  */
91 unsigned int sysctl_sched_wakeup_granularity = 1000000UL;
92 unsigned int normalized_sysctl_sched_wakeup_granularity = 1000000UL;
93 
94 const_debug unsigned int sysctl_sched_migration_cost = 500000UL;
95 
96 /*
97  * The exponential sliding window over which load is averaged for shares
98  * distribution.
99  * (default: 10msec)
100  */
101 unsigned int __read_mostly sysctl_sched_shares_window = 10000000UL;
102 
103 #ifdef CONFIG_CFS_BANDWIDTH
104 /*
105  * Amount of runtime to allocate from global (tg) to local (per-cfs_rq) pool
106  * each time a cfs_rq requests quota.
107  *
108  * Note: in the case that the slice exceeds the runtime remaining (either due
109  * to consumption or the quota being specified to be smaller than the slice)
110  * we will always only issue the remaining available time.
111  *
112  * default: 5 msec, units: microseconds
113   */
114 unsigned int sysctl_sched_cfs_bandwidth_slice = 5000UL;
115 #endif
116 
117 /*
118  * The margin used when comparing utilization with CPU capacity:
119  * util * margin < capacity * 1024
120  */
121 unsigned int capacity_margin = 1280; /* ~20% */
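/*
 * For illustration, with the default margin of 1280: a task with
 * util_avg 800 fits a CPU of capacity 1024, since 800 * 1280 = 1024000 is
 * smaller than 1024 * 1024 = 1048576, while util_avg 820 does not
 * (820 * 1280 = 1049600). The cut-off is therefore roughly 80% of the
 * CPU's capacity.
 */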
122 
123 static inline void update_load_add(struct load_weight *lw, unsigned long inc)
124 {
125 	lw->weight += inc;
126 	lw->inv_weight = 0;
127 }
128 
129 static inline void update_load_sub(struct load_weight *lw, unsigned long dec)
130 {
131 	lw->weight -= dec;
132 	lw->inv_weight = 0;
133 }
134 
135 static inline void update_load_set(struct load_weight *lw, unsigned long w)
136 {
137 	lw->weight = w;
138 	lw->inv_weight = 0;
139 }
140 
141 /*
142  * Increase the granularity value when there are more CPUs,
143  * because with more CPUs the 'effective latency' as visible
144  * to users decreases. But the relationship is not linear,
145  * so pick a second-best guess by going with the log2 of the
146  * number of CPUs.
147  *
148  * This idea comes from the SD scheduler of Con Kolivas:
149  */
150 static unsigned int get_update_sysctl_factor(void)
151 {
152 	unsigned int cpus = min_t(unsigned int, num_online_cpus(), 8);
153 	unsigned int factor;
154 
155 	switch (sysctl_sched_tunable_scaling) {
156 	case SCHED_TUNABLESCALING_NONE:
157 		factor = 1;
158 		break;
159 	case SCHED_TUNABLESCALING_LINEAR:
160 		factor = cpus;
161 		break;
162 	case SCHED_TUNABLESCALING_LOG:
163 	default:
164 		factor = 1 + ilog2(cpus);
165 		break;
166 	}
167 
168 	return factor;
169 }
170 
171 static void update_sysctl(void)
172 {
173 	unsigned int factor = get_update_sysctl_factor();
174 
175 #define SET_SYSCTL(name) \
176 	(sysctl_##name = (factor) * normalized_sysctl_##name)
177 	SET_SYSCTL(sched_min_granularity);
178 	SET_SYSCTL(sched_latency);
179 	SET_SYSCTL(sched_wakeup_granularity);
180 #undef SET_SYSCTL
181 }
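/*
 * Worked example of the scaling above, assuming an 8-CPU system and the
 * default SCHED_TUNABLESCALING_LOG policy: factor = 1 + ilog2(8) = 4, so
 * sysctl_sched_latency becomes 4 * 6ms = 24ms, sysctl_sched_min_granularity
 * becomes 4 * 0.75ms = 3ms and sysctl_sched_wakeup_granularity becomes
 * 4 * 1ms = 4ms. The CPU count is clamped to 8, so with LOG scaling the
 * factor never exceeds 4.
 */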
182 
183 void sched_init_granularity(void)
184 {
185 	update_sysctl();
186 }
187 
188 #define WMULT_CONST	(~0U)
189 #define WMULT_SHIFT	32
190 
191 static void __update_inv_weight(struct load_weight *lw)
192 {
193 	unsigned long w;
194 
195 	if (likely(lw->inv_weight))
196 		return;
197 
198 	w = scale_load_down(lw->weight);
199 
200 	if (BITS_PER_LONG > 32 && unlikely(w >= WMULT_CONST))
201 		lw->inv_weight = 1;
202 	else if (unlikely(!w))
203 		lw->inv_weight = WMULT_CONST;
204 	else
205 		lw->inv_weight = WMULT_CONST / w;
206 }
207 
208 /*
209  * delta_exec * weight / lw.weight
210  *   OR
211  * (delta_exec * (weight * lw->inv_weight)) >> WMULT_SHIFT
212  *
213  * Either weight := NICE_0_LOAD and lw \e sched_prio_to_wmult[], in which case
214  * we're guaranteed shift stays positive because inv_weight is guaranteed to
215  * fit 32 bits, and NICE_0_LOAD gives another 10 bits; therefore shift >= 22.
216  *
217  * Or, weight <= lw.weight (because lw.weight is the runqueue weight), thus
218  * weight/lw.weight <= 1, and therefore our shift will also be positive.
219  */
220 static u64 __calc_delta(u64 delta_exec, unsigned long weight, struct load_weight *lw)
221 {
222 	u64 fact = scale_load_down(weight);
223 	int shift = WMULT_SHIFT;
224 
225 	__update_inv_weight(lw);
226 
227 	if (unlikely(fact >> 32)) {
228 		while (fact >> 32) {
229 			fact >>= 1;
230 			shift--;
231 		}
232 	}
233 
234 	/* hint to use a 32x32->64 mul */
235 	fact = (u64)(u32)fact * lw->inv_weight;
236 
237 	while (fact >> 32) {
238 		fact >>= 1;
239 		shift--;
240 	}
241 
242 	return mul_u64_u32_shr(delta_exec, fact, shift);
243 }
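/*
 * Illustrative use of the helper above, assuming a nice-0 entity whose
 * weight scales down to 1024 on a cfs_rq whose total weight scales down
 * to 3072: inv_weight becomes WMULT_CONST / 3072 and
 *
 *   __calc_delta(6000000, weight, &cfs_rq->load)
 *
 * evaluates to roughly 6000000 * 1024 / 3072 = 2000000, i.e. the entity
 * is entitled to about a third of a 6ms period, modulo fixed-point
 * rounding.
 */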
244 
245 
246 const struct sched_class fair_sched_class;
247 
248 /**************************************************************
249  * CFS operations on generic schedulable entities:
250  */
251 
252 #ifdef CONFIG_FAIR_GROUP_SCHED
253 
254 /* cpu runqueue to which this cfs_rq is attached */
255 static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
256 {
257 	return cfs_rq->rq;
258 }
259 
260 /* An entity is a task if it doesn't "own" a runqueue */
261 #define entity_is_task(se)	(!se->my_q)
262 
263 static inline struct task_struct *task_of(struct sched_entity *se)
264 {
265 	SCHED_WARN_ON(!entity_is_task(se));
266 	return container_of(se, struct task_struct, se);
267 }
268 
269 /* Walk up scheduling entities hierarchy */
270 #define for_each_sched_entity(se) \
271 		for (; se; se = se->parent)
272 
273 static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
274 {
275 	return p->se.cfs_rq;
276 }
277 
278 /* runqueue on which this entity is (to be) queued */
279 static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
280 {
281 	return se->cfs_rq;
282 }
283 
284 /* runqueue "owned" by this group */
285 static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
286 {
287 	return grp->my_q;
288 }
289 
290 static inline void list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq)
291 {
292 	if (!cfs_rq->on_list) {
293 		/*
294 		 * Ensure we either appear before our parent (if already
295 		 * enqueued) or force our parent to appear after us when it is
296 		 * enqueued.  The fact that we always enqueue bottom-up
297 		 * reduces this to two cases.
298 		 */
299 		if (cfs_rq->tg->parent &&
300 		    cfs_rq->tg->parent->cfs_rq[cpu_of(rq_of(cfs_rq))]->on_list) {
301 			list_add_rcu(&cfs_rq->leaf_cfs_rq_list,
302 				&rq_of(cfs_rq)->leaf_cfs_rq_list);
303 		} else {
304 			list_add_tail_rcu(&cfs_rq->leaf_cfs_rq_list,
305 				&rq_of(cfs_rq)->leaf_cfs_rq_list);
306 		}
307 
308 		cfs_rq->on_list = 1;
309 	}
310 }
311 
312 static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq)
313 {
314 	if (cfs_rq->on_list) {
315 		list_del_rcu(&cfs_rq->leaf_cfs_rq_list);
316 		cfs_rq->on_list = 0;
317 	}
318 }
319 
320 /* Iterate through all leaf cfs_rq's on a runqueue */
321 #define for_each_leaf_cfs_rq(rq, cfs_rq) \
322 	list_for_each_entry_rcu(cfs_rq, &rq->leaf_cfs_rq_list, leaf_cfs_rq_list)
323 
324 /* Do the two (enqueued) entities belong to the same group? */
325 static inline struct cfs_rq *
326 is_same_group(struct sched_entity *se, struct sched_entity *pse)
327 {
328 	if (se->cfs_rq == pse->cfs_rq)
329 		return se->cfs_rq;
330 
331 	return NULL;
332 }
333 
334 static inline struct sched_entity *parent_entity(struct sched_entity *se)
335 {
336 	return se->parent;
337 }
338 
339 static void
340 find_matching_se(struct sched_entity **se, struct sched_entity **pse)
341 {
342 	int se_depth, pse_depth;
343 
344 	/*
345 	 * A preemption test can be made between sibling entities that are in the
346 	 * same cfs_rq, i.e. that have a common parent. Walk up the hierarchy of
347 	 * both tasks until we find their ancestors that are siblings of a common
348 	 * parent.
349 	 */
350 
351 	/* First walk up until both entities are at same depth */
352 	se_depth = (*se)->depth;
353 	pse_depth = (*pse)->depth;
354 
355 	while (se_depth > pse_depth) {
356 		se_depth--;
357 		*se = parent_entity(*se);
358 	}
359 
360 	while (pse_depth > se_depth) {
361 		pse_depth--;
362 		*pse = parent_entity(*pse);
363 	}
364 
365 	while (!is_same_group(*se, *pse)) {
366 		*se = parent_entity(*se);
367 		*pse = parent_entity(*pse);
368 	}
369 }
370 
371 #else	/* !CONFIG_FAIR_GROUP_SCHED */
372 
373 static inline struct task_struct *task_of(struct sched_entity *se)
374 {
375 	return container_of(se, struct task_struct, se);
376 }
377 
378 static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
379 {
380 	return container_of(cfs_rq, struct rq, cfs);
381 }
382 
383 #define entity_is_task(se)	1
384 
385 #define for_each_sched_entity(se) \
386 		for (; se; se = NULL)
387 
388 static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
389 {
390 	return &task_rq(p)->cfs;
391 }
392 
393 static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
394 {
395 	struct task_struct *p = task_of(se);
396 	struct rq *rq = task_rq(p);
397 
398 	return &rq->cfs;
399 }
400 
401 /* runqueue "owned" by this group */
402 static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
403 {
404 	return NULL;
405 }
406 
407 static inline void list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq)
408 {
409 }
410 
411 static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq)
412 {
413 }
414 
415 #define for_each_leaf_cfs_rq(rq, cfs_rq) \
416 		for (cfs_rq = &rq->cfs; cfs_rq; cfs_rq = NULL)
417 
418 static inline struct sched_entity *parent_entity(struct sched_entity *se)
419 {
420 	return NULL;
421 }
422 
423 static inline void
424 find_matching_se(struct sched_entity **se, struct sched_entity **pse)
425 {
426 }
427 
428 #endif	/* CONFIG_FAIR_GROUP_SCHED */
429 
430 static __always_inline
431 void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec);
432 
433 /**************************************************************
434  * Scheduling class tree data structure manipulation methods:
435  */
436 
437 static inline u64 max_vruntime(u64 max_vruntime, u64 vruntime)
438 {
439 	s64 delta = (s64)(vruntime - max_vruntime);
440 	if (delta > 0)
441 		max_vruntime = vruntime;
442 
443 	return max_vruntime;
444 }
445 
446 static inline u64 min_vruntime(u64 min_vruntime, u64 vruntime)
447 {
448 	s64 delta = (s64)(vruntime - min_vruntime);
449 	if (delta < 0)
450 		min_vruntime = vruntime;
451 
452 	return min_vruntime;
453 }
454 
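/*
 * The (s64) casts above make these comparisons safe across u64 wrap:
 * e.g. with vruntime = 5 and max_vruntime = ULLONG_MAX - 5 the signed
 * difference is +11, so the recently wrapped value 5 is correctly treated
 * as the larger of the two.
 */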
455 static inline int entity_before(struct sched_entity *a,
456 				struct sched_entity *b)
457 {
458 	return (s64)(a->vruntime - b->vruntime) < 0;
459 }
460 
461 static void update_min_vruntime(struct cfs_rq *cfs_rq)
462 {
463 	struct sched_entity *curr = cfs_rq->curr;
464 
465 	u64 vruntime = cfs_rq->min_vruntime;
466 
467 	if (curr) {
468 		if (curr->on_rq)
469 			vruntime = curr->vruntime;
470 		else
471 			curr = NULL;
472 	}
473 
474 	if (cfs_rq->rb_leftmost) {
475 		struct sched_entity *se = rb_entry(cfs_rq->rb_leftmost,
476 						   struct sched_entity,
477 						   run_node);
478 
479 		if (!curr)
480 			vruntime = se->vruntime;
481 		else
482 			vruntime = min_vruntime(vruntime, se->vruntime);
483 	}
484 
485 	/* ensure we never gain time by being placed backwards. */
486 	cfs_rq->min_vruntime = max_vruntime(cfs_rq->min_vruntime, vruntime);
487 #ifndef CONFIG_64BIT
488 	smp_wmb();
489 	cfs_rq->min_vruntime_copy = cfs_rq->min_vruntime;
490 #endif
491 }
492 
493 /*
494  * Enqueue an entity into the rb-tree:
495  */
496 static void __enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
497 {
498 	struct rb_node **link = &cfs_rq->tasks_timeline.rb_node;
499 	struct rb_node *parent = NULL;
500 	struct sched_entity *entry;
501 	int leftmost = 1;
502 
503 	/*
504 	 * Find the right place in the rbtree:
505 	 */
506 	while (*link) {
507 		parent = *link;
508 		entry = rb_entry(parent, struct sched_entity, run_node);
509 		/*
510 		 * We don't care about collisions. Nodes with
511 		 * the same key stay together.
512 		 */
513 		if (entity_before(se, entry)) {
514 			link = &parent->rb_left;
515 		} else {
516 			link = &parent->rb_right;
517 			leftmost = 0;
518 		}
519 	}
520 
521 	/*
522 	 * Maintain a cache of leftmost tree entries (it is frequently
523 	 * used):
524 	 */
525 	if (leftmost)
526 		cfs_rq->rb_leftmost = &se->run_node;
527 
528 	rb_link_node(&se->run_node, parent, link);
529 	rb_insert_color(&se->run_node, &cfs_rq->tasks_timeline);
530 }
531 
532 static void __dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
533 {
534 	if (cfs_rq->rb_leftmost == &se->run_node) {
535 		struct rb_node *next_node;
536 
537 		next_node = rb_next(&se->run_node);
538 		cfs_rq->rb_leftmost = next_node;
539 	}
540 
541 	rb_erase(&se->run_node, &cfs_rq->tasks_timeline);
542 }
543 
544 struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq)
545 {
546 	struct rb_node *left = cfs_rq->rb_leftmost;
547 
548 	if (!left)
549 		return NULL;
550 
551 	return rb_entry(left, struct sched_entity, run_node);
552 }
553 
554 static struct sched_entity *__pick_next_entity(struct sched_entity *se)
555 {
556 	struct rb_node *next = rb_next(&se->run_node);
557 
558 	if (!next)
559 		return NULL;
560 
561 	return rb_entry(next, struct sched_entity, run_node);
562 }
563 
564 #ifdef CONFIG_SCHED_DEBUG
565 struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq)
566 {
567 	struct rb_node *last = rb_last(&cfs_rq->tasks_timeline);
568 
569 	if (!last)
570 		return NULL;
571 
572 	return rb_entry(last, struct sched_entity, run_node);
573 }
574 
575 /**************************************************************
576  * Scheduling class statistics methods:
577  */
578 
579 int sched_proc_update_handler(struct ctl_table *table, int write,
580 		void __user *buffer, size_t *lenp,
581 		loff_t *ppos)
582 {
583 	int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
584 	unsigned int factor = get_update_sysctl_factor();
585 
586 	if (ret || !write)
587 		return ret;
588 
589 	sched_nr_latency = DIV_ROUND_UP(sysctl_sched_latency,
590 					sysctl_sched_min_granularity);
591 
592 #define WRT_SYSCTL(name) \
593 	(normalized_sysctl_##name = sysctl_##name / (factor))
594 	WRT_SYSCTL(sched_min_granularity);
595 	WRT_SYSCTL(sched_latency);
596 	WRT_SYSCTL(sched_wakeup_granularity);
597 #undef WRT_SYSCTL
598 
599 	return 0;
600 }
601 #endif
602 
603 /*
604  * delta /= w
605  */
606 static inline u64 calc_delta_fair(u64 delta, struct sched_entity *se)
607 {
608 	if (unlikely(se->load.weight != NICE_0_LOAD))
609 		delta = __calc_delta(delta, NICE_0_LOAD, &se->load);
610 
611 	return delta;
612 }
613 
614 /*
615  * The idea is to set a period in which each task runs once.
616  *
617  * When there are too many tasks (more than sched_nr_latency) we have to stretch
618  * this period because otherwise the slices get too small.
619  *
620  * p = (nr <= nl) ? l : l*nr/nl
621  */
622 static u64 __sched_period(unsigned long nr_running)
623 {
624 	if (unlikely(nr_running > sched_nr_latency))
625 		return nr_running * sysctl_sched_min_granularity;
626 	else
627 		return sysctl_sched_latency;
628 }
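/*
 * Worked example with the defaults above (sched_nr_latency == 8,
 * sysctl_sched_latency == 6ms, sysctl_sched_min_granularity == 0.75ms),
 * ignoring CPU-count scaling: with 4 runnable tasks the period stays at
 * 6ms; with 16 runnable tasks the period is stretched to 16 * 0.75ms =
 * 12ms so that no slice drops below the minimum granularity.
 */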
629 
630 /*
631  * We calculate the wall-time slice from the period by taking a part
632  * proportional to the weight.
633  *
634  * s = p*P[w/rw]
635  */
636 static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
637 {
638 	u64 slice = __sched_period(cfs_rq->nr_running + !se->on_rq);
639 
640 	for_each_sched_entity(se) {
641 		struct load_weight *load;
642 		struct load_weight lw;
643 
644 		cfs_rq = cfs_rq_of(se);
645 		load = &cfs_rq->load;
646 
647 		if (unlikely(!se->on_rq)) {
648 			lw = cfs_rq->load;
649 
650 			update_load_add(&lw, se->load.weight);
651 			load = &lw;
652 		}
653 		slice = __calc_delta(slice, se->load.weight, load);
654 	}
655 	return slice;
656 }
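/*
 * For example (ignoring CPU-count scaling and group scheduling), with two
 * runnable nice-0 tasks the cfs_rq weight is 2 * 1024, so each task's
 * wall-time slice is 6ms * 1024 / 2048 = 3ms. A reniced task receives a
 * share of the same period proportional to its weight instead.
 */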
657 
658 /*
659  * We calculate the vruntime slice of a to-be-inserted task.
660  *
661  * vs = s/w
662  */
663 static u64 sched_vslice(struct cfs_rq *cfs_rq, struct sched_entity *se)
664 {
665 	return calc_delta_fair(sched_slice(cfs_rq, se), se);
666 }
667 
668 #ifdef CONFIG_SMP
669 static int select_idle_sibling(struct task_struct *p, int prev_cpu, int cpu);
670 static unsigned long task_h_load(struct task_struct *p);
671 
672 /*
673  * We choose a half-life close to 1 scheduling period.
674  * Note: The tables runnable_avg_yN_inv and runnable_avg_yN_sum are
675  * dependent on this value.
676  */
677 #define LOAD_AVG_PERIOD 32
678 #define LOAD_AVG_MAX 47742 /* maximum possible load avg */
679 #define LOAD_AVG_MAX_N 345 /* number of full periods to produce LOAD_AVG_MAX */
680 
681 /* Give a new sched_entity starting runnable values so its load is weighted heavily while it is young */
682 void init_entity_runnable_average(struct sched_entity *se)
683 {
684 	struct sched_avg *sa = &se->avg;
685 
686 	sa->last_update_time = 0;
687 	/*
688 	 * sched_avg's period_contrib should be strictly less than 1024, so
689 	 * we give it 1023 to make sure it is almost a period (1024us), and
690 	 * will definitely be updated (after enqueue).
691 	 */
692 	sa->period_contrib = 1023;
693 	sa->load_avg = scale_load_down(se->load.weight);
694 	sa->load_sum = sa->load_avg * LOAD_AVG_MAX;
695 	/*
696 	 * At this point, util_avg won't be used in select_task_rq_fair anyway
697 	 */
698 	sa->util_avg = 0;
699 	sa->util_sum = 0;
700 	/* when this task is enqueued, it will contribute to its cfs_rq's load_avg */
701 }
702 
703 static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq);
704 static int update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq, bool update_freq);
705 static void update_tg_load_avg(struct cfs_rq *cfs_rq, int force);
706 static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se);
707 
708 /*
709  * With new tasks being created, their initial util_avgs are extrapolated
710  * based on the cfs_rq's current util_avg:
711  *
712  *   util_avg = cfs_rq->util_avg / (cfs_rq->load_avg + 1) * se.load.weight
713  *
714  * However, in many cases, the above util_avg does not give a desired
715  * value. Moreover, the sum of the util_avgs may be divergent, such
716  * as when the series is a harmonic series.
717  *
718  * To solve this problem, we also cap the util_avg of successive tasks to
719  * only 1/2 of the remaining utilization budget:
720  *
721  *   util_avg_cap = (1024 - cfs_rq->avg.util_avg) / 2^n
722  *
723  * where n denotes the nth task.
724  *
725  * For example, the simplest series from the beginning would be:
726  *
727  *  task  util_avg: 512, 256, 128,  64,  32,   16,    8, ...
728  * cfs_rq util_avg: 512, 768, 896, 960, 992, 1008, 1016, ...
729  *
730  * Finally, that extrapolated util_avg is clamped to the cap (util_avg_cap)
731  * if util_avg > util_avg_cap.
732  */
733 void post_init_entity_util_avg(struct sched_entity *se)
734 {
735 	struct cfs_rq *cfs_rq = cfs_rq_of(se);
736 	struct sched_avg *sa = &se->avg;
737 	long cap = (long)(SCHED_CAPACITY_SCALE - cfs_rq->avg.util_avg) / 2;
738 	u64 now = cfs_rq_clock_task(cfs_rq);
739 
740 	if (cap > 0) {
741 		if (cfs_rq->avg.util_avg != 0) {
742 			sa->util_avg  = cfs_rq->avg.util_avg * se->load.weight;
743 			sa->util_avg /= (cfs_rq->avg.load_avg + 1);
744 
745 			if (sa->util_avg > cap)
746 				sa->util_avg = cap;
747 		} else {
748 			sa->util_avg = cap;
749 		}
750 		sa->util_sum = sa->util_avg * LOAD_AVG_MAX;
751 	}
752 
753 	if (entity_is_task(se)) {
754 		struct task_struct *p = task_of(se);
755 		if (p->sched_class != &fair_sched_class) {
756 			/*
757 			 * For !fair tasks do:
758 			 *
759 			update_cfs_rq_load_avg(now, cfs_rq, false);
760 			attach_entity_load_avg(cfs_rq, se);
761 			switched_from_fair(rq, p);
762 			 *
763 			 * such that the next switched_to_fair() has the
764 			 * expected state.
765 			 */
766 			se->avg.last_update_time = now;
767 			return;
768 		}
769 	}
770 
771 	update_cfs_rq_load_avg(now, cfs_rq, false);
772 	attach_entity_load_avg(cfs_rq, se);
773 	update_tg_load_avg(cfs_rq, false);
774 }
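/*
 * Example of the capping above: if the parent cfs_rq already has
 * util_avg = 512, a newly forked task is assigned at most
 * cap = (1024 - 512) / 2 = 256, which is what produces the
 * 512, 256, 128, ... series shown in the comment before this function.
 */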
775 
776 #else /* !CONFIG_SMP */
777 void init_entity_runnable_average(struct sched_entity *se)
778 {
779 }
780 void post_init_entity_util_avg(struct sched_entity *se)
781 {
782 }
783 static void update_tg_load_avg(struct cfs_rq *cfs_rq, int force)
784 {
785 }
786 #endif /* CONFIG_SMP */
787 
788 /*
789  * Update the current task's runtime statistics.
790  */
791 static void update_curr(struct cfs_rq *cfs_rq)
792 {
793 	struct sched_entity *curr = cfs_rq->curr;
794 	u64 now = rq_clock_task(rq_of(cfs_rq));
795 	u64 delta_exec;
796 
797 	if (unlikely(!curr))
798 		return;
799 
800 	delta_exec = now - curr->exec_start;
801 	if (unlikely((s64)delta_exec <= 0))
802 		return;
803 
804 	curr->exec_start = now;
805 
806 	schedstat_set(curr->statistics.exec_max,
807 		      max(delta_exec, curr->statistics.exec_max));
808 
809 	curr->sum_exec_runtime += delta_exec;
810 	schedstat_add(cfs_rq->exec_clock, delta_exec);
811 
812 	curr->vruntime += calc_delta_fair(delta_exec, curr);
813 	update_min_vruntime(cfs_rq);
814 
815 	if (entity_is_task(curr)) {
816 		struct task_struct *curtask = task_of(curr);
817 
818 		trace_sched_stat_runtime(curtask, delta_exec, curr->vruntime);
819 		cpuacct_charge(curtask, delta_exec);
820 		account_group_exec_runtime(curtask, delta_exec);
821 	}
822 
823 	account_cfs_rq_runtime(cfs_rq, delta_exec);
824 }
825 
826 static void update_curr_fair(struct rq *rq)
827 {
828 	update_curr(cfs_rq_of(&rq->curr->se));
829 }
830 
831 static inline void
832 update_stats_wait_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
833 {
834 	u64 wait_start, prev_wait_start;
835 
836 	if (!schedstat_enabled())
837 		return;
838 
839 	wait_start = rq_clock(rq_of(cfs_rq));
840 	prev_wait_start = schedstat_val(se->statistics.wait_start);
841 
842 	if (entity_is_task(se) && task_on_rq_migrating(task_of(se)) &&
843 	    likely(wait_start > prev_wait_start))
844 		wait_start -= prev_wait_start;
845 
846 	schedstat_set(se->statistics.wait_start, wait_start);
847 }
848 
849 static inline void
850 update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se)
851 {
852 	struct task_struct *p;
853 	u64 delta;
854 
855 	if (!schedstat_enabled())
856 		return;
857 
858 	delta = rq_clock(rq_of(cfs_rq)) - schedstat_val(se->statistics.wait_start);
859 
860 	if (entity_is_task(se)) {
861 		p = task_of(se);
862 		if (task_on_rq_migrating(p)) {
863 			/*
864 			 * Preserve migrating task's wait time so wait_start
865 			 * time stamp can be adjusted to accumulate wait time
866 			 * prior to migration.
867 			 */
868 			schedstat_set(se->statistics.wait_start, delta);
869 			return;
870 		}
871 		trace_sched_stat_wait(p, delta);
872 	}
873 
874 	schedstat_set(se->statistics.wait_max,
875 		      max(schedstat_val(se->statistics.wait_max), delta));
876 	schedstat_inc(se->statistics.wait_count);
877 	schedstat_add(se->statistics.wait_sum, delta);
878 	schedstat_set(se->statistics.wait_start, 0);
879 }
880 
881 static inline void
882 update_stats_enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
883 {
884 	struct task_struct *tsk = NULL;
885 	u64 sleep_start, block_start;
886 
887 	if (!schedstat_enabled())
888 		return;
889 
890 	sleep_start = schedstat_val(se->statistics.sleep_start);
891 	block_start = schedstat_val(se->statistics.block_start);
892 
893 	if (entity_is_task(se))
894 		tsk = task_of(se);
895 
896 	if (sleep_start) {
897 		u64 delta = rq_clock(rq_of(cfs_rq)) - sleep_start;
898 
899 		if ((s64)delta < 0)
900 			delta = 0;
901 
902 		if (unlikely(delta > schedstat_val(se->statistics.sleep_max)))
903 			schedstat_set(se->statistics.sleep_max, delta);
904 
905 		schedstat_set(se->statistics.sleep_start, 0);
906 		schedstat_add(se->statistics.sum_sleep_runtime, delta);
907 
908 		if (tsk) {
909 			account_scheduler_latency(tsk, delta >> 10, 1);
910 			trace_sched_stat_sleep(tsk, delta);
911 		}
912 	}
913 	if (block_start) {
914 		u64 delta = rq_clock(rq_of(cfs_rq)) - block_start;
915 
916 		if ((s64)delta < 0)
917 			delta = 0;
918 
919 		if (unlikely(delta > schedstat_val(se->statistics.block_max)))
920 			schedstat_set(se->statistics.block_max, delta);
921 
922 		schedstat_set(se->statistics.block_start, 0);
923 		schedstat_add(se->statistics.sum_sleep_runtime, delta);
924 
925 		if (tsk) {
926 			if (tsk->in_iowait) {
927 				schedstat_add(se->statistics.iowait_sum, delta);
928 				schedstat_inc(se->statistics.iowait_count);
929 				trace_sched_stat_iowait(tsk, delta);
930 			}
931 
932 			trace_sched_stat_blocked(tsk, delta);
933 
934 			/*
935 			 * Blocking time is in units of nanosecs, so shift by
936 			 * 20 to get a milliseconds-range estimation of the
937 			 * amount of time that the task spent sleeping:
938 			 */
939 			if (unlikely(prof_on == SLEEP_PROFILING)) {
940 				profile_hits(SLEEP_PROFILING,
941 						(void *)get_wchan(tsk),
942 						delta >> 20);
943 			}
944 			account_scheduler_latency(tsk, delta >> 10, 0);
945 		}
946 	}
947 }
948 
949 /*
950  * Task is being enqueued - update stats:
951  */
952 static inline void
953 update_stats_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
954 {
955 	if (!schedstat_enabled())
956 		return;
957 
958 	/*
959 	 * Are we enqueueing a waiting task? (for current tasks
960 	 * a dequeue/enqueue event is a NOP)
961 	 */
962 	if (se != cfs_rq->curr)
963 		update_stats_wait_start(cfs_rq, se);
964 
965 	if (flags & ENQUEUE_WAKEUP)
966 		update_stats_enqueue_sleeper(cfs_rq, se);
967 }
968 
969 static inline void
970 update_stats_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
971 {
972 
973 	if (!schedstat_enabled())
974 		return;
975 
976 	/*
977 	 * Mark the end of the wait period if dequeueing a
978 	 * waiting task:
979 	 */
980 	if (se != cfs_rq->curr)
981 		update_stats_wait_end(cfs_rq, se);
982 
983 	if ((flags & DEQUEUE_SLEEP) && entity_is_task(se)) {
984 		struct task_struct *tsk = task_of(se);
985 
986 		if (tsk->state & TASK_INTERRUPTIBLE)
987 			schedstat_set(se->statistics.sleep_start,
988 				      rq_clock(rq_of(cfs_rq)));
989 		if (tsk->state & TASK_UNINTERRUPTIBLE)
990 			schedstat_set(se->statistics.block_start,
991 				      rq_clock(rq_of(cfs_rq)));
992 	}
993 }
994 
995 /*
996  * We are picking a new current task - update its stats:
997  */
998 static inline void
999 update_stats_curr_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
1000 {
1001 	/*
1002 	 * We are starting a new run period:
1003 	 */
1004 	se->exec_start = rq_clock_task(rq_of(cfs_rq));
1005 }
1006 
1007 /**************************************************
1008  * Scheduling class queueing methods:
1009  */
1010 
1011 #ifdef CONFIG_NUMA_BALANCING
1012 /*
1013  * Approximate time to scan a full NUMA task in ms. The task scan period is
1014  * calculated based on the task's virtual memory size and
1015  * numa_balancing_scan_size.
1016  */
1017 unsigned int sysctl_numa_balancing_scan_period_min = 1000;
1018 unsigned int sysctl_numa_balancing_scan_period_max = 60000;
1019 
1020 /* Portion of address space to scan in MB */
1021 unsigned int sysctl_numa_balancing_scan_size = 256;
1022 
1023 /* Scan @scan_size MB every @scan_period after an initial @scan_delay in ms */
1024 unsigned int sysctl_numa_balancing_scan_delay = 1000;
1025 
1026 static unsigned int task_nr_scan_windows(struct task_struct *p)
1027 {
1028 	unsigned long rss = 0;
1029 	unsigned long nr_scan_pages;
1030 
1031 	/*
1032 	 * Calculations are based on RSS, as non-present and empty pages are skipped
1033 	 * by the PTE scanner and NUMA hinting faults should be trapped based
1034 	 * on resident pages
1035 	 */
1036 	nr_scan_pages = sysctl_numa_balancing_scan_size << (20 - PAGE_SHIFT);
1037 	rss = get_mm_rss(p->mm);
1038 	if (!rss)
1039 		rss = nr_scan_pages;
1040 
1041 	rss = round_up(rss, nr_scan_pages);
1042 	return rss / nr_scan_pages;
1043 }
1044 
1045 /* For sanity's sake, never scan more PTEs than MAX_SCAN_WINDOW MB/sec. */
1046 #define MAX_SCAN_WINDOW 2560
1047 
1048 static unsigned int task_scan_min(struct task_struct *p)
1049 {
1050 	unsigned int scan_size = READ_ONCE(sysctl_numa_balancing_scan_size);
1051 	unsigned int scan, floor;
1052 	unsigned int windows = 1;
1053 
1054 	if (scan_size < MAX_SCAN_WINDOW)
1055 		windows = MAX_SCAN_WINDOW / scan_size;
1056 	floor = 1000 / windows;
1057 
1058 	scan = sysctl_numa_balancing_scan_period_min / task_nr_scan_windows(p);
1059 	return max_t(unsigned int, floor, scan);
1060 }
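/*
 * Worked example with the defaults above: a task with 1GB of resident
 * memory and a 256MB scan size needs 1GB / 256MB = 4 scan windows, so
 * task_scan_min() returns max(1000ms / 10, 1000ms / 4) = 250ms. The 100ms
 * floor (1000 / (2560 / 256)) only matters for tasks with a much larger
 * RSS.
 */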
1061 
1062 static unsigned int task_scan_max(struct task_struct *p)
1063 {
1064 	unsigned int smin = task_scan_min(p);
1065 	unsigned int smax;
1066 
1067 	/* Watch for min being lower than max due to floor calculations */
1068 	smax = sysctl_numa_balancing_scan_period_max / task_nr_scan_windows(p);
1069 	return max(smin, smax);
1070 }
1071 
1072 static void account_numa_enqueue(struct rq *rq, struct task_struct *p)
1073 {
1074 	rq->nr_numa_running += (p->numa_preferred_nid != -1);
1075 	rq->nr_preferred_running += (p->numa_preferred_nid == task_node(p));
1076 }
1077 
1078 static void account_numa_dequeue(struct rq *rq, struct task_struct *p)
1079 {
1080 	rq->nr_numa_running -= (p->numa_preferred_nid != -1);
1081 	rq->nr_preferred_running -= (p->numa_preferred_nid == task_node(p));
1082 }
1083 
1084 struct numa_group {
1085 	atomic_t refcount;
1086 
1087 	spinlock_t lock; /* nr_tasks, tasks */
1088 	int nr_tasks;
1089 	pid_t gid;
1090 	int active_nodes;
1091 
1092 	struct rcu_head rcu;
1093 	unsigned long total_faults;
1094 	unsigned long max_faults_cpu;
1095 	/*
1096 	 * Faults_cpu is used to decide whether memory should move
1097 	 * towards the CPU. As a consequence, these stats are weighted
1098 	 * more by CPU use than by memory faults.
1099 	 */
1100 	unsigned long *faults_cpu;
1101 	unsigned long faults[0];
1102 };
1103 
1104 /* Shared or private faults. */
1105 #define NR_NUMA_HINT_FAULT_TYPES 2
1106 
1107 /* Memory and CPU locality */
1108 #define NR_NUMA_HINT_FAULT_STATS (NR_NUMA_HINT_FAULT_TYPES * 2)
1109 
1110 /* Averaged statistics, and temporary buffers. */
1111 #define NR_NUMA_HINT_FAULT_BUCKETS (NR_NUMA_HINT_FAULT_STATS * 2)
1112 
1113 pid_t task_numa_group_id(struct task_struct *p)
1114 {
1115 	return p->numa_group ? p->numa_group->gid : 0;
1116 }
1117 
1118 /*
1119  * The averaged statistics, shared & private, memory & cpu,
1120  * occupy the first half of the array. The second half of the
1121  * array is for current counters, which are averaged into the
1122  * first set by task_numa_placement.
1123  */
1124 static inline int task_faults_idx(enum numa_faults_stats s, int nid, int priv)
1125 {
1126 	return NR_NUMA_HINT_FAULT_TYPES * (s * nr_node_ids + nid) + priv;
1127 }
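/*
 * For instance, on a two-node system the faults array starts as
 * [mem node0 priv, mem node0 shared, mem node1 priv, mem node1 shared,
 *  cpu node0 priv, cpu node0 shared, cpu node1 priv, cpu node1 shared],
 * with the corresponding *BUF counters occupying the second half of the
 * array as described in the comment above.
 */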
1128 
1129 static inline unsigned long task_faults(struct task_struct *p, int nid)
1130 {
1131 	if (!p->numa_faults)
1132 		return 0;
1133 
1134 	return p->numa_faults[task_faults_idx(NUMA_MEM, nid, 0)] +
1135 		p->numa_faults[task_faults_idx(NUMA_MEM, nid, 1)];
1136 }
1137 
1138 static inline unsigned long group_faults(struct task_struct *p, int nid)
1139 {
1140 	if (!p->numa_group)
1141 		return 0;
1142 
1143 	return p->numa_group->faults[task_faults_idx(NUMA_MEM, nid, 0)] +
1144 		p->numa_group->faults[task_faults_idx(NUMA_MEM, nid, 1)];
1145 }
1146 
1147 static inline unsigned long group_faults_cpu(struct numa_group *group, int nid)
1148 {
1149 	return group->faults_cpu[task_faults_idx(NUMA_MEM, nid, 0)] +
1150 		group->faults_cpu[task_faults_idx(NUMA_MEM, nid, 1)];
1151 }
1152 
1153 /*
1154  * A node triggering more than 1/3 as many NUMA faults as the maximum is
1155  * considered part of a numa group's pseudo-interleaving set. Migrations
1156  * between these nodes are slowed down, to allow things to settle down.
1157  */
1158 #define ACTIVE_NODE_FRACTION 3
1159 
1160 static bool numa_is_active_node(int nid, struct numa_group *ng)
1161 {
1162 	return group_faults_cpu(ng, nid) * ACTIVE_NODE_FRACTION > ng->max_faults_cpu;
1163 }
1164 
1165 /* Handle placement on systems where not all nodes are directly connected. */
1166 static unsigned long score_nearby_nodes(struct task_struct *p, int nid,
1167 					int maxdist, bool task)
1168 {
1169 	unsigned long score = 0;
1170 	int node;
1171 
1172 	/*
1173 	 * All nodes are directly connected, and the same distance
1174 	 * from each other. No need for fancy placement algorithms.
1175 	 */
1176 	if (sched_numa_topology_type == NUMA_DIRECT)
1177 		return 0;
1178 
1179 	/*
1180 	 * This code is called for each node, introducing N^2 complexity,
1181 	 * which should be ok given the number of nodes rarely exceeds 8.
1182 	 */
1183 	for_each_online_node(node) {
1184 		unsigned long faults;
1185 		int dist = node_distance(nid, node);
1186 
1187 		/*
1188 		 * The furthest away nodes in the system are not interesting
1189 		 * for placement; nid was already counted.
1190 		 */
1191 		if (dist == sched_max_numa_distance || node == nid)
1192 			continue;
1193 
1194 		/*
1195 		 * On systems with a backplane NUMA topology, compare groups
1196 		 * of nodes, and move tasks towards the group with the most
1197 		 * memory accesses. When comparing two nodes at distance
1198 		 * "hoplimit", only nodes closer by than "hoplimit" are part
1199 		 * of each group. Skip other nodes.
1200 		 */
1201 		if (sched_numa_topology_type == NUMA_BACKPLANE &&
1202 					dist > maxdist)
1203 			continue;
1204 
1205 		/* Add up the faults from nearby nodes. */
1206 		if (task)
1207 			faults = task_faults(p, node);
1208 		else
1209 			faults = group_faults(p, node);
1210 
1211 		/*
1212 		 * On systems with a glueless mesh NUMA topology, there are
1213 		 * no fixed "groups of nodes". Instead, nodes that are not
1214 		 * directly connected bounce traffic through intermediate
1215 		 * nodes; a numa_group can occupy any set of nodes.
1216 		 * The further away a node is, the less the faults count.
1217 		 * This seems to result in good task placement.
1218 		 */
1219 		if (sched_numa_topology_type == NUMA_GLUELESS_MESH) {
1220 			faults *= (sched_max_numa_distance - dist);
1221 			faults /= (sched_max_numa_distance - LOCAL_DISTANCE);
1222 		}
1223 
1224 		score += faults;
1225 	}
1226 
1227 	return score;
1228 }
1229 
1230 /*
1231  * These return the fraction of accesses done by a particular task, or
1232  * task group, on a particular numa node.  The group weight is given a
1233  * larger multiplier, in order to group tasks together that are almost
1234  * evenly spread out between numa nodes.
1235  */
1236 static inline unsigned long task_weight(struct task_struct *p, int nid,
1237 					int dist)
1238 {
1239 	unsigned long faults, total_faults;
1240 
1241 	if (!p->numa_faults)
1242 		return 0;
1243 
1244 	total_faults = p->total_numa_faults;
1245 
1246 	if (!total_faults)
1247 		return 0;
1248 
1249 	faults = task_faults(p, nid);
1250 	faults += score_nearby_nodes(p, nid, dist, true);
1251 
1252 	return 1000 * faults / total_faults;
1253 }
1254 
1255 static inline unsigned long group_weight(struct task_struct *p, int nid,
1256 					 int dist)
1257 {
1258 	unsigned long faults, total_faults;
1259 
1260 	if (!p->numa_group)
1261 		return 0;
1262 
1263 	total_faults = p->numa_group->total_faults;
1264 
1265 	if (!total_faults)
1266 		return 0;
1267 
1268 	faults = group_faults(p, nid);
1269 	faults += score_nearby_nodes(p, nid, dist, false);
1270 
1271 	return 1000 * faults / total_faults;
1272 }
1273 
1274 bool should_numa_migrate_memory(struct task_struct *p, struct page * page,
1275 				int src_nid, int dst_cpu)
1276 {
1277 	struct numa_group *ng = p->numa_group;
1278 	int dst_nid = cpu_to_node(dst_cpu);
1279 	int last_cpupid, this_cpupid;
1280 
1281 	this_cpupid = cpu_pid_to_cpupid(dst_cpu, current->pid);
1282 
1283 	/*
1284 	 * Multi-stage node selection is used in conjunction with a periodic
1285 	 * migration fault to build a temporal task<->page relation. By using
1286 	 * a two-stage filter we remove short/unlikely relations.
1287 	 *
1288 	 * Using P(p) ~ n_p / n_t as per frequentist probability, we can equate
1289 	 * a task's usage of a particular page (n_p) per total usage of this
1290 	 * page (n_t) (in a given time-span) to a probability.
1291 	 *
1292 	 * Our periodic faults will sample this probability and getting the
1293 	 * same result twice in a row, given these samples are fully
1294 	 * independent, is then given by P(n)^2, provided our sample period
1295 	 * is sufficiently short compared to the usage pattern.
1296 	 *
1297 	 * This quadratic squishes small probabilities, making it less likely we
1298 	 * act on an unlikely task<->page relation.
1299 	 */
1300 	last_cpupid = page_cpupid_xchg_last(page, this_cpupid);
1301 	if (!cpupid_pid_unset(last_cpupid) &&
1302 				cpupid_to_nid(last_cpupid) != dst_nid)
1303 		return false;
1304 
1305 	/* Always allow migrate on private faults */
1306 	if (cpupid_match_pid(p, last_cpupid))
1307 		return true;
1308 
1309 	/* A shared fault, but p->numa_group has not been set up yet. */
1310 	if (!ng)
1311 		return true;
1312 
1313 	/*
1314 	 * Destination node is much more heavily used than the source
1315 	 * node? Allow migration.
1316 	 */
1317 	if (group_faults_cpu(ng, dst_nid) > group_faults_cpu(ng, src_nid) *
1318 					ACTIVE_NODE_FRACTION)
1319 		return true;
1320 
1321 	/*
1322 	 * Distribute memory according to CPU & memory use on each node,
1323 	 * with 3/4 hysteresis to avoid unnecessary memory migrations:
1324 	 *
1325 	 * faults_cpu(dst)   3   faults_cpu(src)
1326 	 * --------------- * - > ---------------
1327 	 * faults_mem(dst)   4   faults_mem(src)
1328 	 */
1329 	return group_faults_cpu(ng, dst_nid) * group_faults(p, src_nid) * 3 >
1330 	       group_faults_cpu(ng, src_nid) * group_faults(p, dst_nid) * 4;
1331 }
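/*
 * Numeric illustration of the 3/4 hysteresis above, assuming equal memory
 * fault counts (100) on both nodes: with faults_cpu(dst) = 40 and
 * faults_cpu(src) = 30 we get 40 * 100 * 3 = 12000, which is not greater
 * than 30 * 100 * 4 = 12000, so the page stays put. The destination's CPU
 * fault rate must exceed the source's by more than a factor of 4/3 before
 * memory follows the task.
 */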
1332 
1333 static unsigned long weighted_cpuload(const int cpu);
1334 static unsigned long source_load(int cpu, int type);
1335 static unsigned long target_load(int cpu, int type);
1336 static unsigned long capacity_of(int cpu);
1337 static long effective_load(struct task_group *tg, int cpu, long wl, long wg);
1338 
1339 /* Cached statistics for all CPUs within a node */
1340 struct numa_stats {
1341 	unsigned long nr_running;
1342 	unsigned long load;
1343 
1344 	/* Total compute capacity of CPUs on a node */
1345 	unsigned long compute_capacity;
1346 
1347 	/* Approximate capacity in terms of runnable tasks on a node */
1348 	unsigned long task_capacity;
1349 	int has_free_capacity;
1350 };
1351 
1352 /*
1353  * XXX borrowed from update_sg_lb_stats
1354  */
1355 static void update_numa_stats(struct numa_stats *ns, int nid)
1356 {
1357 	int smt, cpu, cpus = 0;
1358 	unsigned long capacity;
1359 
1360 	memset(ns, 0, sizeof(*ns));
1361 	for_each_cpu(cpu, cpumask_of_node(nid)) {
1362 		struct rq *rq = cpu_rq(cpu);
1363 
1364 		ns->nr_running += rq->nr_running;
1365 		ns->load += weighted_cpuload(cpu);
1366 		ns->compute_capacity += capacity_of(cpu);
1367 
1368 		cpus++;
1369 	}
1370 
1371 	/*
1372 	 * If we raced with hotplug and there are no CPUs left in our mask
1373 	 * the @ns structure is NULL'ed and task_numa_compare() will
1374 	 * not find this node attractive.
1375 	 *
1376 	 * We'll either bail at !has_free_capacity, or we'll detect a huge
1377 	 * imbalance and bail there.
1378 	 */
1379 	if (!cpus)
1380 		return;
1381 
1382 	/* smt := ceil(cpus / capacity), assumes: 1 < smt_power < 2 */
1383 	smt = DIV_ROUND_UP(SCHED_CAPACITY_SCALE * cpus, ns->compute_capacity);
1384 	capacity = cpus / smt; /* cores */
1385 
1386 	ns->task_capacity = min_t(unsigned, capacity,
1387 		DIV_ROUND_CLOSEST(ns->compute_capacity, SCHED_CAPACITY_SCALE));
1388 	ns->has_free_capacity = (ns->nr_running < ns->task_capacity);
1389 }
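/*
 * For illustration: a node with 8 logical CPUs whose SMT siblings report
 * a combined compute_capacity of 4712 gives
 * smt = DIV_ROUND_UP(1024 * 8, 4712) = 2, capacity = 8 / 2 = 4 cores, and
 * task_capacity = min(4, DIV_ROUND_CLOSEST(4712, 1024)) = 4, so the node
 * is considered to have free capacity while fewer than 4 tasks run there.
 */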
1390 
1391 struct task_numa_env {
1392 	struct task_struct *p;
1393 
1394 	int src_cpu, src_nid;
1395 	int dst_cpu, dst_nid;
1396 
1397 	struct numa_stats src_stats, dst_stats;
1398 
1399 	int imbalance_pct;
1400 	int dist;
1401 
1402 	struct task_struct *best_task;
1403 	long best_imp;
1404 	int best_cpu;
1405 };
1406 
1407 static void task_numa_assign(struct task_numa_env *env,
1408 			     struct task_struct *p, long imp)
1409 {
1410 	if (env->best_task)
1411 		put_task_struct(env->best_task);
1412 	if (p)
1413 		get_task_struct(p);
1414 
1415 	env->best_task = p;
1416 	env->best_imp = imp;
1417 	env->best_cpu = env->dst_cpu;
1418 }
1419 
1420 static bool load_too_imbalanced(long src_load, long dst_load,
1421 				struct task_numa_env *env)
1422 {
1423 	long imb, old_imb;
1424 	long orig_src_load, orig_dst_load;
1425 	long src_capacity, dst_capacity;
1426 
1427 	/*
1428 	 * The load is corrected for the CPU capacity available on each node.
1429 	 *
1430 	 * src_load        dst_load
1431 	 * ------------ vs ---------
1432 	 * src_capacity    dst_capacity
1433 	 */
1434 	src_capacity = env->src_stats.compute_capacity;
1435 	dst_capacity = env->dst_stats.compute_capacity;
1436 
1437 	/* We care about the slope of the imbalance, not the direction. */
1438 	if (dst_load < src_load)
1439 		swap(dst_load, src_load);
1440 
1441 	/* Is the difference below the threshold? */
1442 	imb = dst_load * src_capacity * 100 -
1443 	      src_load * dst_capacity * env->imbalance_pct;
1444 	if (imb <= 0)
1445 		return false;
1446 
1447 	/*
1448 	 * The imbalance is above the allowed threshold.
1449 	 * Compare it with the old imbalance.
1450 	 */
1451 	orig_src_load = env->src_stats.load;
1452 	orig_dst_load = env->dst_stats.load;
1453 
1454 	if (orig_dst_load < orig_src_load)
1455 		swap(orig_dst_load, orig_src_load);
1456 
1457 	old_imb = orig_dst_load * src_capacity * 100 -
1458 		  orig_src_load * dst_capacity * env->imbalance_pct;
1459 
1460 	/* Would this change make things worse? */
1461 	return (imb > old_imb);
1462 }
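/*
 * Worked example, assuming imbalance_pct == 112 (the initial value used
 * by task_numa_migrate() below) and equal compute capacities of 1024:
 * src_load = 1000 and dst_load = 1100 give
 * imb = 1100 * 1024 * 100 - 1000 * 1024 * 112 < 0, so the move is allowed
 * outright. With dst_load = 1150 the 12% threshold is exceeded and the
 * move is only accepted if it does not worsen the pre-existing imbalance.
 */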
1463 
1464 /*
1465  * This checks if the overall compute and NUMA accesses of the system would
1466  * be improved if the source task were migrated to the target dst_cpu, taking
1467  * into account that it might be best if the task running on the dst_cpu were
1468  * exchanged with the source task.
1469  */
1470 static void task_numa_compare(struct task_numa_env *env,
1471 			      long taskimp, long groupimp)
1472 {
1473 	struct rq *src_rq = cpu_rq(env->src_cpu);
1474 	struct rq *dst_rq = cpu_rq(env->dst_cpu);
1475 	struct task_struct *cur;
1476 	long src_load, dst_load;
1477 	long load;
1478 	long imp = env->p->numa_group ? groupimp : taskimp;
1479 	long moveimp = imp;
1480 	int dist = env->dist;
1481 
1482 	rcu_read_lock();
1483 	cur = task_rcu_dereference(&dst_rq->curr);
1484 	if (cur && ((cur->flags & PF_EXITING) || is_idle_task(cur)))
1485 		cur = NULL;
1486 
1487 	/*
1488 	 * Because we have preemption enabled we can get migrated around and
1489 	 * end up trying to select ourselves (current == env->p) as a swap candidate.
1490 	 */
1491 	if (cur == env->p)
1492 		goto unlock;
1493 
1494 	/*
1495 	 * "imp" is the fault differential for the source task between the
1496 	 * source and destination node. Calculate the total differential for
1497 	 * the source task and potential destination task. The more negative
1498 	 * the value is, the more remote accesses would be expected to
1499 	 * be incurred if the tasks were swapped.
1500 	 */
1501 	if (cur) {
1502 		/* Skip this swap candidate if cannot move to the source cpu */
1503 		if (!cpumask_test_cpu(env->src_cpu, tsk_cpus_allowed(cur)))
1504 			goto unlock;
1505 
1506 		/*
1507 		 * If dst and source tasks are in the same NUMA group, or not
1508 		 * in any group then look only at task weights.
1509 		 */
1510 		if (cur->numa_group == env->p->numa_group) {
1511 			imp = taskimp + task_weight(cur, env->src_nid, dist) -
1512 			      task_weight(cur, env->dst_nid, dist);
1513 			/*
1514 			 * Add some hysteresis to prevent swapping the
1515 			 * tasks within a group over tiny differences.
1516 			 */
1517 			if (cur->numa_group)
1518 				imp -= imp/16;
1519 		} else {
1520 			/*
1521 			 * Compare the group weights. If a task is all by
1522 			 * itself (not part of a group), use the task weight
1523 			 * instead.
1524 			 */
1525 			if (cur->numa_group)
1526 				imp += group_weight(cur, env->src_nid, dist) -
1527 				       group_weight(cur, env->dst_nid, dist);
1528 			else
1529 				imp += task_weight(cur, env->src_nid, dist) -
1530 				       task_weight(cur, env->dst_nid, dist);
1531 		}
1532 	}
1533 
1534 	if (imp <= env->best_imp && moveimp <= env->best_imp)
1535 		goto unlock;
1536 
1537 	if (!cur) {
1538 		/* Is there capacity at our destination? */
1539 		if (env->src_stats.nr_running <= env->src_stats.task_capacity &&
1540 		    !env->dst_stats.has_free_capacity)
1541 			goto unlock;
1542 
1543 		goto balance;
1544 	}
1545 
1546 	/* Balance doesn't matter much if we're running a task per cpu */
1547 	if (imp > env->best_imp && src_rq->nr_running == 1 &&
1548 			dst_rq->nr_running == 1)
1549 		goto assign;
1550 
1551 	/*
1552 	 * In the overloaded case, try and keep the load balanced.
1553 	 */
1554 balance:
1555 	load = task_h_load(env->p);
1556 	dst_load = env->dst_stats.load + load;
1557 	src_load = env->src_stats.load - load;
1558 
1559 	if (moveimp > imp && moveimp > env->best_imp) {
1560 		/*
1561 		 * If the improvement from just moving env->p in this direction is
1562 		 * better than swapping tasks around, check if a move is
1563 		 * possible. Store a slightly smaller score than moveimp,
1564 		 * so an actually idle CPU will win.
1565 		 */
1566 		if (!load_too_imbalanced(src_load, dst_load, env)) {
1567 			imp = moveimp - 1;
1568 			cur = NULL;
1569 			goto assign;
1570 		}
1571 	}
1572 
1573 	if (imp <= env->best_imp)
1574 		goto unlock;
1575 
1576 	if (cur) {
1577 		load = task_h_load(cur);
1578 		dst_load -= load;
1579 		src_load += load;
1580 	}
1581 
1582 	if (load_too_imbalanced(src_load, dst_load, env))
1583 		goto unlock;
1584 
1585 	/*
1586 	 * One idle CPU per node is evaluated for a task numa move.
1587 	 * Call select_idle_sibling to maybe find a better one.
1588 	 */
1589 	if (!cur) {
1590 		/*
1591 		 * select_idle_siblings() uses a per-cpu cpumask that
1592 		 * can be used from IRQ context.
1593 		 */
1594 		local_irq_disable();
1595 		env->dst_cpu = select_idle_sibling(env->p, env->src_cpu,
1596 						   env->dst_cpu);
1597 		local_irq_enable();
1598 	}
1599 
1600 assign:
1601 	task_numa_assign(env, cur, imp);
1602 unlock:
1603 	rcu_read_unlock();
1604 }
1605 
1606 static void task_numa_find_cpu(struct task_numa_env *env,
1607 				long taskimp, long groupimp)
1608 {
1609 	int cpu;
1610 
1611 	for_each_cpu(cpu, cpumask_of_node(env->dst_nid)) {
1612 		/* Skip this CPU if the source task cannot migrate */
1613 		if (!cpumask_test_cpu(cpu, tsk_cpus_allowed(env->p)))
1614 			continue;
1615 
1616 		env->dst_cpu = cpu;
1617 		task_numa_compare(env, taskimp, groupimp);
1618 	}
1619 }
1620 
1621 /* Only move tasks to a NUMA node less busy than the current node. */
1622 static bool numa_has_capacity(struct task_numa_env *env)
1623 {
1624 	struct numa_stats *src = &env->src_stats;
1625 	struct numa_stats *dst = &env->dst_stats;
1626 
1627 	if (src->has_free_capacity && !dst->has_free_capacity)
1628 		return false;
1629 
1630 	/*
1631 	 * Only consider a task move if the source has a higher load
1632 	 * than the destination, corrected for CPU capacity on each node.
1633 	 *
1634 	 *      src->load                dst->load
1635 	 * --------------------- vs ---------------------
1636 	 * src->compute_capacity    dst->compute_capacity
1637 	 */
1638 	if (src->load * dst->compute_capacity * env->imbalance_pct >
1639 
1640 	    dst->load * src->compute_capacity * 100)
1641 		return true;
1642 
1643 	return false;
1644 }
1645 
1646 static int task_numa_migrate(struct task_struct *p)
1647 {
1648 	struct task_numa_env env = {
1649 		.p = p,
1650 
1651 		.src_cpu = task_cpu(p),
1652 		.src_nid = task_node(p),
1653 
1654 		.imbalance_pct = 112,
1655 
1656 		.best_task = NULL,
1657 		.best_imp = 0,
1658 		.best_cpu = -1,
1659 	};
1660 	struct sched_domain *sd;
1661 	unsigned long taskweight, groupweight;
1662 	int nid, ret, dist;
1663 	long taskimp, groupimp;
1664 
1665 	/*
1666 	 * Pick the lowest SD_NUMA domain, as that would have the smallest
1667 	 * imbalance and would be the first to start moving tasks about.
1668 	 *
1669 	 * And we want to avoid any moving of tasks about, as that would create
1670 	 * random movement of tasks -- countering the NUMA conditions we're trying
1671 	 * to satisfy here.
1672 	 */
1673 	rcu_read_lock();
1674 	sd = rcu_dereference(per_cpu(sd_numa, env.src_cpu));
1675 	if (sd)
1676 		env.imbalance_pct = 100 + (sd->imbalance_pct - 100) / 2;
1677 	rcu_read_unlock();
1678 
1679 	/*
1680 	 * Cpusets can break the scheduler domain tree into smaller
1681 	 * balance domains, some of which do not cross NUMA boundaries.
1682 	 * Tasks that are "trapped" in such domains cannot be migrated
1683 	 * elsewhere, so there is no point in (re)trying.
1684 	 */
1685 	if (unlikely(!sd)) {
1686 		p->numa_preferred_nid = task_node(p);
1687 		return -EINVAL;
1688 	}
1689 
1690 	env.dst_nid = p->numa_preferred_nid;
1691 	dist = env.dist = node_distance(env.src_nid, env.dst_nid);
1692 	taskweight = task_weight(p, env.src_nid, dist);
1693 	groupweight = group_weight(p, env.src_nid, dist);
1694 	update_numa_stats(&env.src_stats, env.src_nid);
1695 	taskimp = task_weight(p, env.dst_nid, dist) - taskweight;
1696 	groupimp = group_weight(p, env.dst_nid, dist) - groupweight;
1697 	update_numa_stats(&env.dst_stats, env.dst_nid);
1698 
1699 	/* Try to find a spot on the preferred nid. */
1700 	if (numa_has_capacity(&env))
1701 		task_numa_find_cpu(&env, taskimp, groupimp);
1702 
1703 	/*
1704 	 * Look at other nodes in these cases:
1705 	 * - there is no space available on the preferred_nid
1706 	 * - the task is part of a numa_group that is interleaved across
1707 	 *   multiple NUMA nodes; in order to better consolidate the group,
1708 	 *   we need to check other locations.
1709 	 */
1710 	if (env.best_cpu == -1 || (p->numa_group && p->numa_group->active_nodes > 1)) {
1711 		for_each_online_node(nid) {
1712 			if (nid == env.src_nid || nid == p->numa_preferred_nid)
1713 				continue;
1714 
1715 			dist = node_distance(env.src_nid, env.dst_nid);
1716 			if (sched_numa_topology_type == NUMA_BACKPLANE &&
1717 						dist != env.dist) {
1718 				taskweight = task_weight(p, env.src_nid, dist);
1719 				groupweight = group_weight(p, env.src_nid, dist);
1720 			}
1721 
1722 			/* Only consider nodes where both task and groups benefit */
1723 			taskimp = task_weight(p, nid, dist) - taskweight;
1724 			groupimp = group_weight(p, nid, dist) - groupweight;
1725 			if (taskimp < 0 && groupimp < 0)
1726 				continue;
1727 
1728 			env.dist = dist;
1729 			env.dst_nid = nid;
1730 			update_numa_stats(&env.dst_stats, env.dst_nid);
1731 			if (numa_has_capacity(&env))
1732 				task_numa_find_cpu(&env, taskimp, groupimp);
1733 		}
1734 	}
1735 
1736 	/*
1737 	 * If the task is part of a workload that spans multiple NUMA nodes,
1738 	 * and is migrating into one of the workload's active nodes, remember
1739 	 * this node as the task's preferred numa node, so the workload can
1740 	 * settle down.
1741 	 * A task that migrated to a second choice node will be better off
1742 	 * trying for a better one later. Do not set the preferred node here.
1743 	 */
1744 	if (p->numa_group) {
1745 		struct numa_group *ng = p->numa_group;
1746 
1747 		if (env.best_cpu == -1)
1748 			nid = env.src_nid;
1749 		else
1750 			nid = env.dst_nid;
1751 
1752 		if (ng->active_nodes > 1 && numa_is_active_node(env.dst_nid, ng))
1753 			sched_setnuma(p, env.dst_nid);
1754 	}
1755 
1756 	/* No better CPU than the current one was found. */
1757 	if (env.best_cpu == -1)
1758 		return -EAGAIN;
1759 
1760 	/*
1761 	 * Reset the scan period if the task is being rescheduled on an
1762 	 * alternative node to recheck if the task is now properly placed.
1763 	 */
1764 	p->numa_scan_period = task_scan_min(p);
1765 
1766 	if (env.best_task == NULL) {
1767 		ret = migrate_task_to(p, env.best_cpu);
1768 		if (ret != 0)
1769 			trace_sched_stick_numa(p, env.src_cpu, env.best_cpu);
1770 		return ret;
1771 	}
1772 
1773 	ret = migrate_swap(p, env.best_task);
1774 	if (ret != 0)
1775 		trace_sched_stick_numa(p, env.src_cpu, task_cpu(env.best_task));
1776 	put_task_struct(env.best_task);
1777 	return ret;
1778 }
1779 
1780 /* Attempt to migrate a task to a CPU on the preferred node. */
1781 static void numa_migrate_preferred(struct task_struct *p)
1782 {
1783 	unsigned long interval = HZ;
1784 
1785 	/* This task has no NUMA fault statistics yet */
1786 	if (unlikely(p->numa_preferred_nid == -1 || !p->numa_faults))
1787 		return;
1788 
1789 	/* Periodically retry migrating the task to the preferred node */
1790 	interval = min(interval, msecs_to_jiffies(p->numa_scan_period) / 16);
1791 	p->numa_migrate_retry = jiffies + interval;
1792 
1793 	/* Success if task is already running on preferred CPU */
1794 	if (task_node(p) == p->numa_preferred_nid)
1795 		return;
1796 
1797 	/* Otherwise, try migrate to a CPU on the preferred node */
1798 	task_numa_migrate(p);
1799 }
1800 
1801 /*
1802  * Find out how many nodes the workload is actively running on. Do this by
1803  * tracking the nodes from which NUMA hinting faults are triggered. This can
1804  * be different from the set of nodes where the workload's memory is currently
1805  * located.
1806  */
1807 static void numa_group_count_active_nodes(struct numa_group *numa_group)
1808 {
1809 	unsigned long faults, max_faults = 0;
1810 	int nid, active_nodes = 0;
1811 
1812 	for_each_online_node(nid) {
1813 		faults = group_faults_cpu(numa_group, nid);
1814 		if (faults > max_faults)
1815 			max_faults = faults;
1816 	}
1817 
1818 	for_each_online_node(nid) {
1819 		faults = group_faults_cpu(numa_group, nid);
1820 		if (faults * ACTIVE_NODE_FRACTION > max_faults)
1821 			active_nodes++;
1822 	}
1823 
1824 	numa_group->max_faults_cpu = max_faults;
1825 	numa_group->active_nodes = active_nodes;
1826 }
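/*
 * For example, with per-node CPU fault counts of { 90, 40, 20 } the
 * maximum is 90, and a node counts as active when faults * 3 > 90, i.e.
 * faults > 30; the first two nodes qualify, so active_nodes becomes 2 and
 * max_faults_cpu becomes 90.
 */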
1827 
1828 /*
1829  * When adapting the scan rate, the period is divided into NUMA_PERIOD_SLOTS
1830  * increments. The more local the fault statistics are, the higher the scan
1831  * period will be for the next scan window. If local/(local+remote) ratio is
1832  * below NUMA_PERIOD_THRESHOLD (where range of ratio is 1..NUMA_PERIOD_SLOTS)
1833  * the scan period will decrease. Aim for 70% local accesses.
1834  */
1835 #define NUMA_PERIOD_SLOTS 10
1836 #define NUMA_PERIOD_THRESHOLD 7
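
/*
 * Illustrative example of the slot arithmetic below (illustrative numbers):
 * with a 1000ms scan period, period_slot = DIV_ROUND_UP(1000, 10) = 100ms.
 * If 800 of 1000 recorded faults were local, ratio = 8 >= 7, so the period
 * grows by (8 - 7) * 100ms to 1100ms (scan slower). If only 300 were local,
 * ratio = 3 < 7 and the period shrinks by up to (7 - 3) * 100ms, scaled
 * down further when most faults are shared rather than private.
 */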
1837 
1838 /*
1839  * Increase the scan period (slow down scanning) if the majority of
1840  * our memory is already on our local node, or if the majority of
1841  * the page accesses are shared with other processes.
1842  * Otherwise, decrease the scan period.
1843  */
1844 static void update_task_scan_period(struct task_struct *p,
1845 			unsigned long shared, unsigned long private)
1846 {
1847 	unsigned int period_slot;
1848 	int ratio;
1849 	int diff;
1850 
1851 	unsigned long remote = p->numa_faults_locality[0];
1852 	unsigned long local = p->numa_faults_locality[1];
1853 
1854 	/*
1855 	 * If there were no recorded hinting faults then either the task is
1856 	 * completely idle or all activity is in areas that are not of interest
1857 	 * to automatic numa balancing. Related to that, if there were failed
1858 	 * migrations then it implies we are migrating too quickly or the local
1859 	 * node is overloaded. In either case, scan slower.
1860 	 */
1861 	if (local + shared == 0 || p->numa_faults_locality[2]) {
1862 		p->numa_scan_period = min(p->numa_scan_period_max,
1863 			p->numa_scan_period << 1);
1864 
1865 		p->mm->numa_next_scan = jiffies +
1866 			msecs_to_jiffies(p->numa_scan_period);
1867 
1868 		return;
1869 	}
1870 
1871 	/*
1872 	 * Prepare to scale scan period relative to the current period.
1873 	 *	 >= NUMA_PERIOD_THRESHOLD scan period increases (scan slower);
1874 	 *	    at exactly the threshold it still increases by one slot
1875 	 *	 <  NUMA_PERIOD_THRESHOLD scan period decreases (scan faster)
1876 	 */
1877 	period_slot = DIV_ROUND_UP(p->numa_scan_period, NUMA_PERIOD_SLOTS);
1878 	ratio = (local * NUMA_PERIOD_SLOTS) / (local + remote);
1879 	if (ratio >= NUMA_PERIOD_THRESHOLD) {
1880 		int slot = ratio - NUMA_PERIOD_THRESHOLD;
1881 		if (!slot)
1882 			slot = 1;
1883 		diff = slot * period_slot;
1884 	} else {
1885 		diff = -(NUMA_PERIOD_THRESHOLD - ratio) * period_slot;
1886 
1887 		/*
1888 		 * Scale scan rate increases based on sharing. There is an
1889 		 * inverse relationship between the degree of sharing and
1890 		 * the adjustment made to the scanning period. Broadly
1891 		 * speaking, the intent is that there is little point
1892 		 * scanning faster if shared accesses dominate, as it may
1893 		 * simply bounce migrations uselessly.
1894 		 */
1895 		ratio = DIV_ROUND_UP(private * NUMA_PERIOD_SLOTS, (private + shared + 1));
1896 		diff = (diff * ratio) / NUMA_PERIOD_SLOTS;
1897 	}
1898 
1899 	p->numa_scan_period = clamp(p->numa_scan_period + diff,
1900 			task_scan_min(p), task_scan_max(p));
1901 	memset(p->numa_faults_locality, 0, sizeof(p->numa_faults_locality));
1902 }
1903 
1904 /*
1905  * Get the fraction of time the task has been running since the last
1906  * NUMA placement cycle. The scheduler keeps similar statistics, but
1907  * decays those on a 32ms period, which is orders of magnitude off
1908  * from the dozens-of-seconds NUMA balancing period. Use the scheduler
1909  * stats only if the task is so new there are no NUMA statistics yet.
1910  */
1911 static u64 numa_get_avg_runtime(struct task_struct *p, u64 *period)
1912 {
1913 	u64 runtime, delta, now;
1914 	/* Use the start of this time slice to avoid calculations. */
1915 	now = p->se.exec_start;
1916 	runtime = p->se.sum_exec_runtime;
1917 
1918 	if (p->last_task_numa_placement) {
1919 		delta = runtime - p->last_sum_exec_runtime;
1920 		*period = now - p->last_task_numa_placement;
1921 	} else {
1922 		delta = p->se.avg.load_sum / p->se.load.weight;
1923 		*period = LOAD_AVG_MAX;
1924 	}
1925 
1926 	p->last_sum_exec_runtime = runtime;
1927 	p->last_task_numa_placement = now;
1928 
1929 	return delta;
1930 }
1931 
1932 /*
1933  * Determine the preferred nid for a task in a numa_group. This needs to
1934  * be done in a way that produces consistent results with group_weight,
1935  * otherwise workloads might not converge.
1936  */
1937 static int preferred_group_nid(struct task_struct *p, int nid)
1938 {
1939 	nodemask_t nodes;
1940 	int dist;
1941 
1942 	/* Direct connections between all NUMA nodes. */
1943 	if (sched_numa_topology_type == NUMA_DIRECT)
1944 		return nid;
1945 
1946 	/*
1947 	 * On a system with glueless mesh NUMA topology, group_weight
1948 	 * scores nodes according to the number of NUMA hinting faults on
1949 	 * both the node itself, and on nearby nodes.
1950 	 */
1951 	if (sched_numa_topology_type == NUMA_GLUELESS_MESH) {
1952 		unsigned long score, max_score = 0;
1953 		int node, max_node = nid;
1954 
1955 		dist = sched_max_numa_distance;
1956 
1957 		for_each_online_node(node) {
1958 			score = group_weight(p, node, dist);
1959 			if (score > max_score) {
1960 				max_score = score;
1961 				max_node = node;
1962 			}
1963 		}
1964 		return max_node;
1965 	}
1966 
1967 	/*
1968 	 * Finding the preferred nid in a system with NUMA backplane
1969 	 * interconnect topology is more involved. The goal is to locate
1970 	 * tasks from numa_groups near each other in the system, and
1971 	 * untangle workloads from different sides of the system. This requires
1972 	 * searching down the hierarchy of node groups, recursively searching
1973 	 * inside the highest scoring group of nodes. The nodemask tricks
1974 	 * keep the complexity of the search down.
1975 	 */
1976 	nodes = node_online_map;
1977 	for (dist = sched_max_numa_distance; dist > LOCAL_DISTANCE; dist--) {
1978 		unsigned long max_faults = 0;
1979 		nodemask_t max_group = NODE_MASK_NONE;
1980 		int a, b;
1981 
1982 		/* Are there nodes at this distance from each other? */
1983 		if (!find_numa_distance(dist))
1984 			continue;
1985 
1986 		for_each_node_mask(a, nodes) {
1987 			unsigned long faults = 0;
1988 			nodemask_t this_group;
1989 			nodes_clear(this_group);
1990 
1991 			/* Sum group's NUMA faults; includes a==b case. */
1992 			for_each_node_mask(b, nodes) {
1993 				if (node_distance(a, b) < dist) {
1994 					faults += group_faults(p, b);
1995 					node_set(b, this_group);
1996 					node_clear(b, nodes);
1997 				}
1998 			}
1999 
2000 			/* Remember the top group. */
2001 			if (faults > max_faults) {
2002 				max_faults = faults;
2003 				max_group = this_group;
2004 				/*
2005 				 * subtle: at the smallest distance there is
2006 				 * just one node left in each "group", the
2007 				 * winner is the preferred nid.
2008 				 */
2009 				nid = a;
2010 			}
2011 		}
2012 		/* Next round, evaluate the nodes within max_group. */
2013 		if (!max_faults)
2014 			break;
2015 		nodes = max_group;
2016 	}
2017 	return nid;
2018 }
2019 
2020 static void task_numa_placement(struct task_struct *p)
2021 {
2022 	int seq, nid, max_nid = -1, max_group_nid = -1;
2023 	unsigned long max_faults = 0, max_group_faults = 0;
2024 	unsigned long fault_types[2] = { 0, 0 };
2025 	unsigned long total_faults;
2026 	u64 runtime, period;
2027 	spinlock_t *group_lock = NULL;
2028 
2029 	/*
2030 	 * The p->mm->numa_scan_seq field gets updated without
2031 	 * exclusive access. Use READ_ONCE() here to ensure
2032 	 * that the field is read in a single access:
2033 	 */
2034 	seq = READ_ONCE(p->mm->numa_scan_seq);
2035 	if (p->numa_scan_seq == seq)
2036 		return;
2037 	p->numa_scan_seq = seq;
2038 	p->numa_scan_period_max = task_scan_max(p);
2039 
2040 	total_faults = p->numa_faults_locality[0] +
2041 		       p->numa_faults_locality[1];
2042 	runtime = numa_get_avg_runtime(p, &period);
2043 
2044 	/* If the task is part of a group prevent parallel updates to group stats */
2045 	if (p->numa_group) {
2046 		group_lock = &p->numa_group->lock;
2047 		spin_lock_irq(group_lock);
2048 	}
2049 
2050 	/* Find the node with the highest number of faults */
2051 	for_each_online_node(nid) {
2052 		/* Keep track of the offsets in numa_faults array */
2053 		int mem_idx, membuf_idx, cpu_idx, cpubuf_idx;
2054 		unsigned long faults = 0, group_faults = 0;
2055 		int priv;
2056 
2057 		for (priv = 0; priv < NR_NUMA_HINT_FAULT_TYPES; priv++) {
2058 			long diff, f_diff, f_weight;
2059 
2060 			mem_idx = task_faults_idx(NUMA_MEM, nid, priv);
2061 			membuf_idx = task_faults_idx(NUMA_MEMBUF, nid, priv);
2062 			cpu_idx = task_faults_idx(NUMA_CPU, nid, priv);
2063 			cpubuf_idx = task_faults_idx(NUMA_CPUBUF, nid, priv);
2064 
2065 			/* Decay existing window, copy faults since last scan */
2066 			diff = p->numa_faults[membuf_idx] - p->numa_faults[mem_idx] / 2;
2067 			fault_types[priv] += p->numa_faults[membuf_idx];
2068 			p->numa_faults[membuf_idx] = 0;
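			/*
			 * E.g. (illustrative): with 100 decayed faults on
			 * this node and 30 new ones this window, diff is
			 * 30 - 50 = -20, leaving 100/2 + 30 = 80 after the
			 * update below -- an exponential decay with a
			 * half-life of one scan window.
			 */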
2069 
2070 			/*
2071 			 * Normalize the faults_from, so all tasks in a group
2072 			 * count according to CPU use, instead of by the raw
2073 			 * number of faults. Tasks with little runtime have
2074 			 * little over-all impact on throughput, and thus their
2075 			 * faults are less important.
2076 			 */
2077 			f_weight = div64_u64(runtime << 16, period + 1);
2078 			f_weight = (f_weight * p->numa_faults[cpubuf_idx]) /
2079 				   (total_faults + 1);
2080 			f_diff = f_weight - p->numa_faults[cpu_idx] / 2;
2081 			p->numa_faults[cpubuf_idx] = 0;
2082 
2083 			p->numa_faults[mem_idx] += diff;
2084 			p->numa_faults[cpu_idx] += f_diff;
2085 			faults += p->numa_faults[mem_idx];
2086 			p->total_numa_faults += diff;
2087 			if (p->numa_group) {
2088 				/*
2089 				 * safe because we can only change our own group
2090 				 *
2091 				 * mem_idx represents the offset for a given
2092 				 * nid and priv in a specific region because it
2093 				 * is at the beginning of the numa_faults array.
2094 				 */
2095 				p->numa_group->faults[mem_idx] += diff;
2096 				p->numa_group->faults_cpu[mem_idx] += f_diff;
2097 				p->numa_group->total_faults += diff;
2098 				group_faults += p->numa_group->faults[mem_idx];
2099 			}
2100 		}
2101 
2102 		if (faults > max_faults) {
2103 			max_faults = faults;
2104 			max_nid = nid;
2105 		}
2106 
2107 		if (group_faults > max_group_faults) {
2108 			max_group_faults = group_faults;
2109 			max_group_nid = nid;
2110 		}
2111 	}
2112 
2113 	update_task_scan_period(p, fault_types[0], fault_types[1]);
2114 
2115 	if (p->numa_group) {
2116 		numa_group_count_active_nodes(p->numa_group);
2117 		spin_unlock_irq(group_lock);
2118 		max_nid = preferred_group_nid(p, max_group_nid);
2119 	}
2120 
2121 	if (max_faults) {
2122 		/* Set the new preferred node */
2123 		if (max_nid != p->numa_preferred_nid)
2124 			sched_setnuma(p, max_nid);
2125 
2126 		if (task_node(p) != p->numa_preferred_nid)
2127 			numa_migrate_preferred(p);
2128 	}
2129 }
2130 
2131 static inline int get_numa_group(struct numa_group *grp)
2132 {
2133 	return atomic_inc_not_zero(&grp->refcount);
2134 }
2135 
2136 static inline void put_numa_group(struct numa_group *grp)
2137 {
2138 	if (atomic_dec_and_test(&grp->refcount))
2139 		kfree_rcu(grp, rcu);
2140 }
2141 
2142 static void task_numa_group(struct task_struct *p, int cpupid, int flags,
2143 			int *priv)
2144 {
2145 	struct numa_group *grp, *my_grp;
2146 	struct task_struct *tsk;
2147 	bool join = false;
2148 	int cpu = cpupid_to_cpu(cpupid);
2149 	int i;
2150 
2151 	if (unlikely(!p->numa_group)) {
2152 		unsigned int size = sizeof(struct numa_group) +
2153 				    4*nr_node_ids*sizeof(unsigned long);
2154 
2155 		grp = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);
2156 		if (!grp)
2157 			return;
2158 
2159 		atomic_set(&grp->refcount, 1);
2160 		grp->active_nodes = 1;
2161 		grp->max_faults_cpu = 0;
2162 		spin_lock_init(&grp->lock);
2163 		grp->gid = p->pid;
2164 		/* Second half of the array tracks nids where faults happen */
2165 		grp->faults_cpu = grp->faults + NR_NUMA_HINT_FAULT_TYPES *
2166 						nr_node_ids;
2167 
2168 		for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++)
2169 			grp->faults[i] = p->numa_faults[i];
2170 
2171 		grp->total_faults = p->total_numa_faults;
2172 
2173 		grp->nr_tasks++;
2174 		rcu_assign_pointer(p->numa_group, grp);
2175 	}
2176 
2177 	rcu_read_lock();
2178 	tsk = READ_ONCE(cpu_rq(cpu)->curr);
2179 
2180 	if (!cpupid_match_pid(tsk, cpupid))
2181 		goto no_join;
2182 
2183 	grp = rcu_dereference(tsk->numa_group);
2184 	if (!grp)
2185 		goto no_join;
2186 
2187 	my_grp = p->numa_group;
2188 	if (grp == my_grp)
2189 		goto no_join;
2190 
2191 	/*
2192 	 * Only join the other group if it's bigger; if we're the bigger group,
2193 	 * the other task will join us.
2194 	 */
2195 	if (my_grp->nr_tasks > grp->nr_tasks)
2196 		goto no_join;
2197 
2198 	/*
2199 	 * Tie-break on the grp address.
2200 	 */
2201 	if (my_grp->nr_tasks == grp->nr_tasks && my_grp > grp)
2202 		goto no_join;
2203 
2204 	/* Always join threads in the same process. */
2205 	if (tsk->mm == current->mm)
2206 		join = true;
2207 
2208 	/* Simple filter to avoid false positives due to PID collisions */
2209 	if (flags & TNF_SHARED)
2210 		join = true;
2211 
2212 	/* Update priv based on whether false sharing was detected */
2213 	*priv = !join;
2214 
2215 	if (join && !get_numa_group(grp))
2216 		goto no_join;
2217 
2218 	rcu_read_unlock();
2219 
2220 	if (!join)
2221 		return;
2222 
2223 	BUG_ON(irqs_disabled());
2224 	double_lock_irq(&my_grp->lock, &grp->lock);
2225 
2226 	for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++) {
2227 		my_grp->faults[i] -= p->numa_faults[i];
2228 		grp->faults[i] += p->numa_faults[i];
2229 	}
2230 	my_grp->total_faults -= p->total_numa_faults;
2231 	grp->total_faults += p->total_numa_faults;
2232 
2233 	my_grp->nr_tasks--;
2234 	grp->nr_tasks++;
2235 
2236 	spin_unlock(&my_grp->lock);
2237 	spin_unlock_irq(&grp->lock);
2238 
2239 	rcu_assign_pointer(p->numa_group, grp);
2240 
2241 	put_numa_group(my_grp);
2242 	return;
2243 
2244 no_join:
2245 	rcu_read_unlock();
2246 	return;
2247 }
2248 
2249 void task_numa_free(struct task_struct *p)
2250 {
2251 	struct numa_group *grp = p->numa_group;
2252 	void *numa_faults = p->numa_faults;
2253 	unsigned long flags;
2254 	int i;
2255 
2256 	if (grp) {
2257 		spin_lock_irqsave(&grp->lock, flags);
2258 		for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++)
2259 			grp->faults[i] -= p->numa_faults[i];
2260 		grp->total_faults -= p->total_numa_faults;
2261 
2262 		grp->nr_tasks--;
2263 		spin_unlock_irqrestore(&grp->lock, flags);
2264 		RCU_INIT_POINTER(p->numa_group, NULL);
2265 		put_numa_group(grp);
2266 	}
2267 
2268 	p->numa_faults = NULL;
2269 	kfree(numa_faults);
2270 }
2271 
2272 /*
2273  * Got a PROT_NONE fault for a page on @node.
2274  */
2275 void task_numa_fault(int last_cpupid, int mem_node, int pages, int flags)
2276 {
2277 	struct task_struct *p = current;
2278 	bool migrated = flags & TNF_MIGRATED;
2279 	int cpu_node = task_node(current);
2280 	int local = !!(flags & TNF_FAULT_LOCAL);
2281 	struct numa_group *ng;
2282 	int priv;
2283 
2284 	if (!static_branch_likely(&sched_numa_balancing))
2285 		return;
2286 
2287 	/* for example, ksmd faulting in a user's mm */
2288 	if (!p->mm)
2289 		return;
2290 
2291 	/* Allocate buffer to track faults on a per-node basis */
2292 	if (unlikely(!p->numa_faults)) {
2293 		int size = sizeof(*p->numa_faults) *
2294 			   NR_NUMA_HINT_FAULT_BUCKETS * nr_node_ids;
2295 
2296 		p->numa_faults = kzalloc(size, GFP_KERNEL|__GFP_NOWARN);
2297 		if (!p->numa_faults)
2298 			return;
2299 
2300 		p->total_numa_faults = 0;
2301 		memset(p->numa_faults_locality, 0, sizeof(p->numa_faults_locality));
2302 	}
2303 
2304 	/*
2305 	 * First accesses are treated as private, otherwise consider accesses
2306 	 * to be private if the accessing pid has not changed
2307 	 */
2308 	if (unlikely(last_cpupid == (-1 & LAST_CPUPID_MASK))) {
2309 		priv = 1;
2310 	} else {
2311 		priv = cpupid_match_pid(p, last_cpupid);
2312 		if (!priv && !(flags & TNF_NO_GROUP))
2313 			task_numa_group(p, last_cpupid, flags, &priv);
2314 	}
2315 
2316 	/*
2317 	 * If a workload spans multiple NUMA nodes, a shared fault that
2318 	 * occurs wholly within the set of nodes that the workload is
2319 	 * actively using should be counted as local. This allows the
2320 	 * scan rate to slow down when a workload has settled down.
2321 	 */
2322 	ng = p->numa_group;
2323 	if (!priv && !local && ng && ng->active_nodes > 1 &&
2324 				numa_is_active_node(cpu_node, ng) &&
2325 				numa_is_active_node(mem_node, ng))
2326 		local = 1;
2327 
2328 	task_numa_placement(p);
2329 
2330 	/*
2331 	 * Retry task to preferred node migration periodically, in case it
2332 	 * previously failed, or the scheduler moved us.
2333 	 */
2334 	if (time_after(jiffies, p->numa_migrate_retry))
2335 		numa_migrate_preferred(p);
2336 
2337 	if (migrated)
2338 		p->numa_pages_migrated += pages;
2339 	if (flags & TNF_MIGRATE_FAIL)
2340 		p->numa_faults_locality[2] += pages;
2341 
2342 	p->numa_faults[task_faults_idx(NUMA_MEMBUF, mem_node, priv)] += pages;
2343 	p->numa_faults[task_faults_idx(NUMA_CPUBUF, cpu_node, priv)] += pages;
2344 	p->numa_faults_locality[local] += pages;
2345 }
2346 
2347 static void reset_ptenuma_scan(struct task_struct *p)
2348 {
2349 	/*
2350 	 * We only did a read acquisition of the mmap sem, so
2351 	 * p->mm->numa_scan_seq is written to without exclusive access
2352 	 * and the update is not guaranteed to be atomic. That's not
2353 	 * much of an issue though, since this is just used for
2354 	 * statistical sampling. Use READ_ONCE/WRITE_ONCE, which are not
2355 	 * expensive, to avoid any form of compiler optimizations:
2356 	 */
2357 	WRITE_ONCE(p->mm->numa_scan_seq, READ_ONCE(p->mm->numa_scan_seq) + 1);
2358 	p->mm->numa_scan_offset = 0;
2359 }
2360 
2361 /*
2362  * The expensive part of numa migration is done from task_work context.
2363  * Triggered from task_tick_numa().
2364  */
2365 void task_numa_work(struct callback_head *work)
2366 {
2367 	unsigned long migrate, next_scan, now = jiffies;
2368 	struct task_struct *p = current;
2369 	struct mm_struct *mm = p->mm;
2370 	u64 runtime = p->se.sum_exec_runtime;
2371 	struct vm_area_struct *vma;
2372 	unsigned long start, end;
2373 	unsigned long nr_pte_updates = 0;
2374 	long pages, virtpages;
2375 
2376 	SCHED_WARN_ON(p != container_of(work, struct task_struct, numa_work));
2377 
2378 	work->next = work; /* protect against double add */
2379 	/*
2380 	 * Who cares about NUMA placement when they're dying.
2381 	 *
2382 	 * NOTE: make sure not to dereference p->mm before this check,
2383 	 * exit_task_work() happens _after_ exit_mm() so we could be called
2384 	 * without p->mm even though we still had it when we enqueued this
2385 	 * work.
2386 	 */
2387 	if (p->flags & PF_EXITING)
2388 		return;
2389 
2390 	if (!mm->numa_next_scan) {
2391 		mm->numa_next_scan = now +
2392 			msecs_to_jiffies(sysctl_numa_balancing_scan_delay);
2393 	}
2394 
2395 	/*
2396 	 * Enforce maximal scan/migration frequency..
2397 	 */
2398 	migrate = mm->numa_next_scan;
2399 	if (time_before(now, migrate))
2400 		return;
2401 
2402 	if (p->numa_scan_period == 0) {
2403 		p->numa_scan_period_max = task_scan_max(p);
2404 		p->numa_scan_period = task_scan_min(p);
2405 	}
2406 
2407 	next_scan = now + msecs_to_jiffies(p->numa_scan_period);
2408 	if (cmpxchg(&mm->numa_next_scan, migrate, next_scan) != migrate)
2409 		return;
2410 
2411 	/*
2412 	 * Delay this task enough that another task of this mm will likely win
2413 	 * the next time around.
2414 	 */
2415 	p->node_stamp += 2 * TICK_NSEC;
2416 
2417 	start = mm->numa_scan_offset;
2418 	pages = sysctl_numa_balancing_scan_size;
2419 	pages <<= 20 - PAGE_SHIFT; /* MB in pages */
2420 	virtpages = pages * 8;	   /* Scan up to this much virtual space */
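	/*
	 * E.g. (illustrative, assuming 4K pages): a 256MB scan size gives
	 * pages = 65536 and virtpages = 524288, i.e. up to 2GB of virtual
	 * address space considered per pass.
	 */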
2421 	if (!pages)
2422 		return;
2423 
2424 
2425 	down_read(&mm->mmap_sem);
2426 	vma = find_vma(mm, start);
2427 	if (!vma) {
2428 		reset_ptenuma_scan(p);
2429 		start = 0;
2430 		vma = mm->mmap;
2431 	}
2432 	for (; vma; vma = vma->vm_next) {
2433 		if (!vma_migratable(vma) || !vma_policy_mof(vma) ||
2434 			is_vm_hugetlb_page(vma) || (vma->vm_flags & VM_MIXEDMAP)) {
2435 			continue;
2436 		}
2437 
2438 		/*
2439 		 * Shared library pages mapped by multiple processes are not
2440 		 * migrated as it is expected they are cache replicated. Avoid
2441 		 * hinting faults in read-only file-backed mappings or the vdso
2442 		 * as migrating the pages will be of marginal benefit.
2443 		 */
2444 		if (!vma->vm_mm ||
2445 		    (vma->vm_file && (vma->vm_flags & (VM_READ|VM_WRITE)) == (VM_READ)))
2446 			continue;
2447 
2448 		/*
2449 		 * Skip inaccessible VMAs to avoid any confusion between
2450 		 * PROT_NONE and NUMA hinting ptes
2451 		 */
2452 		if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))
2453 			continue;
2454 
2455 		do {
2456 			start = max(start, vma->vm_start);
2457 			end = ALIGN(start + (pages << PAGE_SHIFT), HPAGE_SIZE);
2458 			end = min(end, vma->vm_end);
2459 			nr_pte_updates = change_prot_numa(vma, start, end);
2460 
2461 			/*
2462 			 * Try to scan sysctl_numa_balancing_scan_size worth of
2463 			 * hpages that have at least one present PTE that
2464 			 * is not already pte-numa. If the VMA contains
2465 			 * areas that are unused or already full of prot_numa
2466 			 * PTEs, scan up to virtpages, to skip through those
2467 			 * areas faster.
2468 			 */
2469 			if (nr_pte_updates)
2470 				pages -= (end - start) >> PAGE_SHIFT;
2471 			virtpages -= (end - start) >> PAGE_SHIFT;
2472 
2473 			start = end;
2474 			if (pages <= 0 || virtpages <= 0)
2475 				goto out;
2476 
2477 			cond_resched();
2478 		} while (end != vma->vm_end);
2479 	}
2480 
2481 out:
2482 	/*
2483 	 * It is possible to reach the end of the VMA list but the last few
2484 	 * VMAs are not guaranteed to be migratable. If they are not, we
2485 	 * would find the !migratable VMA on the next scan but not reset the
2486 	 * scanner to the start so check it now.
2487 	 */
2488 	if (vma)
2489 		mm->numa_scan_offset = start;
2490 	else
2491 		reset_ptenuma_scan(p);
2492 	up_read(&mm->mmap_sem);
2493 
2494 	/*
2495 	 * Make sure tasks use at least 32x as much time to run other code
2496 	 * than they used here, to limit NUMA PTE scanning overhead to 3% max.
2497 	 * Usually update_task_scan_period slows down scanning enough; on an
2498 	 * overloaded system we need to limit overhead on a per task basis.
2499 	 */
2500 	if (unlikely(p->se.sum_exec_runtime != runtime)) {
2501 		u64 diff = p->se.sum_exec_runtime - runtime;
2502 		p->node_stamp += 32 * diff;
2503 	}
2504 }
2505 
2506 /*
2507  * Drive the periodic memory faults..
2508  */
2509 void task_tick_numa(struct rq *rq, struct task_struct *curr)
2510 {
2511 	struct callback_head *work = &curr->numa_work;
2512 	u64 period, now;
2513 
2514 	/*
2515 	 * We don't care about NUMA placement if we don't have memory.
2516 	 */
2517 	if (!curr->mm || (curr->flags & PF_EXITING) || work->next != work)
2518 		return;
2519 
2520 	/*
2521 	 * Using runtime rather than walltime has the dual advantage that
2522 	 * we (mostly) drive the selection from busy threads and that the
2523 	 * task needs to have done some actual work before we bother with
2524 	 * NUMA placement.
2525 	 */
2526 	now = curr->se.sum_exec_runtime;
2527 	period = (u64)curr->numa_scan_period * NSEC_PER_MSEC;
2528 
2529 	if (now > curr->node_stamp + period) {
2530 		if (!curr->node_stamp)
2531 			curr->numa_scan_period = task_scan_min(curr);
2532 		curr->node_stamp += period;
2533 
2534 		if (!time_before(jiffies, curr->mm->numa_next_scan)) {
2535 			init_task_work(work, task_numa_work); /* TODO: move this into sched_fork() */
2536 			task_work_add(curr, work, true);
2537 		}
2538 	}
2539 }
2540 #else
2541 static void task_tick_numa(struct rq *rq, struct task_struct *curr)
2542 {
2543 }
2544 
2545 static inline void account_numa_enqueue(struct rq *rq, struct task_struct *p)
2546 {
2547 }
2548 
2549 static inline void account_numa_dequeue(struct rq *rq, struct task_struct *p)
2550 {
2551 }
2552 #endif /* CONFIG_NUMA_BALANCING */
2553 
2554 static void
2555 account_entity_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
2556 {
2557 	update_load_add(&cfs_rq->load, se->load.weight);
2558 	if (!parent_entity(se))
2559 		update_load_add(&rq_of(cfs_rq)->load, se->load.weight);
2560 #ifdef CONFIG_SMP
2561 	if (entity_is_task(se)) {
2562 		struct rq *rq = rq_of(cfs_rq);
2563 
2564 		account_numa_enqueue(rq, task_of(se));
2565 		list_add(&se->group_node, &rq->cfs_tasks);
2566 	}
2567 #endif
2568 	cfs_rq->nr_running++;
2569 }
2570 
2571 static void
2572 account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
2573 {
2574 	update_load_sub(&cfs_rq->load, se->load.weight);
2575 	if (!parent_entity(se))
2576 		update_load_sub(&rq_of(cfs_rq)->load, se->load.weight);
2577 #ifdef CONFIG_SMP
2578 	if (entity_is_task(se)) {
2579 		account_numa_dequeue(rq_of(cfs_rq), task_of(se));
2580 		list_del_init(&se->group_node);
2581 	}
2582 #endif
2583 	cfs_rq->nr_running--;
2584 }
2585 
2586 #ifdef CONFIG_FAIR_GROUP_SCHED
2587 # ifdef CONFIG_SMP
2588 static long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg)
2589 {
2590 	long tg_weight, load, shares;
2591 
2592 	/*
2593 	 * This really should be: cfs_rq->avg.load_avg, but instead we use
2594 	 * cfs_rq->load.weight, which is its upper bound. This helps ramp up
2595 	 * the shares for small weight interactive tasks.
2596 	 */
2597 	load = scale_load_down(cfs_rq->load.weight);
2598 
2599 	tg_weight = atomic_long_read(&tg->load_avg);
2600 
2601 	/* Ensure tg_weight >= load */
2602 	tg_weight -= cfs_rq->tg_load_avg_contrib;
2603 	tg_weight += load;
2604 
2605 	shares = (tg->shares * load);
2606 	if (tg_weight)
2607 		shares /= tg_weight;
2608 
2609 	if (shares < MIN_SHARES)
2610 		shares = MIN_SHARES;
2611 	if (shares > tg->shares)
2612 		shares = tg->shares;
2613 
2614 	return shares;
2615 }
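
/*
 * Worked example (illustrative): with tg->shares = 1024 and a group-wide
 * tg_weight of 2048, a cfs_rq contributing load = 512 is handed
 * shares = 1024 * 512 / 2048 = 256, clamped to [MIN_SHARES, tg->shares].
 */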
2616 # else /* CONFIG_SMP */
2617 static inline long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg)
2618 {
2619 	return tg->shares;
2620 }
2621 # endif /* CONFIG_SMP */
2622 
2623 static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
2624 			    unsigned long weight)
2625 {
2626 	if (se->on_rq) {
2627 		/* commit outstanding execution time */
2628 		if (cfs_rq->curr == se)
2629 			update_curr(cfs_rq);
2630 		account_entity_dequeue(cfs_rq, se);
2631 	}
2632 
2633 	update_load_set(&se->load, weight);
2634 
2635 	if (se->on_rq)
2636 		account_entity_enqueue(cfs_rq, se);
2637 }
2638 
2639 static inline int throttled_hierarchy(struct cfs_rq *cfs_rq);
2640 
2641 static void update_cfs_shares(struct cfs_rq *cfs_rq)
2642 {
2643 	struct task_group *tg;
2644 	struct sched_entity *se;
2645 	long shares;
2646 
2647 	tg = cfs_rq->tg;
2648 	se = tg->se[cpu_of(rq_of(cfs_rq))];
2649 	if (!se || throttled_hierarchy(cfs_rq))
2650 		return;
2651 #ifndef CONFIG_SMP
2652 	if (likely(se->load.weight == tg->shares))
2653 		return;
2654 #endif
2655 	shares = calc_cfs_shares(cfs_rq, tg);
2656 
2657 	reweight_entity(cfs_rq_of(se), se, shares);
2658 }
2659 #else /* CONFIG_FAIR_GROUP_SCHED */
2660 static inline void update_cfs_shares(struct cfs_rq *cfs_rq)
2661 {
2662 }
2663 #endif /* CONFIG_FAIR_GROUP_SCHED */
2664 
2665 #ifdef CONFIG_SMP
2666 /* Precomputed fixed inverse multiplies for multiplication by y^n */
2667 static const u32 runnable_avg_yN_inv[] = {
2668 	0xffffffff, 0xfa83b2da, 0xf5257d14, 0xefe4b99a, 0xeac0c6e6, 0xe5b906e6,
2669 	0xe0ccdeeb, 0xdbfbb796, 0xd744fcc9, 0xd2a81d91, 0xce248c14, 0xc9b9bd85,
2670 	0xc5672a10, 0xc12c4cc9, 0xbd08a39e, 0xb8fbaf46, 0xb504f333, 0xb123f581,
2671 	0xad583ee9, 0xa9a15ab4, 0xa5fed6a9, 0xa2704302, 0x9ef5325f, 0x9b8d39b9,
2672 	0x9837f050, 0x94f4efa8, 0x91c3d373, 0x8ea4398a, 0x8b95c1e3, 0x88980e80,
2673 	0x85aac367, 0x82cd8698,
2674 };
2675 
2676 /*
2677  * Precomputed \Sum y^k { 1<=k<=n }.  These are floor(true_value) to prevent
2678  * over-estimates when re-combining.
2679  */
2680 static const u32 runnable_avg_yN_sum[] = {
2681 	    0, 1002, 1982, 2941, 3880, 4798, 5697, 6576, 7437, 8279, 9103,
2682 	 9909,10698,11470,12226,12966,13690,14398,15091,15769,16433,17082,
2683 	17718,18340,18949,19545,20128,20698,21256,21802,22336,22859,23371,
2684 };
2685 
2686 /*
2687  * Precomputed \Sum y^k { 1<=k<=n, where n%32=0 }. Values are rolled down to
2688  * lower integers. See Documentation/scheduler/sched-avg.txt for how these
2689  * were generated:
2690  */
2691 static const u32 __accumulated_sum_N32[] = {
2692 	    0, 23371, 35056, 40899, 43820, 45281,
2693 	46011, 46376, 46559, 46650, 46696, 46719,
2694 };
2695 
2696 /*
2697  * Approximate:
2698  *   val * y^n,    where y^32 ~= 0.5 (~1 scheduling period)
2699  */
2700 static __always_inline u64 decay_load(u64 val, u64 n)
2701 {
2702 	unsigned int local_n;
2703 
2704 	if (!n)
2705 		return val;
2706 	else if (unlikely(n > LOAD_AVG_PERIOD * 63))
2707 		return 0;
2708 
2709 	/* after bounds checking we can collapse to 32-bit */
2710 	local_n = n;
2711 
2712 	/*
2713 	 * As y^PERIOD = 1/2, we can combine
2714 	 *    y^n = 1/2^(n/PERIOD) * y^(n%PERIOD)
2715 	 * with a look-up table which covers y^n (n < PERIOD), in order
2716 	 * to achieve a constant-time decay_load().
2718 	 */
2719 	if (unlikely(local_n >= LOAD_AVG_PERIOD)) {
2720 		val >>= local_n / LOAD_AVG_PERIOD;
2721 		local_n %= LOAD_AVG_PERIOD;
2722 	}
2723 
2724 	val = mul_u64_u32_shr(val, runnable_avg_yN_inv[local_n], 32);
2725 	return val;
2726 }
2727 
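
/*
 * For example (illustrative): decay_load(1024, 32) roughly halves the
 * value (one full LOAD_AVG_PERIOD), while decay_load(1024, 16) multiplies
 * by y^16 ~= 0.707 via the table above, giving 724.
 */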
2728 /*
2729  * For updates fully spanning n periods, the contribution to runnable
2730  * average will be: \Sum 1024*y^n
2731  *
2732  * We can compute this reasonably efficiently by combining:
2733  *   y^PERIOD = 1/2 with precomputed \Sum 1024*y^n {for  n <PERIOD}
2734  */
2735 static u32 __compute_runnable_contrib(u64 n)
2736 {
2737 	u32 contrib = 0;
2738 
2739 	if (likely(n <= LOAD_AVG_PERIOD))
2740 		return runnable_avg_yN_sum[n];
2741 	else if (unlikely(n >= LOAD_AVG_MAX_N))
2742 		return LOAD_AVG_MAX;
2743 
2744 	/* Since n < LOAD_AVG_MAX_N, n/LOAD_AVG_PERIOD < 11 */
2745 	contrib = __accumulated_sum_N32[n/LOAD_AVG_PERIOD];
2746 	n %= LOAD_AVG_PERIOD;
2747 	contrib = decay_load(contrib, n);
2748 	return contrib + runnable_avg_yN_sum[n];
2749 }
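
/*
 * For example (illustrative): for n = 40 periods the sum decomposes as
 * y^8 * \Sum 1024*y^k {1<=k<=32} + \Sum 1024*y^k {1<=k<=8}, i.e. roughly
 * 0.841 * 23371 + 7437 ~= 27k, using the precomputed tables above.
 */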
2750 
2751 #define cap_scale(v, s) ((v)*(s) >> SCHED_CAPACITY_SHIFT)
2752 
2753 /*
2754  * We can represent the historical contribution to runnable average as the
2755  * coefficients of a geometric series.  To do this we sub-divide our runnable
2756  * history into segments of approximately 1ms (1024us); label the segment that
2757  * occurred N-ms ago p_N, with p_0 corresponding to the current period, e.g.
2758  *
2759  * [<- 1024us ->|<- 1024us ->|<- 1024us ->| ...
2760  *      p0            p1           p2
2761  *     (now)       (~1ms ago)  (~2ms ago)
2762  *
2763  * Let u_i denote the fraction of p_i that the entity was runnable.
2764  *
2765  * We then designate the fractions u_i as our co-efficients, yielding the
2766  * following representation of historical load:
2767  *   u_0 + u_1*y + u_2*y^2 + u_3*y^3 + ...
2768  *
2769  * We choose y based on the width of a reasonable scheduling period, fixing:
2770  *   y^32 = 0.5
2771  *
2772  * This means that the contribution to load ~32ms ago (u_32) will be weighted
2773  * approximately half as much as the contribution to load within the last ms
2774  * (u_0).
2775  *
2776  * When a period "rolls over" and we have new u_0`, multiplying the previous
2777  * sum again by y is sufficient to update:
2778  *   load_avg = u_0` + y*(u_0 + u_1*y + u_2*y^2 + ... )
2779  *            = u_0 + u_1*y + u_2*y^2 + ... [re-labeling u_i --> u_{i+1}]
2780  */
2781 static __always_inline int
2782 __update_load_avg(u64 now, int cpu, struct sched_avg *sa,
2783 		  unsigned long weight, int running, struct cfs_rq *cfs_rq)
2784 {
2785 	u64 delta, scaled_delta, periods;
2786 	u32 contrib;
2787 	unsigned int delta_w, scaled_delta_w, decayed = 0;
2788 	unsigned long scale_freq, scale_cpu;
2789 
2790 	delta = now - sa->last_update_time;
2791 	/*
2792 	 * This should only happen when time goes backwards, which it
2793 	 * unfortunately does during sched clock init when we swap over to TSC.
2794 	 */
2795 	if ((s64)delta < 0) {
2796 		sa->last_update_time = now;
2797 		return 0;
2798 	}
2799 
2800 	/*
2801 	 * Use 1024ns as the unit of measurement since it's a reasonable
2802 	 * approximation of 1us and fast to compute.
2803 	 */
2804 	delta >>= 10;
2805 	if (!delta)
2806 		return 0;
2807 	sa->last_update_time = now;
2808 
2809 	scale_freq = arch_scale_freq_capacity(NULL, cpu);
2810 	scale_cpu = arch_scale_cpu_capacity(NULL, cpu);
2811 
2812 	/* delta_w is the amount already accumulated against our next period */
2813 	delta_w = sa->period_contrib;
2814 	if (delta + delta_w >= 1024) {
2815 		decayed = 1;
2816 
2817 		/* whatever is left for the next period starts over; we don't know it yet */
2818 		sa->period_contrib = 0;
2819 
2820 		/*
2821 		 * Now that we know we're crossing a period boundary, figure
2822 		 * out how much from delta we need to complete the current
2823 		 * period and accrue it.
2824 		 */
2825 		delta_w = 1024 - delta_w;
2826 		scaled_delta_w = cap_scale(delta_w, scale_freq);
2827 		if (weight) {
2828 			sa->load_sum += weight * scaled_delta_w;
2829 			if (cfs_rq) {
2830 				cfs_rq->runnable_load_sum +=
2831 						weight * scaled_delta_w;
2832 			}
2833 		}
2834 		if (running)
2835 			sa->util_sum += scaled_delta_w * scale_cpu;
2836 
2837 		delta -= delta_w;
2838 
2839 		/* Figure out how many additional periods this update spans */
2840 		periods = delta / 1024;
2841 		delta %= 1024;
2842 
2843 		sa->load_sum = decay_load(sa->load_sum, periods + 1);
2844 		if (cfs_rq) {
2845 			cfs_rq->runnable_load_sum =
2846 				decay_load(cfs_rq->runnable_load_sum, periods + 1);
2847 		}
2848 		sa->util_sum = decay_load((u64)(sa->util_sum), periods + 1);
2849 
2850 		/* Efficiently calculate \sum (1..n_period) 1024*y^i */
2851 		contrib = __compute_runnable_contrib(periods);
2852 		contrib = cap_scale(contrib, scale_freq);
2853 		if (weight) {
2854 			sa->load_sum += weight * contrib;
2855 			if (cfs_rq)
2856 				cfs_rq->runnable_load_sum += weight * contrib;
2857 		}
2858 		if (running)
2859 			sa->util_sum += contrib * scale_cpu;
2860 	}
2861 
2862 	/* Remainder of delta accrued against u_0` */
2863 	scaled_delta = cap_scale(delta, scale_freq);
2864 	if (weight) {
2865 		sa->load_sum += weight * scaled_delta;
2866 		if (cfs_rq)
2867 			cfs_rq->runnable_load_sum += weight * scaled_delta;
2868 	}
2869 	if (running)
2870 		sa->util_sum += scaled_delta * scale_cpu;
2871 
2872 	sa->period_contrib += delta;
2873 
2874 	if (decayed) {
2875 		sa->load_avg = div_u64(sa->load_sum, LOAD_AVG_MAX);
2876 		if (cfs_rq) {
2877 			cfs_rq->runnable_load_avg =
2878 				div_u64(cfs_rq->runnable_load_sum, LOAD_AVG_MAX);
2879 		}
2880 		sa->util_avg = sa->util_sum / LOAD_AVG_MAX;
2881 	}
2882 
2883 	return decayed;
2884 }
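
/*
 * A sanity check (illustrative, assuming full frequency/capacity scaling):
 * an entity that is always runnable accumulates load_sum towards
 * weight * LOAD_AVG_MAX (~47742), so load_avg converges towards its weight;
 * util_avg similarly converges towards the CPU's capacity when the entity
 * runs continuously.
 */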
2885 
2886 #ifdef CONFIG_FAIR_GROUP_SCHED
2887 /**
2888  * update_tg_load_avg - update the tg's load avg
2889  * @cfs_rq: the cfs_rq whose avg changed
2890  * @force: update regardless of how small the difference
2891  *
2892  * This function 'ensures': tg->load_avg := \Sum tg->cfs_rq[]->avg.load.
2893  * However, because tg->load_avg is a global value there are performance
2894  * considerations.
2895  *
2896  * In order to avoid having to look at the other cfs_rq's, we use a
2897  * differential update where we store the last value we propagated. This in
2898  * turn allows skipping updates if the differential is 'small'.
2899  *
2900  * Updating tg's load_avg is necessary before update_cfs_shares() (which is
2901  * done) and effective_load() (which is not done because it is too costly).
2902  */
2903 static inline void update_tg_load_avg(struct cfs_rq *cfs_rq, int force)
2904 {
2905 	long delta = cfs_rq->avg.load_avg - cfs_rq->tg_load_avg_contrib;
2906 
2907 	/*
2908 	 * No need to update load_avg for root_task_group as it is not used.
2909 	 */
2910 	if (cfs_rq->tg == &root_task_group)
2911 		return;
2912 
2913 	if (force || abs(delta) > cfs_rq->tg_load_avg_contrib / 64) {
2914 		atomic_long_add(delta, &cfs_rq->tg->load_avg);
2915 		cfs_rq->tg_load_avg_contrib = cfs_rq->avg.load_avg;
2916 	}
2917 }
2918 
2919 /*
2920  * Called within set_task_rq() right before setting a task's cpu. The
2921  * caller only guarantees p->pi_lock is held; no other assumptions,
2922  * including the state of rq->lock, should be made.
2923  */
2924 void set_task_rq_fair(struct sched_entity *se,
2925 		      struct cfs_rq *prev, struct cfs_rq *next)
2926 {
2927 	if (!sched_feat(ATTACH_AGE_LOAD))
2928 		return;
2929 
2930 	/*
2931 	 * We are supposed to update the task to "current" time, so that it is up
2932 	 * to date and ready to go to the new CPU/cfs_rq. But we have difficulty
2933 	 * getting what the current time is, so simply throw away the out-of-date
2934 	 * time. This will result in the wakee task being less decayed, but giving
2935 	 * the wakee more load does not sound too bad.
2936 	 */
2937 	if (se->avg.last_update_time && prev) {
2938 		u64 p_last_update_time;
2939 		u64 n_last_update_time;
2940 
2941 #ifndef CONFIG_64BIT
2942 		u64 p_last_update_time_copy;
2943 		u64 n_last_update_time_copy;
2944 
2945 		do {
2946 			p_last_update_time_copy = prev->load_last_update_time_copy;
2947 			n_last_update_time_copy = next->load_last_update_time_copy;
2948 
2949 			smp_rmb();
2950 
2951 			p_last_update_time = prev->avg.last_update_time;
2952 			n_last_update_time = next->avg.last_update_time;
2953 
2954 		} while (p_last_update_time != p_last_update_time_copy ||
2955 			 n_last_update_time != n_last_update_time_copy);
2956 #else
2957 		p_last_update_time = prev->avg.last_update_time;
2958 		n_last_update_time = next->avg.last_update_time;
2959 #endif
2960 		__update_load_avg(p_last_update_time, cpu_of(rq_of(prev)),
2961 				  &se->avg, 0, 0, NULL);
2962 		se->avg.last_update_time = n_last_update_time;
2963 	}
2964 }
2965 #else /* CONFIG_FAIR_GROUP_SCHED */
2966 static inline void update_tg_load_avg(struct cfs_rq *cfs_rq, int force) {}
2967 #endif /* CONFIG_FAIR_GROUP_SCHED */
2968 
2969 static inline void cfs_rq_util_change(struct cfs_rq *cfs_rq)
2970 {
2971 	if (&this_rq()->cfs == cfs_rq) {
2972 		/*
2973 		 * There are a few boundary cases this might miss but it should
2974 		 * get called often enough that it should (hopefully) not be
2975 		 * a real problem -- in addition, it is only called on the local
2976 		 * CPU, so if we enqueue remotely we'll miss an update, but
2977 		 * the next tick/schedule should update.
2978 		 *
2979 		 * It will not get called when we go idle, because the idle
2980 		 * thread is a different class (!fair), nor will the utilization
2981 		 * number include things like RT tasks.
2982 		 *
2983 		 * As is, the util number is not freq-invariant (we'd have to
2984 		 * implement arch_scale_freq_capacity() for that).
2985 		 *
2986 		 * See cpu_util().
2987 		 */
2988 		cpufreq_update_util(rq_of(cfs_rq), 0);
2989 	}
2990 }
2991 
2992 /*
2993  * Unsigned subtract and clamp on underflow.
2994  *
2995  * Explicitly do a load-store to ensure the intermediate value never hits
2996  * memory. This allows lockless observations without ever seeing the negative
2997  * values.
2998  */
2999 #define sub_positive(_ptr, _val) do {				\
3000 	typeof(_ptr) ptr = (_ptr);				\
3001 	typeof(*ptr) val = (_val);				\
3002 	typeof(*ptr) res, var = READ_ONCE(*ptr);		\
3003 	res = var - val;					\
3004 	if (res > var)						\
3005 		res = 0;					\
3006 	WRITE_ONCE(*ptr, res);					\
3007 } while (0)
3008 
3009 /**
3010  * update_cfs_rq_load_avg - update the cfs_rq's load/util averages
3011  * @now: current time, as per cfs_rq_clock_task()
3012  * @cfs_rq: cfs_rq to update
3013  * @update_freq: should we call cfs_rq_util_change() or will the caller do so
3014  *
3015  * The cfs_rq avg is the direct sum of all its entities' (blocked and runnable)
3016  * avgs. The immediate corollary is that all (fair) tasks must be attached, see
3017  * post_init_entity_util_avg().
3018  *
3019  * cfs_rq->avg is used for task_h_load() and update_cfs_shares() for example.
3020  *
3021  * Returns true if the load decayed or we removed load.
3022  *
3023  * Since both these conditions indicate a changed cfs_rq->avg.load we should
3024  * call update_tg_load_avg() when this function returns true.
3025  */
3026 static inline int
3027 update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq, bool update_freq)
3028 {
3029 	struct sched_avg *sa = &cfs_rq->avg;
3030 	int decayed, removed_load = 0, removed_util = 0;
3031 
3032 	if (atomic_long_read(&cfs_rq->removed_load_avg)) {
3033 		s64 r = atomic_long_xchg(&cfs_rq->removed_load_avg, 0);
3034 		sub_positive(&sa->load_avg, r);
3035 		sub_positive(&sa->load_sum, r * LOAD_AVG_MAX);
3036 		removed_load = 1;
3037 	}
3038 
3039 	if (atomic_long_read(&cfs_rq->removed_util_avg)) {
3040 		long r = atomic_long_xchg(&cfs_rq->removed_util_avg, 0);
3041 		sub_positive(&sa->util_avg, r);
3042 		sub_positive(&sa->util_sum, r * LOAD_AVG_MAX);
3043 		removed_util = 1;
3044 	}
3045 
3046 	decayed = __update_load_avg(now, cpu_of(rq_of(cfs_rq)), sa,
3047 		scale_load_down(cfs_rq->load.weight), cfs_rq->curr != NULL, cfs_rq);
3048 
3049 #ifndef CONFIG_64BIT
3050 	smp_wmb();
3051 	cfs_rq->load_last_update_time_copy = sa->last_update_time;
3052 #endif
3053 
3054 	if (update_freq && (decayed || removed_util))
3055 		cfs_rq_util_change(cfs_rq);
3056 
3057 	return decayed || removed_load;
3058 }
3059 
3060 /* Update task and its cfs_rq load average */
3061 static inline void update_load_avg(struct sched_entity *se, int update_tg)
3062 {
3063 	struct cfs_rq *cfs_rq = cfs_rq_of(se);
3064 	u64 now = cfs_rq_clock_task(cfs_rq);
3065 	struct rq *rq = rq_of(cfs_rq);
3066 	int cpu = cpu_of(rq);
3067 
3068 	/*
3069 	 * Track task load average for carrying it to the new CPU after migration, and
3070 	 * track group sched_entity load average for task_h_load calc in migration
3071 	 */
3072 	__update_load_avg(now, cpu, &se->avg,
3073 			  se->on_rq * scale_load_down(se->load.weight),
3074 			  cfs_rq->curr == se, NULL);
3075 
3076 	if (update_cfs_rq_load_avg(now, cfs_rq, true) && update_tg)
3077 		update_tg_load_avg(cfs_rq, 0);
3078 }
3079 
3080 /**
3081  * attach_entity_load_avg - attach this entity to its cfs_rq load avg
3082  * @cfs_rq: cfs_rq to attach to
3083  * @se: sched_entity to attach
3084  *
3085  * Must call update_cfs_rq_load_avg() before this, since we rely on
3086  * cfs_rq->avg.last_update_time being current.
3087  */
3088 static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
3089 {
3090 	if (!sched_feat(ATTACH_AGE_LOAD))
3091 		goto skip_aging;
3092 
3093 	/*
3094 	 * If we got migrated (either between CPUs or between cgroups) we'll
3095 	 * have aged the average right before clearing @last_update_time.
3096 	 *
3097 	 * Or we're fresh through post_init_entity_util_avg().
3098 	 */
3099 	if (se->avg.last_update_time) {
3100 		__update_load_avg(cfs_rq->avg.last_update_time, cpu_of(rq_of(cfs_rq)),
3101 				  &se->avg, 0, 0, NULL);
3102 
3103 		/*
3104 		 * XXX: we could have just aged the entire load away if we've been
3105 		 * absent from the fair class for too long.
3106 		 */
3107 	}
3108 
3109 skip_aging:
3110 	se->avg.last_update_time = cfs_rq->avg.last_update_time;
3111 	cfs_rq->avg.load_avg += se->avg.load_avg;
3112 	cfs_rq->avg.load_sum += se->avg.load_sum;
3113 	cfs_rq->avg.util_avg += se->avg.util_avg;
3114 	cfs_rq->avg.util_sum += se->avg.util_sum;
3115 
3116 	cfs_rq_util_change(cfs_rq);
3117 }
3118 
3119 /**
3120  * detach_entity_load_avg - detach this entity from its cfs_rq load avg
3121  * @cfs_rq: cfs_rq to detach from
3122  * @se: sched_entity to detach
3123  *
3124  * Must call update_cfs_rq_load_avg() before this, since we rely on
3125  * cfs_rq->avg.last_update_time being current.
3126  */
3127 static void detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
3128 {
3129 	__update_load_avg(cfs_rq->avg.last_update_time, cpu_of(rq_of(cfs_rq)),
3130 			  &se->avg, se->on_rq * scale_load_down(se->load.weight),
3131 			  cfs_rq->curr == se, NULL);
3132 
3133 	sub_positive(&cfs_rq->avg.load_avg, se->avg.load_avg);
3134 	sub_positive(&cfs_rq->avg.load_sum, se->avg.load_sum);
3135 	sub_positive(&cfs_rq->avg.util_avg, se->avg.util_avg);
3136 	sub_positive(&cfs_rq->avg.util_sum, se->avg.util_sum);
3137 
3138 	cfs_rq_util_change(cfs_rq);
3139 }
3140 
3141 /* Add the load generated by se into cfs_rq's load average */
3142 static inline void
3143 enqueue_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
3144 {
3145 	struct sched_avg *sa = &se->avg;
3146 	u64 now = cfs_rq_clock_task(cfs_rq);
3147 	int migrated, decayed;
3148 
3149 	migrated = !sa->last_update_time;
3150 	if (!migrated) {
3151 		__update_load_avg(now, cpu_of(rq_of(cfs_rq)), sa,
3152 			se->on_rq * scale_load_down(se->load.weight),
3153 			cfs_rq->curr == se, NULL);
3154 	}
3155 
3156 	decayed = update_cfs_rq_load_avg(now, cfs_rq, !migrated);
3157 
3158 	cfs_rq->runnable_load_avg += sa->load_avg;
3159 	cfs_rq->runnable_load_sum += sa->load_sum;
3160 
3161 	if (migrated)
3162 		attach_entity_load_avg(cfs_rq, se);
3163 
3164 	if (decayed || migrated)
3165 		update_tg_load_avg(cfs_rq, 0);
3166 }
3167 
3168 /* Remove the runnable load generated by se from cfs_rq's runnable load average */
3169 static inline void
3170 dequeue_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
3171 {
3172 	update_load_avg(se, 1);
3173 
3174 	cfs_rq->runnable_load_avg =
3175 		max_t(long, cfs_rq->runnable_load_avg - se->avg.load_avg, 0);
3176 	cfs_rq->runnable_load_sum =
3177 		max_t(s64,  cfs_rq->runnable_load_sum - se->avg.load_sum, 0);
3178 }
3179 
3180 #ifndef CONFIG_64BIT
3181 static inline u64 cfs_rq_last_update_time(struct cfs_rq *cfs_rq)
3182 {
3183 	u64 last_update_time_copy;
3184 	u64 last_update_time;
3185 
3186 	do {
3187 		last_update_time_copy = cfs_rq->load_last_update_time_copy;
3188 		smp_rmb();
3189 		last_update_time = cfs_rq->avg.last_update_time;
3190 	} while (last_update_time != last_update_time_copy);
3191 
3192 	return last_update_time;
3193 }
3194 #else
3195 static inline u64 cfs_rq_last_update_time(struct cfs_rq *cfs_rq)
3196 {
3197 	return cfs_rq->avg.last_update_time;
3198 }
3199 #endif
3200 
3201 /*
3202  * Task first catches up with cfs_rq, and then subtracts
3203  * itself from the cfs_rq (task must be off the queue now).
3204  */
3205 void remove_entity_load_avg(struct sched_entity *se)
3206 {
3207 	struct cfs_rq *cfs_rq = cfs_rq_of(se);
3208 	u64 last_update_time;
3209 
3210 	/*
3211 	 * tasks cannot exit without having gone through wake_up_new_task() ->
3212 	 * post_init_entity_util_avg() which will have added things to the
3213 	 * cfs_rq, so we can remove unconditionally.
3214 	 *
3215 	 * Similarly for groups, they will have passed through
3216 	 * post_init_entity_util_avg() before unregister_sched_fair_group()
3217 	 * calls this.
3218 	 */
3219 
3220 	last_update_time = cfs_rq_last_update_time(cfs_rq);
3221 
3222 	__update_load_avg(last_update_time, cpu_of(rq_of(cfs_rq)), &se->avg, 0, 0, NULL);
3223 	atomic_long_add(se->avg.load_avg, &cfs_rq->removed_load_avg);
3224 	atomic_long_add(se->avg.util_avg, &cfs_rq->removed_util_avg);
3225 }
3226 
3227 static inline unsigned long cfs_rq_runnable_load_avg(struct cfs_rq *cfs_rq)
3228 {
3229 	return cfs_rq->runnable_load_avg;
3230 }
3231 
3232 static inline unsigned long cfs_rq_load_avg(struct cfs_rq *cfs_rq)
3233 {
3234 	return cfs_rq->avg.load_avg;
3235 }
3236 
3237 static int idle_balance(struct rq *this_rq);
3238 
3239 #else /* CONFIG_SMP */
3240 
3241 static inline int
3242 update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq, bool update_freq)
3243 {
3244 	return 0;
3245 }
3246 
3247 static inline void update_load_avg(struct sched_entity *se, int not_used)
3248 {
3249 	cpufreq_update_util(rq_of(cfs_rq_of(se)), 0);
3250 }
3251 
3252 static inline void
3253 enqueue_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {}
3254 static inline void
3255 dequeue_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {}
3256 static inline void remove_entity_load_avg(struct sched_entity *se) {}
3257 
3258 static inline void
3259 attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {}
3260 static inline void
3261 detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {}
3262 
3263 static inline int idle_balance(struct rq *rq)
3264 {
3265 	return 0;
3266 }
3267 
3268 #endif /* CONFIG_SMP */
3269 
3270 static void check_spread(struct cfs_rq *cfs_rq, struct sched_entity *se)
3271 {
3272 #ifdef CONFIG_SCHED_DEBUG
3273 	s64 d = se->vruntime - cfs_rq->min_vruntime;
3274 
3275 	if (d < 0)
3276 		d = -d;
3277 
3278 	if (d > 3*sysctl_sched_latency)
3279 		schedstat_inc(cfs_rq->nr_spread_over);
3280 #endif
3281 }
3282 
3283 static void
3284 place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
3285 {
3286 	u64 vruntime = cfs_rq->min_vruntime;
3287 
3288 	/*
3289 	 * The 'current' period is already promised to the current tasks;
3290 	 * however, the extra weight of the new task will slow them down a
3291 	 * little, so place the new task so that it fits in the slot that
3292 	 * stays open at the end.
3293 	 */
3294 	if (initial && sched_feat(START_DEBIT))
3295 		vruntime += sched_vslice(cfs_rq, se);
3296 
3297 	/* sleeps up to a single latency don't count. */
3298 	if (!initial) {
3299 		unsigned long thresh = sysctl_sched_latency;
3300 
3301 		/*
3302 		 * Halve their sleep time's effect, to allow
3303 		 * for a gentler effect of sleepers:
3304 		 */
3305 		if (sched_feat(GENTLE_FAIR_SLEEPERS))
3306 			thresh >>= 1;
3307 
3308 		vruntime -= thresh;
3309 	}
3310 
3311 	/* ensure we never gain time by being placed backwards. */
3312 	se->vruntime = max_vruntime(se->vruntime, vruntime);
3313 }
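
/*
 * For example (illustrative, default tunables): with a 6ms targeted latency
 * and GENTLE_FAIR_SLEEPERS, a waking task is placed no earlier than
 * min_vruntime - 3ms (weighted), while a newly forked task with START_DEBIT
 * is placed one vslice past min_vruntime, so it cannot immediately preempt
 * the existing tasks.
 */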
3314 
3315 static void check_enqueue_throttle(struct cfs_rq *cfs_rq);
3316 
3317 static inline void check_schedstat_required(void)
3318 {
3319 #ifdef CONFIG_SCHEDSTATS
3320 	if (schedstat_enabled())
3321 		return;
3322 
3323 	/* Force schedstat enabled if a dependent tracepoint is active */
3324 	if (trace_sched_stat_wait_enabled()    ||
3325 			trace_sched_stat_sleep_enabled()   ||
3326 			trace_sched_stat_iowait_enabled()  ||
3327 			trace_sched_stat_blocked_enabled() ||
3328 			trace_sched_stat_runtime_enabled())  {
3329 		printk_deferred_once("Scheduler tracepoints stat_sleep, stat_iowait, "
3330 			     "stat_blocked and stat_runtime require the "
3331 			     "kernel parameter schedstats=enabled or "
3332 			     "kernel.sched_schedstats=1\n");
3333 	}
3334 #endif
3335 }
3336 
3337 
3338 /*
3339  * MIGRATION
3340  *
3341  *	dequeue
3342  *	  update_curr()
3343  *	    update_min_vruntime()
3344  *	  vruntime -= min_vruntime
3345  *
3346  *	enqueue
3347  *	  update_curr()
3348  *	    update_min_vruntime()
3349  *	  vruntime += min_vruntime
3350  *
3351  * this way the vruntime transition between RQs is done when both
3352  * min_vruntime are up-to-date.
3353  *
3354  * WAKEUP (remote)
3355  *
3356  *	->migrate_task_rq_fair() (p->state == TASK_WAKING)
3357  *	  vruntime -= min_vruntime
3358  *
3359  *	enqueue
3360  *	  update_curr()
3361  *	    update_min_vruntime()
3362  *	  vruntime += min_vruntime
3363  *
3364  * this way we don't need the most up-to-date min_vruntime on the originating
3365  * CPU, and still get an up-to-date min_vruntime on the destination CPU.
3366  */
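
/*
 * Concretely (illustrative): a task at vruntime 105ms on a cfs_rq whose
 * min_vruntime is 100ms is dequeued carrying a relative value of 5ms; when
 * it is enqueued on a cfs_rq whose min_vruntime is 200ms it becomes 205ms,
 * preserving its relative position instead of inheriting stale absolute
 * values from the old runqueue.
 */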
3367 
3368 static void
3369 enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
3370 {
3371 	bool renorm = !(flags & ENQUEUE_WAKEUP) || (flags & ENQUEUE_MIGRATED);
3372 	bool curr = cfs_rq->curr == se;
3373 
3374 	/*
3375 	 * If we're the current task, we must renormalise before calling
3376 	 * update_curr().
3377 	 */
3378 	if (renorm && curr)
3379 		se->vruntime += cfs_rq->min_vruntime;
3380 
3381 	update_curr(cfs_rq);
3382 
3383 	/*
3384 	 * Otherwise, renormalise after, such that we're placed at the current
3385 	 * moment in time, instead of some random moment in the past. Being
3386 	 * placed in the past could significantly boost this task to the
3387 	 * fairness detriment of existing tasks.
3388 	 */
3389 	if (renorm && !curr)
3390 		se->vruntime += cfs_rq->min_vruntime;
3391 
3392 	enqueue_entity_load_avg(cfs_rq, se);
3393 	account_entity_enqueue(cfs_rq, se);
3394 	update_cfs_shares(cfs_rq);
3395 
3396 	if (flags & ENQUEUE_WAKEUP)
3397 		place_entity(cfs_rq, se, 0);
3398 
3399 	check_schedstat_required();
3400 	update_stats_enqueue(cfs_rq, se, flags);
3401 	check_spread(cfs_rq, se);
3402 	if (!curr)
3403 		__enqueue_entity(cfs_rq, se);
3404 	se->on_rq = 1;
3405 
3406 	if (cfs_rq->nr_running == 1) {
3407 		list_add_leaf_cfs_rq(cfs_rq);
3408 		check_enqueue_throttle(cfs_rq);
3409 	}
3410 }
3411 
3412 static void __clear_buddies_last(struct sched_entity *se)
3413 {
3414 	for_each_sched_entity(se) {
3415 		struct cfs_rq *cfs_rq = cfs_rq_of(se);
3416 		if (cfs_rq->last != se)
3417 			break;
3418 
3419 		cfs_rq->last = NULL;
3420 	}
3421 }
3422 
3423 static void __clear_buddies_next(struct sched_entity *se)
3424 {
3425 	for_each_sched_entity(se) {
3426 		struct cfs_rq *cfs_rq = cfs_rq_of(se);
3427 		if (cfs_rq->next != se)
3428 			break;
3429 
3430 		cfs_rq->next = NULL;
3431 	}
3432 }
3433 
3434 static void __clear_buddies_skip(struct sched_entity *se)
3435 {
3436 	for_each_sched_entity(se) {
3437 		struct cfs_rq *cfs_rq = cfs_rq_of(se);
3438 		if (cfs_rq->skip != se)
3439 			break;
3440 
3441 		cfs_rq->skip = NULL;
3442 	}
3443 }
3444 
3445 static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se)
3446 {
3447 	if (cfs_rq->last == se)
3448 		__clear_buddies_last(se);
3449 
3450 	if (cfs_rq->next == se)
3451 		__clear_buddies_next(se);
3452 
3453 	if (cfs_rq->skip == se)
3454 		__clear_buddies_skip(se);
3455 }
3456 
3457 static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq);
3458 
3459 static void
3460 dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
3461 {
3462 	/*
3463 	 * Update run-time statistics of the 'current'.
3464 	 */
3465 	update_curr(cfs_rq);
3466 	dequeue_entity_load_avg(cfs_rq, se);
3467 
3468 	update_stats_dequeue(cfs_rq, se, flags);
3469 
3470 	clear_buddies(cfs_rq, se);
3471 
3472 	if (se != cfs_rq->curr)
3473 		__dequeue_entity(cfs_rq, se);
3474 	se->on_rq = 0;
3475 	account_entity_dequeue(cfs_rq, se);
3476 
3477 	/*
3478 	 * Normalize after update_curr(); which will also have moved
3479 	 * min_vruntime if @se is the one holding it back. But before doing
3480 	 * update_min_vruntime() again, which will discount @se's position and
3481 	 * can move min_vruntime forward still more.
3482 	 */
3483 	if (!(flags & DEQUEUE_SLEEP))
3484 		se->vruntime -= cfs_rq->min_vruntime;
3485 
3486 	/* return excess runtime on last dequeue */
3487 	return_cfs_rq_runtime(cfs_rq);
3488 
3489 	update_cfs_shares(cfs_rq);
3490 
3491 	/*
3492 	 * Now advance min_vruntime if @se was the entity holding it back,
3493 	 * except when: DEQUEUE_SAVE && !DEQUEUE_MOVE, in this case we'll be
3494 	 * put back on, and if we advance min_vruntime, we'll be placed back
3495 	 * further than we started -- ie. we'll be penalized.
3496 	 */
3497 	if ((flags & (DEQUEUE_SAVE | DEQUEUE_MOVE)) == DEQUEUE_SAVE)
3498 		update_min_vruntime(cfs_rq);
3499 }
3500 
3501 /*
3502  * Preempt the current task with a newly woken task if needed:
3503  */
3504 static void
3505 check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
3506 {
3507 	unsigned long ideal_runtime, delta_exec;
3508 	struct sched_entity *se;
3509 	s64 delta;
3510 
3511 	ideal_runtime = sched_slice(cfs_rq, curr);
3512 	delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
3513 	if (delta_exec > ideal_runtime) {
3514 		resched_curr(rq_of(cfs_rq));
3515 		/*
3516 		 * The current task ran long enough, ensure it doesn't get
3517 		 * re-elected due to buddy favours.
3518 		 */
3519 		clear_buddies(cfs_rq, curr);
3520 		return;
3521 	}
3522 
3523 	/*
3524 	 * Ensure that a task that missed wakeup preemption by a
3525 	 * narrow margin doesn't have to wait for a full slice.
3526 	 * This also mitigates buddy induced latencies under load.
3527 	 */
3528 	if (delta_exec < sysctl_sched_min_granularity)
3529 		return;
3530 
3531 	se = __pick_first_entity(cfs_rq);
3532 	delta = curr->vruntime - se->vruntime;
3533 
3534 	if (delta < 0)
3535 		return;
3536 
3537 	if (delta > ideal_runtime)
3538 		resched_curr(rq_of(cfs_rq));
3539 }
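
/*
 * For example (illustrative, default tunables): with three runnable nice-0
 * tasks and a 6ms targeted latency, sched_slice() hands each roughly 2ms,
 * so the running task is rescheduled once it has used about 2ms since
 * prev_sum_exec_runtime; the additional vruntime-based check is only
 * applied after at least sysctl_sched_min_granularity has been consumed.
 */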
3540 
3541 static void
3542 set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
3543 {
3544 	/* 'current' is not kept within the tree. */
3545 	if (se->on_rq) {
3546 		/*
3547 		 * Any task has to be enqueued before it gets to execute on
3548 		 * a CPU. So account for the time it spent waiting on the
3549 		 * runqueue.
3550 		 */
3551 		update_stats_wait_end(cfs_rq, se);
3552 		__dequeue_entity(cfs_rq, se);
3553 		update_load_avg(se, 1);
3554 	}
3555 
3556 	update_stats_curr_start(cfs_rq, se);
3557 	cfs_rq->curr = se;
3558 
3559 	/*
3560 	 * Track our maximum slice length, if the CPU's load is at
3561 	 * least twice that of our own weight (i.e. don't track it
3562 	 * when there are only lesser-weight tasks around):
3563 	 */
3564 	if (schedstat_enabled() && rq_of(cfs_rq)->load.weight >= 2*se->load.weight) {
3565 		schedstat_set(se->statistics.slice_max,
3566 			max((u64)schedstat_val(se->statistics.slice_max),
3567 			    se->sum_exec_runtime - se->prev_sum_exec_runtime));
3568 	}
3569 
3570 	se->prev_sum_exec_runtime = se->sum_exec_runtime;
3571 }
3572 
3573 static int
3574 wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se);
3575 
3576 /*
3577  * Pick the next process, keeping these things in mind, in this order:
3578  * 1) keep things fair between processes/task groups
3579  * 2) pick the "next" process, since someone really wants that to run
3580  * 3) pick the "last" process, for cache locality
3581  * 4) do not run the "skip" process, if something else is available
3582  */
3583 static struct sched_entity *
3584 pick_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *curr)
3585 {
3586 	struct sched_entity *left = __pick_first_entity(cfs_rq);
3587 	struct sched_entity *se;
3588 
3589 	/*
3590 	 * If curr is set we have to see if its left of the leftmost entity
3591 	 * still in the tree, provided there was anything in the tree at all.
3592 	 */
3593 	if (!left || (curr && entity_before(curr, left)))
3594 		left = curr;
3595 
3596 	se = left; /* ideally we run the leftmost entity */
3597 
3598 	/*
3599 	 * Avoid running the skip buddy, if running something else can
3600 	 * be done without getting too unfair.
3601 	 */
3602 	if (cfs_rq->skip == se) {
3603 		struct sched_entity *second;
3604 
3605 		if (se == curr) {
3606 			second = __pick_first_entity(cfs_rq);
3607 		} else {
3608 			second = __pick_next_entity(se);
3609 			if (!second || (curr && entity_before(curr, second)))
3610 				second = curr;
3611 		}
3612 
3613 		if (second && wakeup_preempt_entity(second, left) < 1)
3614 			se = second;
3615 	}
3616 
3617 	/*
3618 	 * Prefer last buddy, try to return the CPU to a preempted task.
3619 	 */
3620 	if (cfs_rq->last && wakeup_preempt_entity(cfs_rq->last, left) < 1)
3621 		se = cfs_rq->last;
3622 
3623 	/*
3624 	 * Someone really wants this to run. If it's not unfair, run it.
3625 	 */
3626 	if (cfs_rq->next && wakeup_preempt_entity(cfs_rq->next, left) < 1)
3627 		se = cfs_rq->next;
3628 
3629 	clear_buddies(cfs_rq, se);
3630 
3631 	return se;
3632 }
3633 
3634 static bool check_cfs_rq_runtime(struct cfs_rq *cfs_rq);
3635 
3636 static void put_prev_entity(struct cfs_rq *cfs_rq, struct sched_entity *prev)
3637 {
3638 	/*
3639 	 * If still on the runqueue then deactivate_task()
3640 	 * was not called and update_curr() has to be done:
3641 	 */
3642 	if (prev->on_rq)
3643 		update_curr(cfs_rq);
3644 
3645 	/* throttle cfs_rqs exceeding runtime */
3646 	check_cfs_rq_runtime(cfs_rq);
3647 
3648 	check_spread(cfs_rq, prev);
3649 
3650 	if (prev->on_rq) {
3651 		update_stats_wait_start(cfs_rq, prev);
3652 		/* Put 'current' back into the tree. */
3653 		__enqueue_entity(cfs_rq, prev);
3654 		/* in !on_rq case, update occurred at dequeue */
3655 		update_load_avg(prev, 0);
3656 	}
3657 	cfs_rq->curr = NULL;
3658 }
3659 
3660 static void
3661 entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued)
3662 {
3663 	/*
3664 	 * Update run-time statistics of the 'current'.
3665 	 */
3666 	update_curr(cfs_rq);
3667 
3668 	/*
3669 	 * Ensure that runnable average is periodically updated.
3670 	 */
3671 	update_load_avg(curr, 1);
3672 	update_cfs_shares(cfs_rq);
3673 
3674 #ifdef CONFIG_SCHED_HRTICK
3675 	/*
3676 	 * queued ticks are scheduled to match the slice, so don't bother
3677 	 * validating it and just reschedule.
3678 	 */
3679 	if (queued) {
3680 		resched_curr(rq_of(cfs_rq));
3681 		return;
3682 	}
3683 	/*
3684 	 * don't let the period tick interfere with the hrtick preemption
3685 	 */
3686 	if (!sched_feat(DOUBLE_TICK) &&
3687 			hrtimer_active(&rq_of(cfs_rq)->hrtick_timer))
3688 		return;
3689 #endif
3690 
3691 	if (cfs_rq->nr_running > 1)
3692 		check_preempt_tick(cfs_rq, curr);
3693 }
3694 
3695 
3696 /**************************************************
3697  * CFS bandwidth control machinery
3698  */
3699 
3700 #ifdef CONFIG_CFS_BANDWIDTH
3701 
3702 #ifdef HAVE_JUMP_LABEL
3703 static struct static_key __cfs_bandwidth_used;
3704 
3705 static inline bool cfs_bandwidth_used(void)
3706 {
3707 	return static_key_false(&__cfs_bandwidth_used);
3708 }
3709 
3710 void cfs_bandwidth_usage_inc(void)
3711 {
3712 	static_key_slow_inc(&__cfs_bandwidth_used);
3713 }
3714 
3715 void cfs_bandwidth_usage_dec(void)
3716 {
3717 	static_key_slow_dec(&__cfs_bandwidth_used);
3718 }
3719 #else /* HAVE_JUMP_LABEL */
3720 static bool cfs_bandwidth_used(void)
3721 {
3722 	return true;
3723 }
3724 
3725 void cfs_bandwidth_usage_inc(void) {}
3726 void cfs_bandwidth_usage_dec(void) {}
3727 #endif /* HAVE_JUMP_LABEL */
3728 
3729 /*
3730  * default period for cfs group bandwidth.
3731  * default: 0.1s, units: nanoseconds
3732  */
3733 static inline u64 default_cfs_period(void)
3734 {
3735 	return 100000000ULL;
3736 }
3737 
3738 static inline u64 sched_cfs_bandwidth_slice(void)
3739 {
3740 	return (u64)sysctl_sched_cfs_bandwidth_slice * NSEC_PER_USEC;
3741 }
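/*
 * Example, assuming the usual 5000us default for
 * sysctl_sched_cfs_bandwidth_slice: each cfs_rq pulls quota from the
 * global pool in 5ms chunks, so a group configured with quota=20ms per
 * 100ms period can hand out at most four full slices before runtime has
 * to be returned or the period timer refills the pool.
 */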
3742 
3743 /*
3744  * Replenish runtime according to assigned quota and update expiration time.
3745  * We use sched_clock_cpu directly instead of rq->clock to avoid adding
3746  * additional synchronization around rq->lock.
3747  *
3748  * requires cfs_b->lock
3749  */
3750 void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b)
3751 {
3752 	u64 now;
3753 
3754 	if (cfs_b->quota == RUNTIME_INF)
3755 		return;
3756 
3757 	now = sched_clock_cpu(smp_processor_id());
3758 	cfs_b->runtime = cfs_b->quota;
3759 	cfs_b->runtime_expires = now + ktime_to_ns(cfs_b->period);
3760 }
3761 
3762 static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg)
3763 {
3764 	return &tg->cfs_bandwidth;
3765 }
3766 
3767 /* rq_clock_task() normalized against any time this cfs_rq has spent throttled */
3768 static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq)
3769 {
3770 	if (unlikely(cfs_rq->throttle_count))
3771 		return cfs_rq->throttled_clock_task - cfs_rq->throttled_clock_task_time;
3772 
3773 	return rq_clock_task(rq_of(cfs_rq)) - cfs_rq->throttled_clock_task_time;
3774 }
3775 
3776 /* returns 0 on failure to allocate runtime */
3777 static int assign_cfs_rq_runtime(struct cfs_rq *cfs_rq)
3778 {
3779 	struct task_group *tg = cfs_rq->tg;
3780 	struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(tg);
3781 	u64 amount = 0, min_amount, expires;
3782 
3783 	/* note: this is a positive sum as runtime_remaining <= 0 */
3784 	min_amount = sched_cfs_bandwidth_slice() - cfs_rq->runtime_remaining;
3785 
3786 	raw_spin_lock(&cfs_b->lock);
3787 	if (cfs_b->quota == RUNTIME_INF)
3788 		amount = min_amount;
3789 	else {
3790 		start_cfs_bandwidth(cfs_b);
3791 
3792 		if (cfs_b->runtime > 0) {
3793 			amount = min(cfs_b->runtime, min_amount);
3794 			cfs_b->runtime -= amount;
3795 			cfs_b->idle = 0;
3796 		}
3797 	}
3798 	expires = cfs_b->runtime_expires;
3799 	raw_spin_unlock(&cfs_b->lock);
3800 
3801 	cfs_rq->runtime_remaining += amount;
3802 	/*
3803 	 * we may have advanced our local expiration to account for allowed
3804 	 * spread between our sched_clock and the one on which runtime was
3805 	 * issued.
3806 	 */
3807 	if ((s64)(expires - cfs_rq->runtime_expires) > 0)
3808 		cfs_rq->runtime_expires = expires;
3809 
3810 	return cfs_rq->runtime_remaining > 0;
3811 }
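/*
 * Worked example (illustrative numbers, 5ms slice assumed): a cfs_rq with
 * runtime_remaining = -200us requests min_amount = 5.2ms. If the global
 * pool holds only 3ms it receives all 3ms, ending up with 2.8ms of local
 * runtime and a successful return; with an empty pool it would return 0
 * and leave it to the callers to reschedule or throttle.
 */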
3812 
3813 /*
3814  * Note: This depends on the synchronization provided by sched_clock and the
3815  * fact that rq->clock snapshots this value.
3816  */
3817 static void expire_cfs_rq_runtime(struct cfs_rq *cfs_rq)
3818 {
3819 	struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
3820 
3821 	/* if the deadline is ahead of our clock, nothing to do */
3822 	if (likely((s64)(rq_clock(rq_of(cfs_rq)) - cfs_rq->runtime_expires) < 0))
3823 		return;
3824 
3825 	if (cfs_rq->runtime_remaining < 0)
3826 		return;
3827 
3828 	/*
3829 	 * If the local deadline has passed we have to consider the
3830 	 * possibility that our sched_clock is 'fast' and the global deadline
3831 	 * has not truly expired.
3832 	 *
3833 	 * Fortunately we can determine whether this is the case by checking
3834 	 * whether the global deadline has advanced. It is valid to compare
3835 	 * cfs_b->runtime_expires without any locks since we only care about
3836 	 * exact equality, so a partial write will still work.
3837 	 */
3838 
3839 	if (cfs_rq->runtime_expires != cfs_b->runtime_expires) {
3840 		/* extend local deadline, drift is bounded above by 2 ticks */
3841 		cfs_rq->runtime_expires += TICK_NSEC;
3842 	} else {
3843 		/* global deadline is ahead, expiration has passed */
3844 		cfs_rq->runtime_remaining = 0;
3845 	}
3846 }
3847 
3848 static void __account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec)
3849 {
3850 	/* dock delta_exec before expiring quota (as it could span periods) */
3851 	cfs_rq->runtime_remaining -= delta_exec;
3852 	expire_cfs_rq_runtime(cfs_rq);
3853 
3854 	if (likely(cfs_rq->runtime_remaining > 0))
3855 		return;
3856 
3857 	/*
3858 	 * if we're unable to extend our runtime we resched so that the active
3859 	 * hierarchy can be throttled
3860 	 */
3861 	if (!assign_cfs_rq_runtime(cfs_rq) && likely(cfs_rq->curr))
3862 		resched_curr(rq_of(cfs_rq));
3863 }
3864 
3865 static __always_inline
3866 void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec)
3867 {
3868 	if (!cfs_bandwidth_used() || !cfs_rq->runtime_enabled)
3869 		return;
3870 
3871 	__account_cfs_rq_runtime(cfs_rq, delta_exec);
3872 }
3873 
3874 static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq)
3875 {
3876 	return cfs_bandwidth_used() && cfs_rq->throttled;
3877 }
3878 
3879 /* check whether cfs_rq, or any parent, is throttled */
3880 static inline int throttled_hierarchy(struct cfs_rq *cfs_rq)
3881 {
3882 	return cfs_bandwidth_used() && cfs_rq->throttle_count;
3883 }
3884 
3885 /*
3886  * Ensure that neither of the group entities corresponding to src_cpu or
3887  * dest_cpu are members of a throttled hierarchy when performing group
3888  * load-balance operations.
3889  */
3890 static inline int throttled_lb_pair(struct task_group *tg,
3891 				    int src_cpu, int dest_cpu)
3892 {
3893 	struct cfs_rq *src_cfs_rq, *dest_cfs_rq;
3894 
3895 	src_cfs_rq = tg->cfs_rq[src_cpu];
3896 	dest_cfs_rq = tg->cfs_rq[dest_cpu];
3897 
3898 	return throttled_hierarchy(src_cfs_rq) ||
3899 	       throttled_hierarchy(dest_cfs_rq);
3900 }
3901 
3902 /* updated child weight may affect parent so we have to do this bottom up */
3903 static int tg_unthrottle_up(struct task_group *tg, void *data)
3904 {
3905 	struct rq *rq = data;
3906 	struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];
3907 
3908 	cfs_rq->throttle_count--;
3909 	if (!cfs_rq->throttle_count) {
3910 		/* adjust cfs_rq_clock_task() */
3911 		cfs_rq->throttled_clock_task_time += rq_clock_task(rq) -
3912 					     cfs_rq->throttled_clock_task;
3913 	}
3914 
3915 	return 0;
3916 }
3917 
3918 static int tg_throttle_down(struct task_group *tg, void *data)
3919 {
3920 	struct rq *rq = data;
3921 	struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];
3922 
3923 	/* group is entering throttled state, stop time */
3924 	if (!cfs_rq->throttle_count)
3925 		cfs_rq->throttled_clock_task = rq_clock_task(rq);
3926 	cfs_rq->throttle_count++;
3927 
3928 	return 0;
3929 }
3930 
3931 static void throttle_cfs_rq(struct cfs_rq *cfs_rq)
3932 {
3933 	struct rq *rq = rq_of(cfs_rq);
3934 	struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
3935 	struct sched_entity *se;
3936 	long task_delta, dequeue = 1;
3937 	bool empty;
3938 
3939 	se = cfs_rq->tg->se[cpu_of(rq_of(cfs_rq))];
3940 
3941 	/* freeze hierarchy runnable averages while throttled */
3942 	rcu_read_lock();
3943 	walk_tg_tree_from(cfs_rq->tg, tg_throttle_down, tg_nop, (void *)rq);
3944 	rcu_read_unlock();
3945 
3946 	task_delta = cfs_rq->h_nr_running;
3947 	for_each_sched_entity(se) {
3948 		struct cfs_rq *qcfs_rq = cfs_rq_of(se);
3949 		/* throttled entity or throttle-on-deactivate */
3950 		if (!se->on_rq)
3951 			break;
3952 
3953 		if (dequeue)
3954 			dequeue_entity(qcfs_rq, se, DEQUEUE_SLEEP);
3955 		qcfs_rq->h_nr_running -= task_delta;
3956 
3957 		if (qcfs_rq->load.weight)
3958 			dequeue = 0;
3959 	}
3960 
3961 	if (!se)
3962 		sub_nr_running(rq, task_delta);
3963 
3964 	cfs_rq->throttled = 1;
3965 	cfs_rq->throttled_clock = rq_clock(rq);
3966 	raw_spin_lock(&cfs_b->lock);
3967 	empty = list_empty(&cfs_b->throttled_cfs_rq);
3968 
3969 	/*
3970 	 * Add to the _head_ of the list, so that an already-started
3971 	 * distribute_cfs_runtime will not see us
3972 	 */
3973 	list_add_rcu(&cfs_rq->throttled_list, &cfs_b->throttled_cfs_rq);
3974 
3975 	/*
3976 	 * If we're the first throttled cfs_rq, make sure the bandwidth
3977 	 * timer is running.
3978 	 */
3979 	if (empty)
3980 		start_cfs_bandwidth(cfs_b);
3981 
3982 	raw_spin_unlock(&cfs_b->lock);
3983 }
3984 
3985 void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
3986 {
3987 	struct rq *rq = rq_of(cfs_rq);
3988 	struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
3989 	struct sched_entity *se;
3990 	int enqueue = 1;
3991 	long task_delta;
3992 
3993 	se = cfs_rq->tg->se[cpu_of(rq)];
3994 
3995 	cfs_rq->throttled = 0;
3996 
3997 	update_rq_clock(rq);
3998 
3999 	raw_spin_lock(&cfs_b->lock);
4000 	cfs_b->throttled_time += rq_clock(rq) - cfs_rq->throttled_clock;
4001 	list_del_rcu(&cfs_rq->throttled_list);
4002 	raw_spin_unlock(&cfs_b->lock);
4003 
4004 	/* update hierarchical throttle state */
4005 	walk_tg_tree_from(cfs_rq->tg, tg_nop, tg_unthrottle_up, (void *)rq);
4006 
4007 	if (!cfs_rq->load.weight)
4008 		return;
4009 
4010 	task_delta = cfs_rq->h_nr_running;
4011 	for_each_sched_entity(se) {
4012 		if (se->on_rq)
4013 			enqueue = 0;
4014 
4015 		cfs_rq = cfs_rq_of(se);
4016 		if (enqueue)
4017 			enqueue_entity(cfs_rq, se, ENQUEUE_WAKEUP);
4018 		cfs_rq->h_nr_running += task_delta;
4019 
4020 		if (cfs_rq_throttled(cfs_rq))
4021 			break;
4022 	}
4023 
4024 	if (!se)
4025 		add_nr_running(rq, task_delta);
4026 
4027 	/* determine whether we need to wake up potentially idle cpu */
4028 	if (rq->curr == rq->idle && rq->cfs.nr_running)
4029 		resched_curr(rq);
4030 }
4031 
4032 static u64 distribute_cfs_runtime(struct cfs_bandwidth *cfs_b,
4033 		u64 remaining, u64 expires)
4034 {
4035 	struct cfs_rq *cfs_rq;
4036 	u64 runtime;
4037 	u64 starting_runtime = remaining;
4038 
4039 	rcu_read_lock();
4040 	list_for_each_entry_rcu(cfs_rq, &cfs_b->throttled_cfs_rq,
4041 				throttled_list) {
4042 		struct rq *rq = rq_of(cfs_rq);
4043 
4044 		raw_spin_lock(&rq->lock);
4045 		if (!cfs_rq_throttled(cfs_rq))
4046 			goto next;
4047 
4048 		runtime = -cfs_rq->runtime_remaining + 1;
4049 		if (runtime > remaining)
4050 			runtime = remaining;
4051 		remaining -= runtime;
4052 
4053 		cfs_rq->runtime_remaining += runtime;
4054 		cfs_rq->runtime_expires = expires;
4055 
4056 		/* we check whether we're throttled above */
4057 		if (cfs_rq->runtime_remaining > 0)
4058 			unthrottle_cfs_rq(cfs_rq);
4059 
4060 next:
4061 		raw_spin_unlock(&rq->lock);
4062 
4063 		if (!remaining)
4064 			break;
4065 	}
4066 	rcu_read_unlock();
4067 
4068 	return starting_runtime - remaining;
4069 }
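/*
 * Example: with remaining = 10ms and two throttled cfs_rqs owing 3ms and
 * 8ms, the first gets 3ms + 1ns and is unthrottled, the second gets the
 * ~7ms that is left, stays negative and remains throttled, and the
 * function reports the full 10ms as distributed.
 */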
4070 
4071 /*
4072  * Responsible for refilling a task_group's bandwidth and unthrottling its
4073  * cfs_rqs as appropriate. If there has been no activity within the last
4074  * period the timer is deactivated until scheduling resumes; cfs_b->idle is
4075  * used to track this state.
4076  */
4077 static int do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b, int overrun)
4078 {
4079 	u64 runtime, runtime_expires;
4080 	int throttled;
4081 
4082 	/* no need to continue the timer with no bandwidth constraint */
4083 	if (cfs_b->quota == RUNTIME_INF)
4084 		goto out_deactivate;
4085 
4086 	throttled = !list_empty(&cfs_b->throttled_cfs_rq);
4087 	cfs_b->nr_periods += overrun;
4088 
4089 	/*
4090 	 * idle depends on !throttled (for the case of a large deficit), and if
4091 	 * we're going inactive then everything else can be deferred
4092 	 */
4093 	if (cfs_b->idle && !throttled)
4094 		goto out_deactivate;
4095 
4096 	__refill_cfs_bandwidth_runtime(cfs_b);
4097 
4098 	if (!throttled) {
4099 		/* mark as potentially idle for the upcoming period */
4100 		cfs_b->idle = 1;
4101 		return 0;
4102 	}
4103 
4104 	/* account preceding periods in which throttling occurred */
4105 	cfs_b->nr_throttled += overrun;
4106 
4107 	runtime_expires = cfs_b->runtime_expires;
4108 
4109 	/*
4110 	 * This check is repeated as we are holding onto the new bandwidth while
4111 	 * we unthrottle. This can potentially race with an unthrottled group
4112 	 * trying to acquire new bandwidth from the global pool. This can result
4113 	 * in us over-using our runtime if it is all used during this loop, but
4114 	 * only by limited amounts in that extreme case.
4115 	 */
4116 	while (throttled && cfs_b->runtime > 0) {
4117 		runtime = cfs_b->runtime;
4118 		raw_spin_unlock(&cfs_b->lock);
4119 		/* we can't nest cfs_b->lock while distributing bandwidth */
4120 		runtime = distribute_cfs_runtime(cfs_b, runtime,
4121 						 runtime_expires);
4122 		raw_spin_lock(&cfs_b->lock);
4123 
4124 		throttled = !list_empty(&cfs_b->throttled_cfs_rq);
4125 
4126 		cfs_b->runtime -= min(runtime, cfs_b->runtime);
4127 	}
4128 
4129 	/*
4130 	 * While we are ensured activity in the period following an
4131 	 * unthrottle, this also covers the case in which the new bandwidth is
4132 	 * insufficient to cover the existing bandwidth deficit.  (Forcing the
4133 	 * timer to remain active while there are any throttled entities.)
4134 	 */
4135 	cfs_b->idle = 0;
4136 
4137 	return 0;
4138 
4139 out_deactivate:
4140 	return 1;
4141 }
4142 
4143 /* a cfs_rq won't donate quota below this amount */
4144 static const u64 min_cfs_rq_runtime = 1 * NSEC_PER_MSEC;
4145 /* minimum remaining period time to redistribute slack quota */
4146 static const u64 min_bandwidth_expiration = 2 * NSEC_PER_MSEC;
4147 /* how long we wait to gather additional slack before distributing */
4148 static const u64 cfs_bandwidth_slack_period = 5 * NSEC_PER_MSEC;
4149 
4150 /*
4151  * Are we near the end of the current quota period?
4152  *
4153  * Requires cfs_b->lock for hrtimer_expires_remaining to be safe against the
4154  * hrtimer base being cleared by hrtimer_start. In the case of
4155  * migrate_hrtimers, base is never cleared, so we are fine.
4156  */
4157 static int runtime_refresh_within(struct cfs_bandwidth *cfs_b, u64 min_expire)
4158 {
4159 	struct hrtimer *refresh_timer = &cfs_b->period_timer;
4160 	u64 remaining;
4161 
4162 	/* if the call-back is running a quota refresh is already occurring */
4163 	if (hrtimer_callback_running(refresh_timer))
4164 		return 1;
4165 
4166 	/* is a quota refresh about to occur? */
4167 	remaining = ktime_to_ns(hrtimer_expires_remaining(refresh_timer));
4168 	if (remaining < min_expire)
4169 		return 1;
4170 
4171 	return 0;
4172 }
4173 
4174 static void start_cfs_slack_bandwidth(struct cfs_bandwidth *cfs_b)
4175 {
4176 	u64 min_left = cfs_bandwidth_slack_period + min_bandwidth_expiration;
4177 
4178 	/* if there's a quota refresh soon don't bother with slack */
4179 	if (runtime_refresh_within(cfs_b, min_left))
4180 		return;
4181 
4182 	hrtimer_start(&cfs_b->slack_timer,
4183 			ns_to_ktime(cfs_bandwidth_slack_period),
4184 			HRTIMER_MODE_REL);
4185 }
4186 
4187 /* we know any runtime found here is valid as update_curr() precedes return */
4188 static void __return_cfs_rq_runtime(struct cfs_rq *cfs_rq)
4189 {
4190 	struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
4191 	s64 slack_runtime = cfs_rq->runtime_remaining - min_cfs_rq_runtime;
4192 
4193 	if (slack_runtime <= 0)
4194 		return;
4195 
4196 	raw_spin_lock(&cfs_b->lock);
4197 	if (cfs_b->quota != RUNTIME_INF &&
4198 	    cfs_rq->runtime_expires == cfs_b->runtime_expires) {
4199 		cfs_b->runtime += slack_runtime;
4200 
4201 		/* we are under rq->lock, defer unthrottling using a timer */
4202 		if (cfs_b->runtime > sched_cfs_bandwidth_slice() &&
4203 		    !list_empty(&cfs_b->throttled_cfs_rq))
4204 			start_cfs_slack_bandwidth(cfs_b);
4205 	}
4206 	raw_spin_unlock(&cfs_b->lock);
4207 
4208 	/* even if it's not valid for return we don't want to try again */
4209 	cfs_rq->runtime_remaining -= slack_runtime;
4210 }
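/*
 * Example: a cfs_rq dequeuing its last task with 4ms of local runtime
 * keeps min_cfs_rq_runtime (1ms) and returns 3ms of slack to the global
 * pool. If that leaves the pool holding more than one slice while
 * something is still throttled, the 5ms slack timer is armed to
 * redistribute it.
 */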
4211 
4212 static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq)
4213 {
4214 	if (!cfs_bandwidth_used())
4215 		return;
4216 
4217 	if (!cfs_rq->runtime_enabled || cfs_rq->nr_running)
4218 		return;
4219 
4220 	__return_cfs_rq_runtime(cfs_rq);
4221 }
4222 
4223 /*
4224  * This is done with a timer (instead of inline with bandwidth return) since
4225  * it's necessary to juggle rq->locks to unthrottle their respective cfs_rqs.
4226  */
4227 static void do_sched_cfs_slack_timer(struct cfs_bandwidth *cfs_b)
4228 {
4229 	u64 runtime = 0, slice = sched_cfs_bandwidth_slice();
4230 	u64 expires;
4231 
4232 	/* confirm we're still not at a refresh boundary */
4233 	raw_spin_lock(&cfs_b->lock);
4234 	if (runtime_refresh_within(cfs_b, min_bandwidth_expiration)) {
4235 		raw_spin_unlock(&cfs_b->lock);
4236 		return;
4237 	}
4238 
4239 	if (cfs_b->quota != RUNTIME_INF && cfs_b->runtime > slice)
4240 		runtime = cfs_b->runtime;
4241 
4242 	expires = cfs_b->runtime_expires;
4243 	raw_spin_unlock(&cfs_b->lock);
4244 
4245 	if (!runtime)
4246 		return;
4247 
4248 	runtime = distribute_cfs_runtime(cfs_b, runtime, expires);
4249 
4250 	raw_spin_lock(&cfs_b->lock);
4251 	if (expires == cfs_b->runtime_expires)
4252 		cfs_b->runtime -= min(runtime, cfs_b->runtime);
4253 	raw_spin_unlock(&cfs_b->lock);
4254 }
4255 
4256 /*
4257  * When a group wakes up we want to make sure that its quota is not already
4258  * expired/exceeded, otherwise it may be allowed to steal additional ticks of
4259  * runtime as update_curr() throttling cannot trigger until it's on-rq.
4260  */
4261 static void check_enqueue_throttle(struct cfs_rq *cfs_rq)
4262 {
4263 	if (!cfs_bandwidth_used())
4264 		return;
4265 
4266 	/* an active group must be handled by the update_curr()->put() path */
4267 	if (!cfs_rq->runtime_enabled || cfs_rq->curr)
4268 		return;
4269 
4270 	/* ensure the group is not already throttled */
4271 	if (cfs_rq_throttled(cfs_rq))
4272 		return;
4273 
4274 	/* update runtime allocation */
4275 	account_cfs_rq_runtime(cfs_rq, 0);
4276 	if (cfs_rq->runtime_remaining <= 0)
4277 		throttle_cfs_rq(cfs_rq);
4278 }
4279 
4280 static void sync_throttle(struct task_group *tg, int cpu)
4281 {
4282 	struct cfs_rq *pcfs_rq, *cfs_rq;
4283 
4284 	if (!cfs_bandwidth_used())
4285 		return;
4286 
4287 	if (!tg->parent)
4288 		return;
4289 
4290 	cfs_rq = tg->cfs_rq[cpu];
4291 	pcfs_rq = tg->parent->cfs_rq[cpu];
4292 
4293 	cfs_rq->throttle_count = pcfs_rq->throttle_count;
4294 	cfs_rq->throttled_clock_task = rq_clock_task(cpu_rq(cpu));
4295 }
4296 
4297 /* conditionally throttle active cfs_rq's from put_prev_entity() */
4298 static bool check_cfs_rq_runtime(struct cfs_rq *cfs_rq)
4299 {
4300 	if (!cfs_bandwidth_used())
4301 		return false;
4302 
4303 	if (likely(!cfs_rq->runtime_enabled || cfs_rq->runtime_remaining > 0))
4304 		return false;
4305 
4306 	/*
4307 	 * it's possible for a throttled entity to be forced into a running
4308 	 * state (e.g. set_curr_task), in this case we're finished.
4309 	 */
4310 	if (cfs_rq_throttled(cfs_rq))
4311 		return true;
4312 
4313 	throttle_cfs_rq(cfs_rq);
4314 	return true;
4315 }
4316 
4317 static enum hrtimer_restart sched_cfs_slack_timer(struct hrtimer *timer)
4318 {
4319 	struct cfs_bandwidth *cfs_b =
4320 		container_of(timer, struct cfs_bandwidth, slack_timer);
4321 
4322 	do_sched_cfs_slack_timer(cfs_b);
4323 
4324 	return HRTIMER_NORESTART;
4325 }
4326 
4327 static enum hrtimer_restart sched_cfs_period_timer(struct hrtimer *timer)
4328 {
4329 	struct cfs_bandwidth *cfs_b =
4330 		container_of(timer, struct cfs_bandwidth, period_timer);
4331 	int overrun;
4332 	int idle = 0;
4333 
4334 	raw_spin_lock(&cfs_b->lock);
4335 	for (;;) {
4336 		overrun = hrtimer_forward_now(timer, cfs_b->period);
4337 		if (!overrun)
4338 			break;
4339 
4340 		idle = do_sched_cfs_period_timer(cfs_b, overrun);
4341 	}
4342 	if (idle)
4343 		cfs_b->period_active = 0;
4344 	raw_spin_unlock(&cfs_b->lock);
4345 
4346 	return idle ? HRTIMER_NORESTART : HRTIMER_RESTART;
4347 }
4348 
4349 void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
4350 {
4351 	raw_spin_lock_init(&cfs_b->lock);
4352 	cfs_b->runtime = 0;
4353 	cfs_b->quota = RUNTIME_INF;
4354 	cfs_b->period = ns_to_ktime(default_cfs_period());
4355 
4356 	INIT_LIST_HEAD(&cfs_b->throttled_cfs_rq);
4357 	hrtimer_init(&cfs_b->period_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED);
4358 	cfs_b->period_timer.function = sched_cfs_period_timer;
4359 	hrtimer_init(&cfs_b->slack_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
4360 	cfs_b->slack_timer.function = sched_cfs_slack_timer;
4361 }
4362 
4363 static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq)
4364 {
4365 	cfs_rq->runtime_enabled = 0;
4366 	INIT_LIST_HEAD(&cfs_rq->throttled_list);
4367 }
4368 
4369 void start_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
4370 {
4371 	lockdep_assert_held(&cfs_b->lock);
4372 
4373 	if (!cfs_b->period_active) {
4374 		cfs_b->period_active = 1;
4375 		hrtimer_forward_now(&cfs_b->period_timer, cfs_b->period);
4376 		hrtimer_start_expires(&cfs_b->period_timer, HRTIMER_MODE_ABS_PINNED);
4377 	}
4378 }
4379 
4380 static void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
4381 {
4382 	/* init_cfs_bandwidth() was not called */
4383 	if (!cfs_b->throttled_cfs_rq.next)
4384 		return;
4385 
4386 	hrtimer_cancel(&cfs_b->period_timer);
4387 	hrtimer_cancel(&cfs_b->slack_timer);
4388 }
4389 
4390 static void __maybe_unused update_runtime_enabled(struct rq *rq)
4391 {
4392 	struct cfs_rq *cfs_rq;
4393 
4394 	for_each_leaf_cfs_rq(rq, cfs_rq) {
4395 		struct cfs_bandwidth *cfs_b = &cfs_rq->tg->cfs_bandwidth;
4396 
4397 		raw_spin_lock(&cfs_b->lock);
4398 		cfs_rq->runtime_enabled = cfs_b->quota != RUNTIME_INF;
4399 		raw_spin_unlock(&cfs_b->lock);
4400 	}
4401 }
4402 
4403 static void __maybe_unused unthrottle_offline_cfs_rqs(struct rq *rq)
4404 {
4405 	struct cfs_rq *cfs_rq;
4406 
4407 	for_each_leaf_cfs_rq(rq, cfs_rq) {
4408 		if (!cfs_rq->runtime_enabled)
4409 			continue;
4410 
4411 		/*
4412 		 * clock_task is not advancing so we just need to make sure
4413 		 * there's some valid quota amount
4414 		 */
4415 		cfs_rq->runtime_remaining = 1;
4416 		/*
4417 		 * Offline rq is schedulable till cpu is completely disabled
4418 		 * in take_cpu_down(), so we prevent new cfs throttling here.
4419 		 */
4420 		cfs_rq->runtime_enabled = 0;
4421 
4422 		if (cfs_rq_throttled(cfs_rq))
4423 			unthrottle_cfs_rq(cfs_rq);
4424 	}
4425 }
4426 
4427 #else /* CONFIG_CFS_BANDWIDTH */
4428 static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq)
4429 {
4430 	return rq_clock_task(rq_of(cfs_rq));
4431 }
4432 
4433 static void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec) {}
4434 static bool check_cfs_rq_runtime(struct cfs_rq *cfs_rq) { return false; }
4435 static void check_enqueue_throttle(struct cfs_rq *cfs_rq) {}
4436 static inline void sync_throttle(struct task_group *tg, int cpu) {}
4437 static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq) {}
4438 
4439 static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq)
4440 {
4441 	return 0;
4442 }
4443 
4444 static inline int throttled_hierarchy(struct cfs_rq *cfs_rq)
4445 {
4446 	return 0;
4447 }
4448 
4449 static inline int throttled_lb_pair(struct task_group *tg,
4450 				    int src_cpu, int dest_cpu)
4451 {
4452 	return 0;
4453 }
4454 
4455 void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b) {}
4456 
4457 #ifdef CONFIG_FAIR_GROUP_SCHED
4458 static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq) {}
4459 #endif
4460 
4461 static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg)
4462 {
4463 	return NULL;
4464 }
4465 static inline void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b) {}
4466 static inline void update_runtime_enabled(struct rq *rq) {}
4467 static inline void unthrottle_offline_cfs_rqs(struct rq *rq) {}
4468 
4469 #endif /* CONFIG_CFS_BANDWIDTH */
4470 
4471 /**************************************************
4472  * CFS operations on tasks:
4473  */
4474 
4475 #ifdef CONFIG_SCHED_HRTICK
4476 static void hrtick_start_fair(struct rq *rq, struct task_struct *p)
4477 {
4478 	struct sched_entity *se = &p->se;
4479 	struct cfs_rq *cfs_rq = cfs_rq_of(se);
4480 
4481 	SCHED_WARN_ON(task_rq(p) != rq);
4482 
4483 	if (rq->cfs.h_nr_running > 1) {
4484 		u64 slice = sched_slice(cfs_rq, se);
4485 		u64 ran = se->sum_exec_runtime - se->prev_sum_exec_runtime;
4486 		s64 delta = slice - ran;
4487 
4488 		if (delta < 0) {
4489 			if (rq->curr == p)
4490 				resched_curr(rq);
4491 			return;
4492 		}
4493 		hrtick_start(rq, delta);
4494 	}
4495 }
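/*
 * Example: with a computed slice of 4ms of which 1.5ms has been consumed
 * since the task was last picked, the hrtimer is programmed 2.5ms out so
 * preemption happens exactly at the slice boundary rather than at the
 * next regular tick; if the slice is already used up the task is simply
 * rescheduled.
 */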
4496 
4497 /*
4498  * called from enqueue/dequeue and updates the hrtick when the
4499  * current task is from our class and nr_running is low enough
4500  * to matter.
4501  */
4502 static void hrtick_update(struct rq *rq)
4503 {
4504 	struct task_struct *curr = rq->curr;
4505 
4506 	if (!hrtick_enabled(rq) || curr->sched_class != &fair_sched_class)
4507 		return;
4508 
4509 	if (cfs_rq_of(&curr->se)->nr_running < sched_nr_latency)
4510 		hrtick_start_fair(rq, curr);
4511 }
4512 #else /* !CONFIG_SCHED_HRTICK */
4513 static inline void
4514 hrtick_start_fair(struct rq *rq, struct task_struct *p)
4515 {
4516 }
4517 
4518 static inline void hrtick_update(struct rq *rq)
4519 {
4520 }
4521 #endif
4522 
4523 /*
4524  * The enqueue_task method is called before nr_running is
4525  * increased. Here we update the fair scheduling stats and
4526  * then put the task into the rbtree:
4527  */
4528 static void
4529 enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
4530 {
4531 	struct cfs_rq *cfs_rq;
4532 	struct sched_entity *se = &p->se;
4533 
4534 	/*
4535 	 * If in_iowait is set, the code below may not trigger any cpufreq
4536 	 * utilization updates, so do it here explicitly with the IOWAIT flag
4537 	 * passed.
4538 	 */
4539 	if (p->in_iowait)
4540 		cpufreq_update_this_cpu(rq, SCHED_CPUFREQ_IOWAIT);
4541 
4542 	for_each_sched_entity(se) {
4543 		if (se->on_rq)
4544 			break;
4545 		cfs_rq = cfs_rq_of(se);
4546 		enqueue_entity(cfs_rq, se, flags);
4547 
4548 		/*
4549 		 * end evaluation on encountering a throttled cfs_rq
4550 		 *
4551 		 * note: in the case of encountering a throttled cfs_rq we will
4552 		 * post the final h_nr_running increment below.
4553 		 */
4554 		if (cfs_rq_throttled(cfs_rq))
4555 			break;
4556 		cfs_rq->h_nr_running++;
4557 
4558 		flags = ENQUEUE_WAKEUP;
4559 	}
4560 
4561 	for_each_sched_entity(se) {
4562 		cfs_rq = cfs_rq_of(se);
4563 		cfs_rq->h_nr_running++;
4564 
4565 		if (cfs_rq_throttled(cfs_rq))
4566 			break;
4567 
4568 		update_load_avg(se, 1);
4569 		update_cfs_shares(cfs_rq);
4570 	}
4571 
4572 	if (!se)
4573 		add_nr_running(rq, 1);
4574 
4575 	hrtick_update(rq);
4576 }
4577 
4578 static void set_next_buddy(struct sched_entity *se);
4579 
4580 /*
4581  * The dequeue_task method is called before nr_running is
4582  * decreased. We remove the task from the rbtree and
4583  * update the fair scheduling stats:
4584  */
4585 static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
4586 {
4587 	struct cfs_rq *cfs_rq;
4588 	struct sched_entity *se = &p->se;
4589 	int task_sleep = flags & DEQUEUE_SLEEP;
4590 
4591 	for_each_sched_entity(se) {
4592 		cfs_rq = cfs_rq_of(se);
4593 		dequeue_entity(cfs_rq, se, flags);
4594 
4595 		/*
4596 		 * end evaluation on encountering a throttled cfs_rq
4597 		 *
4598 		 * note: in the case of encountering a throttled cfs_rq we will
4599 		 * post the final h_nr_running decrement below.
4600 		 */
4601 		if (cfs_rq_throttled(cfs_rq))
4602 			break;
4603 		cfs_rq->h_nr_running--;
4604 
4605 		/* Don't dequeue parent if it has other entities besides us */
4606 		if (cfs_rq->load.weight) {
4607 			/* Avoid re-evaluating load for this entity: */
4608 			se = parent_entity(se);
4609 			/*
4610 			 * Bias pick_next to pick a task from this cfs_rq, as
4611 			 * p is sleeping when it is within its sched_slice.
4612 			 */
4613 			if (task_sleep && se && !throttled_hierarchy(cfs_rq))
4614 				set_next_buddy(se);
4615 			break;
4616 		}
4617 		flags |= DEQUEUE_SLEEP;
4618 	}
4619 
4620 	for_each_sched_entity(se) {
4621 		cfs_rq = cfs_rq_of(se);
4622 		cfs_rq->h_nr_running--;
4623 
4624 		if (cfs_rq_throttled(cfs_rq))
4625 			break;
4626 
4627 		update_load_avg(se, 1);
4628 		update_cfs_shares(cfs_rq);
4629 	}
4630 
4631 	if (!se)
4632 		sub_nr_running(rq, 1);
4633 
4634 	hrtick_update(rq);
4635 }
4636 
4637 #ifdef CONFIG_SMP
4638 
4639 /* Working cpumask for: load_balance, load_balance_newidle. */
4640 DEFINE_PER_CPU(cpumask_var_t, load_balance_mask);
4641 DEFINE_PER_CPU(cpumask_var_t, select_idle_mask);
4642 
4643 #ifdef CONFIG_NO_HZ_COMMON
4644 /*
4645  * per rq 'load' array crap; XXX kill this.
4646  */
4647 
4648 /*
4649  * The exact cpuload calculated at every tick would be:
4650  *
4651  *   load' = (1 - 1/2^i) * load + (1/2^i) * cur_load
4652  *
4653  * If a cpu misses updates for n ticks (as it was idle) and update gets
4654  * called on the n+1-th tick when cpu may be busy, then we have:
4655  *
4656  *   load_n   = (1 - 1/2^i)^n * load_0
4657  *   load_n+1 = (1 - 1/2^i)   * load_n + (1/2^i) * cur_load
4658  *
4659  * decay_load_missed() below does efficient calculation of
4660  *
4661  *   load' = (1 - 1/2^i)^n * load
4662  *
4663  * Because x^(n+m) := x^n * x^m we can decompose any x^n in power-of-2 factors.
4664  * This allows us to precompute the above in said factors, thereby allowing the
4665  * reduction of an arbitrary n in O(log_2 n) steps. (See also
4666  * fixed_power_int())
4667  *
4668  * The calculation is approximated on a 128 point scale.
4669  */
4670 #define DEGRADE_SHIFT		7
4671 
4672 static const u8 degrade_zero_ticks[CPU_LOAD_IDX_MAX] = {0, 8, 32, 64, 128};
4673 static const u8 degrade_factor[CPU_LOAD_IDX_MAX][DEGRADE_SHIFT + 1] = {
4674 	{   0,   0,  0,  0,  0,  0, 0, 0 },
4675 	{  64,  32,  8,  0,  0,  0, 0, 0 },
4676 	{  96,  72, 40, 12,  1,  0, 0, 0 },
4677 	{ 112,  98, 75, 43, 15,  1, 0, 0 },
4678 	{ 120, 112, 98, 76, 45, 16, 2, 0 }
4679 };
4680 
4681 /*
4682  * Update cpu_load for any missed ticks, due to tickless idle. The backlog
4683  * builds up while the CPU is idle, so we just decay the old load without
4684  * adding any new load.
4685  */
4686 static unsigned long
4687 decay_load_missed(unsigned long load, unsigned long missed_updates, int idx)
4688 {
4689 	int j = 0;
4690 
4691 	if (!missed_updates)
4692 		return load;
4693 
4694 	if (missed_updates >= degrade_zero_ticks[idx])
4695 		return 0;
4696 
4697 	if (idx == 1)
4698 		return load >> missed_updates;
4699 
4700 	while (missed_updates) {
4701 		if (missed_updates % 2)
4702 			load = (load * degrade_factor[idx][j]) >> DEGRADE_SHIFT;
4703 
4704 		missed_updates >>= 1;
4705 		j++;
4706 	}
4707 	return load;
4708 }
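/*
 * Worked example of the table above: for idx = 2 one tick scales the load
 * by degrade_factor[2][0]/128 = 96/128 = 3/4. Missing 3 ticks (binary 11)
 * walks bits 0 and 1, multiplying by 96/128 and then 72/128, i.e. ~0.42,
 * which matches (3/4)^3 = 27/64 up to the per-step truncation.
 */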
4709 #endif /* CONFIG_NO_HZ_COMMON */
4710 
4711 /**
4712  * __cpu_load_update - update the rq->cpu_load[] statistics
4713  * @this_rq: The rq to update statistics for
4714  * @this_load: The current load
4715  * @pending_updates: The number of missed updates
4716  *
4717  * Update rq->cpu_load[] statistics. This function is usually called every
4718  * scheduler tick (TICK_NSEC).
4719  *
4720  * This function computes a decaying average:
4721  *
4722  *   load[i]' = (1 - 1/2^i) * load[i] + (1/2^i) * load
4723  *
4724  * Because of NOHZ it might not get called on every tick, hence the need for
4725  * the @pending_updates argument.
4726  *
4727  *   load[i]_n = (1 - 1/2^i) * load[i]_n-1 + (1/2^i) * load_n-1
4728  *             = A * load[i]_n-1 + B ; A := (1 - 1/2^i), B := (1/2^i) * load
4729  *             = A * (A * load[i]_n-2 + B) + B
4730  *             = A * (A * (A * load[i]_n-3 + B) + B) + B
4731  *             = A^3 * load[i]_n-3 + (A^2 + A + 1) * B
4732  *             = A^n * load[i]_0 + (A^(n-1) + A^(n-2) + ... + 1) * B
4733  *             = A^n * load[i]_0 + ((1 - A^n) / (1 - A)) * B
4734  *             = (1 - 1/2^i)^n * (load[i]_0 - load) + load
4735  *
4736  * In the above we've assumed load_n := load, which is true for NOHZ_FULL as
4737  * any change in load would have resulted in the tick being turned back on.
4738  *
4739  * For regular NOHZ, this reduces to:
4740  *
4741  *   load[i]_n = (1 - 1/2^i)^n * load[i]_0
4742  *
4743  * see decay_load_missed(). For NOHZ_FULL we get to subtract and add the extra
4744  * term.
4745  */
4746 static void cpu_load_update(struct rq *this_rq, unsigned long this_load,
4747 			    unsigned long pending_updates)
4748 {
4749 	unsigned long __maybe_unused tickless_load = this_rq->cpu_load[0];
4750 	int i, scale;
4751 
4752 	this_rq->nr_load_updates++;
4753 
4754 	/* Update our load: */
4755 	this_rq->cpu_load[0] = this_load; /* Fasttrack for idx 0 */
4756 	for (i = 1, scale = 2; i < CPU_LOAD_IDX_MAX; i++, scale += scale) {
4757 		unsigned long old_load, new_load;
4758 
4759 		/* scale is effectively 1 << i now, and >> i divides by scale */
4760 
4761 		old_load = this_rq->cpu_load[i];
4762 #ifdef CONFIG_NO_HZ_COMMON
4763 		old_load = decay_load_missed(old_load, pending_updates - 1, i);
4764 		if (tickless_load) {
4765 			old_load -= decay_load_missed(tickless_load, pending_updates - 1, i);
4766 			/*
4767 			 * old_load can never be a negative value because a
4768 			 * decayed tickless_load cannot be greater than the
4769 			 * original tickless_load.
4770 			 */
4771 			old_load += tickless_load;
4772 		}
4773 #endif
4774 		new_load = this_load;
4775 		/*
4776 		 * Round up the averaging division if load is increasing. This
4777 		 * prevents us from getting stuck on 9 if the load is 10, for
4778 		 * example.
4779 		 */
4780 		if (new_load > old_load)
4781 			new_load += scale - 1;
4782 
4783 		this_rq->cpu_load[i] = (old_load * (scale - 1) + new_load) >> i;
4784 	}
4785 
4786 	sched_avg_update(this_rq);
4787 }
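/*
 * Worked example of the loop above for idx 1 (scale = 2): old_load = 10
 * and this_load = 20 rounds the increasing load up to 21 and yields
 * (10 * 1 + 21) >> 1 = 15. With old_load = 9 and this_load = 10 the
 * round-up gives (9 + 11) >> 1 = 10 instead of sticking at 9, which is
 * exactly the case the in-loop comment describes.
 */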
4788 
4789 /* Used instead of source_load when we know the type == 0 */
4790 static unsigned long weighted_cpuload(const int cpu)
4791 {
4792 	return cfs_rq_runnable_load_avg(&cpu_rq(cpu)->cfs);
4793 }
4794 
4795 #ifdef CONFIG_NO_HZ_COMMON
4796 /*
4797  * There is no sane way to deal with nohz on smp when using jiffies because the
4798  * cpu doing the jiffies update might drift wrt the cpu doing the jiffy reading
4799  * causing off-by-one errors in observed deltas; {0,2} instead of {1,1}.
4800  *
4801  * Therefore we need to avoid the delta approach from the regular tick when
4802  * possible since that would seriously skew the load calculation. This is why we
4803  * use cpu_load_update_periodic() for CPUs out of nohz. However we'll rely on
4804  * jiffies deltas for updates happening while in nohz mode (idle ticks, idle
4805  * loop exit, nohz_idle_balance, nohz full exit...)
4806  *
4807  * This means we might still be one tick off for nohz periods.
4808  */
4809 
4810 static void cpu_load_update_nohz(struct rq *this_rq,
4811 				 unsigned long curr_jiffies,
4812 				 unsigned long load)
4813 {
4814 	unsigned long pending_updates;
4815 
4816 	pending_updates = curr_jiffies - this_rq->last_load_update_tick;
4817 	if (pending_updates) {
4818 		this_rq->last_load_update_tick = curr_jiffies;
4819 		/*
4820 		 * In the regular NOHZ case, we were idle, this means load 0.
4821 		 * In the NOHZ_FULL case, we were non-idle, we should consider
4822 		 * its weighted load.
4823 		 */
4824 		cpu_load_update(this_rq, load, pending_updates);
4825 	}
4826 }
4827 
4828 /*
4829  * Called from nohz_idle_balance() to update the load ratings before doing the
4830  * idle balance.
4831  */
4832 static void cpu_load_update_idle(struct rq *this_rq)
4833 {
4834 	/*
4835 	 * bail if there's load or we're actually up-to-date.
4836 	 */
4837 	if (weighted_cpuload(cpu_of(this_rq)))
4838 		return;
4839 
4840 	cpu_load_update_nohz(this_rq, READ_ONCE(jiffies), 0);
4841 }
4842 
4843 /*
4844  * Record CPU load on nohz entry so we know the tickless load to account
4845  * on nohz exit. cpu_load[0] will then be updated more frequently
4846  * than other cpu_load[idx] but it should be fine as cpu_load readers
4847  * shouldn't rely on synchronized cpu_load[*] updates.
4848  */
4849 void cpu_load_update_nohz_start(void)
4850 {
4851 	struct rq *this_rq = this_rq();
4852 
4853 	/*
4854 	 * This is all lockless but should be fine. If weighted_cpuload changes
4855 	 * concurrently we'll exit nohz. And cpu_load write can race with
4856 	 * cpu_load_update_idle() but both updaters would be writing the same.
4857 	 */
4858 	this_rq->cpu_load[0] = weighted_cpuload(cpu_of(this_rq));
4859 }
4860 
4861 /*
4862  * Account the tickless load in the end of a nohz frame.
4863  */
4864 void cpu_load_update_nohz_stop(void)
4865 {
4866 	unsigned long curr_jiffies = READ_ONCE(jiffies);
4867 	struct rq *this_rq = this_rq();
4868 	unsigned long load;
4869 
4870 	if (curr_jiffies == this_rq->last_load_update_tick)
4871 		return;
4872 
4873 	load = weighted_cpuload(cpu_of(this_rq));
4874 	raw_spin_lock(&this_rq->lock);
4875 	update_rq_clock(this_rq);
4876 	cpu_load_update_nohz(this_rq, curr_jiffies, load);
4877 	raw_spin_unlock(&this_rq->lock);
4878 }
4879 #else /* !CONFIG_NO_HZ_COMMON */
4880 static inline void cpu_load_update_nohz(struct rq *this_rq,
4881 					unsigned long curr_jiffies,
4882 					unsigned long load) { }
4883 #endif /* CONFIG_NO_HZ_COMMON */
4884 
4885 static void cpu_load_update_periodic(struct rq *this_rq, unsigned long load)
4886 {
4887 #ifdef CONFIG_NO_HZ_COMMON
4888 	/* See the mess around cpu_load_update_nohz(). */
4889 	this_rq->last_load_update_tick = READ_ONCE(jiffies);
4890 #endif
4891 	cpu_load_update(this_rq, load, 1);
4892 }
4893 
4894 /*
4895  * Called from scheduler_tick()
4896  */
4897 void cpu_load_update_active(struct rq *this_rq)
4898 {
4899 	unsigned long load = weighted_cpuload(cpu_of(this_rq));
4900 
4901 	if (tick_nohz_tick_stopped())
4902 		cpu_load_update_nohz(this_rq, READ_ONCE(jiffies), load);
4903 	else
4904 		cpu_load_update_periodic(this_rq, load);
4905 }
4906 
4907 /*
4908  * Return a low guess at the load of a migration-source cpu weighted
4909  * according to the scheduling class and "nice" value.
4910  *
4911  * We want to under-estimate the load of migration sources, to
4912  * balance conservatively.
4913  */
4914 static unsigned long source_load(int cpu, int type)
4915 {
4916 	struct rq *rq = cpu_rq(cpu);
4917 	unsigned long total = weighted_cpuload(cpu);
4918 
4919 	if (type == 0 || !sched_feat(LB_BIAS))
4920 		return total;
4921 
4922 	return min(rq->cpu_load[type-1], total);
4923 }
4924 
4925 /*
4926  * Return a high guess at the load of a migration-target cpu weighted
4927  * according to the scheduling class and "nice" value.
4928  */
4929 static unsigned long target_load(int cpu, int type)
4930 {
4931 	struct rq *rq = cpu_rq(cpu);
4932 	unsigned long total = weighted_cpuload(cpu);
4933 
4934 	if (type == 0 || !sched_feat(LB_BIAS))
4935 		return total;
4936 
4937 	return max(rq->cpu_load[type-1], total);
4938 }
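/*
 * Example of the LB_BIAS pair above: with cpu_load[type-1] = 1024 and an
 * instantaneous weighted_cpuload() of 512, source_load() reports the
 * smaller 512 (under-estimating a migration source) while target_load()
 * reports 1024 (over-estimating a migration target), so the balancer errs
 * on the side of leaving tasks where they are.
 */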
4939 
4940 static unsigned long capacity_of(int cpu)
4941 {
4942 	return cpu_rq(cpu)->cpu_capacity;
4943 }
4944 
4945 static unsigned long capacity_orig_of(int cpu)
4946 {
4947 	return cpu_rq(cpu)->cpu_capacity_orig;
4948 }
4949 
4950 static unsigned long cpu_avg_load_per_task(int cpu)
4951 {
4952 	struct rq *rq = cpu_rq(cpu);
4953 	unsigned long nr_running = READ_ONCE(rq->cfs.h_nr_running);
4954 	unsigned long load_avg = weighted_cpuload(cpu);
4955 
4956 	if (nr_running)
4957 		return load_avg / nr_running;
4958 
4959 	return 0;
4960 }
4961 
4962 #ifdef CONFIG_FAIR_GROUP_SCHED
4963 /*
4964  * effective_load() calculates the load change as seen from the root_task_group
4965  *
4966  * Adding load to a group doesn't make a group heavier, but can cause movement
4967  * of group shares between cpus. Assuming the shares were perfectly aligned one
4968  * can calculate the shift in shares.
4969  *
4970  * Calculate the effective load difference if @wl is added (subtracted) to @tg
4971  * on this @cpu and results in a total addition (subtraction) of @wg to the
4972  * total group weight.
4973  *
4974  * Given a runqueue weight distribution (rw_i) we can compute a shares
4975  * distribution (s_i) using:
4976  *
4977  *   s_i = rw_i / \Sum rw_j						(1)
4978  *
4979  * Suppose we have 4 CPUs and our @tg is a direct child of the root group and
4980  * has 7 equal weight tasks, distributed as below (rw_i), with the resulting
4981  * shares distribution (s_i):
4982  *
4983  *   rw_i = {   2,   4,   1,   0 }
4984  *   s_i  = { 2/7, 4/7, 1/7,   0 }
4985  *
4986  * As per wake_affine() we're interested in the load of two CPUs (the CPU the
4987  * task used to run on and the CPU the waker is running on), we need to
4988  * compute the effect of waking a task on either CPU and, in case of a sync
4989  * wakeup, compute the effect of the current task going to sleep.
4990  *
4991  * So for a change of @wl to the local @cpu with an overall group weight change
4992  * of @wl we can compute the new shares distribution (s'_i) using:
4993  *
4994  *   s'_i = (rw_i + @wl) / (@wg + \Sum rw_j)				(2)
4995  *
4996  * Suppose we're interested in CPUs 0 and 1, and want to compute the load
4997  * differences in waking a task to CPU 0. The additional task changes the
4998  * weight and shares distributions like:
4999  *
5000  *   rw'_i = {   3,   4,   1,   0 }
5001  *   s'_i  = { 3/8, 4/8, 1/8,   0 }
5002  *
5003  * We can then compute the difference in effective weight by using:
5004  *
5005  *   dw_i = S * (s'_i - s_i)						(3)
5006  *
5007  * Where 'S' is the group weight as seen by its parent.
5008  *
5009  * Therefore the effective change in loads on CPU 0 would be 5/56 (3/8 - 2/7)
5010  * times the weight of the group. The effect on CPU 1 would be -4/56 (4/8 -
5011  * 4/7) times the weight of the group.
5012  */
5013 static long effective_load(struct task_group *tg, int cpu, long wl, long wg)
5014 {
5015 	struct sched_entity *se = tg->se[cpu];
5016 
5017 	if (!tg->parent)	/* the trivial, non-cgroup case */
5018 		return wl;
5019 
5020 	for_each_sched_entity(se) {
5021 		struct cfs_rq *cfs_rq = se->my_q;
5022 		long W, w = cfs_rq_load_avg(cfs_rq);
5023 
5024 		tg = cfs_rq->tg;
5025 
5026 		/*
5027 		 * W = @wg + \Sum rw_j
5028 		 */
5029 		W = wg + atomic_long_read(&tg->load_avg);
5030 
5031 		/* Ensure \Sum rw_j >= rw_i */
5032 		W -= cfs_rq->tg_load_avg_contrib;
5033 		W += w;
5034 
5035 		/*
5036 		 * w = rw_i + @wl
5037 		 */
5038 		w += wl;
5039 
5040 		/*
5041 		 * wl = S * s'_i; see (2)
5042 		 */
5043 		if (W > 0 && w < W)
5044 			wl = (w * (long)scale_load_down(tg->shares)) / W;
5045 		else
5046 			wl = scale_load_down(tg->shares);
5047 
5048 		/*
5049 		 * Per the above, wl is the new se->load.weight value; since
5050 		 * those are clipped to [MIN_SHARES, ...) do so now. See
5051 		 * calc_cfs_shares().
5052 		 */
5053 		if (wl < MIN_SHARES)
5054 			wl = MIN_SHARES;
5055 
5056 		/*
5057 		 * wl = dw_i = S * (s'_i - s_i); see (3)
5058 		 */
5059 		wl -= se->avg.load_avg;
5060 
5061 		/*
5062 		 * Recursively apply this logic to all parent groups to compute
5063 		 * the final effective load change on the root group. Since
5064 		 * only the @tg group gets extra weight, all parent groups can
5065 		 * only redistribute existing shares. @wl is the shift in shares
5066 		 * resulting from this level per the above.
5067 		 */
5068 		wg = 0;
5069 	}
5070 
5071 	return wl;
5072 }
5073 #else
5074 
5075 static long effective_load(struct task_group *tg, int cpu, long wl, long wg)
5076 {
5077 	return wl;
5078 }
5079 
5080 #endif
5081 
5082 static void record_wakee(struct task_struct *p)
5083 {
5084 	/*
5085 	 * Only decay a single time; tasks that have less than 1 wakeup per
5086 	 * jiffy will not have built up many flips.
5087 	 */
5088 	if (time_after(jiffies, current->wakee_flip_decay_ts + HZ)) {
5089 		current->wakee_flips >>= 1;
5090 		current->wakee_flip_decay_ts = jiffies;
5091 	}
5092 
5093 	if (current->last_wakee != p) {
5094 		current->last_wakee = p;
5095 		current->wakee_flips++;
5096 	}
5097 }
5098 
5099 /*
5100  * Detect M:N waker/wakee relationships via a switching-frequency heuristic.
5101  *
5102  * A waker of many should wake a different task than the one last awakened
5103  * at a frequency roughly N times higher than one of its wakees.
5104  *
5105  * In order to determine whether we should let the load spread vs consolidating
5106  * to shared cache, we look for a minimum 'flip' frequency of llc_size in one
5107  * partner, and a factor of llc_size higher frequency in the other.
5108  *
5109  * With both conditions met, we can be relatively sure that the relationship is
5110  * non-monogamous, with partner count exceeding socket size.
5111  *
5112  * Waker/wakee being client/server, worker/dispatcher, interrupt source or
5113  * whatever is irrelevant, spread criteria is apparent partner count exceeds
5114  * socket size.
5115  */
5116 static int wake_wide(struct task_struct *p)
5117 {
5118 	unsigned int master = current->wakee_flips;
5119 	unsigned int slave = p->wakee_flips;
5120 	int factor = this_cpu_read(sd_llc_size);
5121 
5122 	if (master < slave)
5123 		swap(master, slave);
5124 	if (slave < factor || master < slave * factor)
5125 		return 0;
5126 	return 1;
5127 }
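/*
 * Example: with an 8-CPU LLC (factor = 8), a dispatcher whose wakee_flips
 * has grown to 80 waking a worker with 9 flips gives master = 80,
 * slave = 9; slave >= factor and master >= slave * factor (72), so
 * wake_wide() returns 1 and the wakeup is spread instead of being pulled
 * onto the waker's cache domain.
 */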
5128 
5129 static int wake_affine(struct sched_domain *sd, struct task_struct *p,
5130 		       int prev_cpu, int sync)
5131 {
5132 	s64 this_load, load;
5133 	s64 this_eff_load, prev_eff_load;
5134 	int idx, this_cpu;
5135 	struct task_group *tg;
5136 	unsigned long weight;
5137 	int balanced;
5138 
5139 	idx	  = sd->wake_idx;
5140 	this_cpu  = smp_processor_id();
5141 	load	  = source_load(prev_cpu, idx);
5142 	this_load = target_load(this_cpu, idx);
5143 
5144 	/*
5145 	 * If sync wakeup then subtract the (maximum possible)
5146 	 * effect of the currently running task from the load
5147 	 * of the current CPU:
5148 	 */
5149 	if (sync) {
5150 		tg = task_group(current);
5151 		weight = current->se.avg.load_avg;
5152 
5153 		this_load += effective_load(tg, this_cpu, -weight, -weight);
5154 		load += effective_load(tg, prev_cpu, 0, -weight);
5155 	}
5156 
5157 	tg = task_group(p);
5158 	weight = p->se.avg.load_avg;
5159 
5160 	/*
5161 	 * In low-load situations, where prev_cpu is idle and this_cpu is idle
5162 	 * due to the sync cause above having dropped this_load to 0, we'll
5163 	 * always have an imbalance, but there's really nothing you can do
5164 	 * about that, so that's good too.
5165 	 *
5166 	 * Otherwise check if either cpus are near enough in load to allow this
5167 	 * task to be woken on this_cpu.
5168 	 */
5169 	this_eff_load = 100;
5170 	this_eff_load *= capacity_of(prev_cpu);
5171 
5172 	prev_eff_load = 100 + (sd->imbalance_pct - 100) / 2;
5173 	prev_eff_load *= capacity_of(this_cpu);
5174 
5175 	if (this_load > 0) {
5176 		this_eff_load *= this_load +
5177 			effective_load(tg, this_cpu, weight, weight);
5178 
5179 		prev_eff_load *= load + effective_load(tg, prev_cpu, 0, weight);
5180 	}
5181 
5182 	balanced = this_eff_load <= prev_eff_load;
5183 
5184 	schedstat_inc(p->se.statistics.nr_wakeups_affine_attempts);
5185 
5186 	if (!balanced)
5187 		return 0;
5188 
5189 	schedstat_inc(sd->ttwu_move_affine);
5190 	schedstat_inc(p->se.statistics.nr_wakeups_affine);
5191 
5192 	return 1;
5193 }
5194 
5195 /*
5196  * find_idlest_group finds and returns the least busy CPU group within the
5197  * domain.
5198  */
5199 static struct sched_group *
5200 find_idlest_group(struct sched_domain *sd, struct task_struct *p,
5201 		  int this_cpu, int sd_flag)
5202 {
5203 	struct sched_group *idlest = NULL, *group = sd->groups;
5204 	unsigned long min_load = ULONG_MAX, this_load = 0;
5205 	int load_idx = sd->forkexec_idx;
5206 	int imbalance = 100 + (sd->imbalance_pct-100)/2;
5207 
5208 	if (sd_flag & SD_BALANCE_WAKE)
5209 		load_idx = sd->wake_idx;
5210 
5211 	do {
5212 		unsigned long load, avg_load;
5213 		int local_group;
5214 		int i;
5215 
5216 		/* Skip over this group if it has no CPUs allowed */
5217 		if (!cpumask_intersects(sched_group_cpus(group),
5218 					tsk_cpus_allowed(p)))
5219 			continue;
5220 
5221 		local_group = cpumask_test_cpu(this_cpu,
5222 					       sched_group_cpus(group));
5223 
5224 		/* Tally up the load of all CPUs in the group */
5225 		avg_load = 0;
5226 
5227 		for_each_cpu(i, sched_group_cpus(group)) {
5228 			/* Bias balancing toward cpus of our domain */
5229 			if (local_group)
5230 				load = source_load(i, load_idx);
5231 			else
5232 				load = target_load(i, load_idx);
5233 
5234 			avg_load += load;
5235 		}
5236 
5237 		/* Adjust by relative CPU capacity of the group */
5238 		avg_load = (avg_load * SCHED_CAPACITY_SCALE) / group->sgc->capacity;
5239 
5240 		if (local_group) {
5241 			this_load = avg_load;
5242 		} else if (avg_load < min_load) {
5243 			min_load = avg_load;
5244 			idlest = group;
5245 		}
5246 	} while (group = group->next, group != sd->groups);
5247 
5248 	if (!idlest || 100*this_load < imbalance*min_load)
5249 		return NULL;
5250 	return idlest;
5251 }
5252 
5253 /*
5254  * find_idlest_cpu - find the idlest cpu among the cpus in group.
5255  */
5256 static int
5257 find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
5258 {
5259 	unsigned long load, min_load = ULONG_MAX;
5260 	unsigned int min_exit_latency = UINT_MAX;
5261 	u64 latest_idle_timestamp = 0;
5262 	int least_loaded_cpu = this_cpu;
5263 	int shallowest_idle_cpu = -1;
5264 	int i;
5265 
5266 	/* Check if we have any choice: */
5267 	if (group->group_weight == 1)
5268 		return cpumask_first(sched_group_cpus(group));
5269 
5270 	/* Traverse only the allowed CPUs */
5271 	for_each_cpu_and(i, sched_group_cpus(group), tsk_cpus_allowed(p)) {
5272 		if (idle_cpu(i)) {
5273 			struct rq *rq = cpu_rq(i);
5274 			struct cpuidle_state *idle = idle_get_state(rq);
5275 			if (idle && idle->exit_latency < min_exit_latency) {
5276 				/*
5277 				 * We give priority to a CPU whose idle state
5278 				 * has the smallest exit latency irrespective
5279 				 * of any idle timestamp.
5280 				 */
5281 				min_exit_latency = idle->exit_latency;
5282 				latest_idle_timestamp = rq->idle_stamp;
5283 				shallowest_idle_cpu = i;
5284 			} else if ((!idle || idle->exit_latency == min_exit_latency) &&
5285 				   rq->idle_stamp > latest_idle_timestamp) {
5286 				/*
5287 				 * If equal or no active idle state, then
5288 				 * the most recently idled CPU might have
5289 				 * a warmer cache.
5290 				 */
5291 				latest_idle_timestamp = rq->idle_stamp;
5292 				shallowest_idle_cpu = i;
5293 			}
5294 		} else if (shallowest_idle_cpu == -1) {
5295 			load = weighted_cpuload(i);
5296 			if (load < min_load || (load == min_load && i == this_cpu)) {
5297 				min_load = load;
5298 				least_loaded_cpu = i;
5299 			}
5300 		}
5301 	}
5302 
5303 	return shallowest_idle_cpu != -1 ? shallowest_idle_cpu : least_loaded_cpu;
5304 }
5305 
5306 /*
5307  * Implement a for_each_cpu() variant that starts the scan at a given cpu
5308  * (@start), and wraps around.
5309  *
5310  * This is used to scan for idle CPUs; such that not all CPUs looking for an
5311  * idle CPU find the same CPU. The down-side is that tasks tend to cycle
5312  * through the LLC domain.
5313  *
5314  * Especially tbench is found sensitive to this.
5315  */
5316 
5317 static int cpumask_next_wrap(int n, const struct cpumask *mask, int start, int *wrapped)
5318 {
5319 	int next;
5320 
5321 again:
5322 	next = find_next_bit(cpumask_bits(mask), nr_cpumask_bits, n+1);
5323 
5324 	if (*wrapped) {
5325 		if (next >= start)
5326 			return nr_cpumask_bits;
5327 	} else {
5328 		if (next >= nr_cpumask_bits) {
5329 			*wrapped = 1;
5330 			n = -1;
5331 			goto again;
5332 		}
5333 	}
5334 
5335 	return next;
5336 }
5337 
5338 #define for_each_cpu_wrap(cpu, mask, start, wrap)				\
5339 	for ((wrap) = 0, (cpu) = (start)-1;					\
5340 		(cpu) = cpumask_next_wrap((cpu), (mask), (start), &(wrap)),	\
5341 		(cpu) < nr_cpumask_bits; )
5342 
5343 #ifdef CONFIG_SCHED_SMT
5344 
5345 static inline void set_idle_cores(int cpu, int val)
5346 {
5347 	struct sched_domain_shared *sds;
5348 
5349 	sds = rcu_dereference(per_cpu(sd_llc_shared, cpu));
5350 	if (sds)
5351 		WRITE_ONCE(sds->has_idle_cores, val);
5352 }
5353 
5354 static inline bool test_idle_cores(int cpu, bool def)
5355 {
5356 	struct sched_domain_shared *sds;
5357 
5358 	sds = rcu_dereference(per_cpu(sd_llc_shared, cpu));
5359 	if (sds)
5360 		return READ_ONCE(sds->has_idle_cores);
5361 
5362 	return def;
5363 }
5364 
5365 /*
5366  * Scans the local SMT mask to see if the entire core is idle, and records this
5367  * information in sd_llc_shared->has_idle_cores.
5368  *
5369  * Since SMT siblings share all cache levels, inspecting this limited remote
5370  * state should be fairly cheap.
5371  */
5372 void __update_idle_core(struct rq *rq)
5373 {
5374 	int core = cpu_of(rq);
5375 	int cpu;
5376 
5377 	rcu_read_lock();
5378 	if (test_idle_cores(core, true))
5379 		goto unlock;
5380 
5381 	for_each_cpu(cpu, cpu_smt_mask(core)) {
5382 		if (cpu == core)
5383 			continue;
5384 
5385 		if (!idle_cpu(cpu))
5386 			goto unlock;
5387 	}
5388 
5389 	set_idle_cores(core, 1);
5390 unlock:
5391 	rcu_read_unlock();
5392 }
5393 
5394 /*
5395  * Scan the entire LLC domain for idle cores; this dynamically switches off if
5396  * there are no idle cores left in the system; tracked through
5397  * sd_llc->shared->has_idle_cores and enabled through update_idle_core() above.
5398  */
5399 static int select_idle_core(struct task_struct *p, struct sched_domain *sd, int target)
5400 {
5401 	struct cpumask *cpus = this_cpu_cpumask_var_ptr(select_idle_mask);
5402 	int core, cpu, wrap;
5403 
5404 	if (!static_branch_likely(&sched_smt_present))
5405 		return -1;
5406 
5407 	if (!test_idle_cores(target, false))
5408 		return -1;
5409 
5410 	cpumask_and(cpus, sched_domain_span(sd), tsk_cpus_allowed(p));
5411 
5412 	for_each_cpu_wrap(core, cpus, target, wrap) {
5413 		bool idle = true;
5414 
5415 		for_each_cpu(cpu, cpu_smt_mask(core)) {
5416 			cpumask_clear_cpu(cpu, cpus);
5417 			if (!idle_cpu(cpu))
5418 				idle = false;
5419 		}
5420 
5421 		if (idle)
5422 			return core;
5423 	}
5424 
5425 	/*
5426 	 * Failed to find an idle core; stop looking for one.
5427 	 */
5428 	set_idle_cores(target, 0);
5429 
5430 	return -1;
5431 }
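/*
 * Note on the scan above: clearing every SMT sibling of the inspected core
 * from 'cpus' means each core is visited at most once, so the cost is
 * O(nr_cores) rather than O(nr_cpus) even though the walk is expressed over
 * individual CPUs. On a hypothetical 4-core/2-thread package, for example,
 * inspecting the core containing CPUs {0, 4} clears both bits and the next
 * for_each_cpu_wrap() iteration moves straight on to the next core.
 */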
5432 
5433 /*
5434  * Scan the local SMT mask for idle CPUs.
5435  */
5436 static int select_idle_smt(struct task_struct *p, struct sched_domain *sd, int target)
5437 {
5438 	int cpu;
5439 
5440 	if (!static_branch_likely(&sched_smt_present))
5441 		return -1;
5442 
5443 	for_each_cpu(cpu, cpu_smt_mask(target)) {
5444 		if (!cpumask_test_cpu(cpu, tsk_cpus_allowed(p)))
5445 			continue;
5446 		if (idle_cpu(cpu))
5447 			return cpu;
5448 	}
5449 
5450 	return -1;
5451 }
5452 
5453 #else /* CONFIG_SCHED_SMT */
5454 
5455 static inline int select_idle_core(struct task_struct *p, struct sched_domain *sd, int target)
5456 {
5457 	return -1;
5458 }
5459 
5460 static inline int select_idle_smt(struct task_struct *p, struct sched_domain *sd, int target)
5461 {
5462 	return -1;
5463 }
5464 
5465 #endif /* CONFIG_SCHED_SMT */
5466 
5467 /*
5468  * Scan the LLC domain for idle CPUs; this is dynamically regulated by
5469  * comparing the average scan cost (tracked in sd->avg_scan_cost) against the
5470  * average idle time for this rq (as found in rq->avg_idle).
5471  */
5472 static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, int target)
5473 {
5474 	struct sched_domain *this_sd = rcu_dereference(*this_cpu_ptr(&sd_llc));
5475 	u64 avg_idle = this_rq()->avg_idle;
5476 	u64 avg_cost = this_sd->avg_scan_cost;
5477 	u64 time, cost;
5478 	s64 delta;
5479 	int cpu, wrap;
5480 
5481 	/*
5482 	 * Due to large variance we need a large fuzz factor; hackbench in
5483 	 * particular is sensitive here.
5484 	 */
5485 	if ((avg_idle / 512) < avg_cost)
5486 		return -1;
5487 
5488 	time = local_clock();
5489 
5490 	for_each_cpu_wrap(cpu, sched_domain_span(sd), target, wrap) {
5491 		if (!cpumask_test_cpu(cpu, tsk_cpus_allowed(p)))
5492 			continue;
5493 		if (idle_cpu(cpu))
5494 			break;
5495 	}
5496 
5497 	time = local_clock() - time;
5498 	cost = this_sd->avg_scan_cost;
5499 	delta = (s64)(time - cost) / 8;
5500 	this_sd->avg_scan_cost += delta;
5501 
5502 	return cpu;
5503 }
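/*
 * The avg_scan_cost update above is a simple exponential moving average:
 *
 *	avg_scan_cost += (time - avg_scan_cost) / 8
 *	              == 7/8 * avg_scan_cost + 1/8 * time
 *
 * and the early bail-out compares it against avg_idle / 512. As a worked
 * example, with rq->avg_idle at 500us the LLC scan is only attempted while
 * the average scan cost stays below roughly 1us (500000 / 512 ~= 976ns);
 * beyond that the scan is considered too expensive relative to the expected
 * idle time and we return -1 immediately.
 */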
5504 
5505 /*
5506  * Try and locate an idle core/thread in the LLC cache domain.
5507  */
5508 static int select_idle_sibling(struct task_struct *p, int prev, int target)
5509 {
5510 	struct sched_domain *sd;
5511 	int i;
5512 
5513 	if (idle_cpu(target))
5514 		return target;
5515 
5516 	/*
5517 	 * If the previous cpu is cache affine and idle, don't be stupid.
5518 	 */
5519 	if (prev != target && cpus_share_cache(prev, target) && idle_cpu(prev))
5520 		return prev;
5521 
5522 	sd = rcu_dereference(per_cpu(sd_llc, target));
5523 	if (!sd)
5524 		return target;
5525 
5526 	i = select_idle_core(p, sd, target);
5527 	if ((unsigned)i < nr_cpumask_bits)
5528 		return i;
5529 
5530 	i = select_idle_cpu(p, sd, target);
5531 	if ((unsigned)i < nr_cpumask_bits)
5532 		return i;
5533 
5534 	i = select_idle_smt(p, sd, target);
5535 	if ((unsigned)i < nr_cpumask_bits)
5536 		return i;
5537 
5538 	return target;
5539 }
5540 
5541 /*
5542  * cpu_util returns the amount of capacity of a CPU that is used by CFS
5543  * tasks. The unit of the return value must be the one of capacity so we can
5544  * compare the utilization with the capacity of the CPU that is available for
5545  * CFS task (ie cpu_capacity).
5546  *
5547  * cfs_rq.avg.util_avg is the sum of running time of runnable tasks plus the
5548  * recent utilization of currently non-runnable tasks on a CPU. It represents
5549  * the amount of utilization of a CPU in the range [0..capacity_orig] where
5550  * capacity_orig is the cpu_capacity available at the highest frequency
5551  * (arch_scale_freq_capacity()).
5552  * The utilization of a CPU converges towards a sum equal to or less than the
5553  * current capacity (capacity_curr <= capacity_orig) of the CPU because it is
5554  * the running time on this CPU scaled by capacity_curr.
5555  *
5556  * Nevertheless, cfs_rq.avg.util_avg can be higher than capacity_curr or even
5557  * higher than capacity_orig because of unfortunate rounding in
5558  * cfs.avg.util_avg or just after migrating tasks and new task wakeups until
5559  * the average stabilizes with the new running time. We need to check that the
5560  * utilization stays within the range of [0..capacity_orig] and cap it if
5561  * necessary. Without utilization capping, a group could be seen as overloaded
5562  * (CPU0 utilization at 121% + CPU1 utilization at 80%) whereas CPU1 has 20% of
5563  * available capacity. We allow utilization to overshoot capacity_curr (but not
5564  * capacity_orig) as it useful for predicting the capacity required after task
5565  * migrations (scheduler-driven DVFS).
5566  */
5567 static int cpu_util(int cpu)
5568 {
5569 	unsigned long util = cpu_rq(cpu)->cfs.avg.util_avg;
5570 	unsigned long capacity = capacity_orig_of(cpu);
5571 
5572 	return (util >= capacity) ? capacity : util;
5573 }
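/*
 * Example of the capping above: a CPU with capacity_orig_of() == 512 whose
 * cfs.avg.util_avg has transiently climbed to 580 (e.g. right after a task
 * migration) reports a utilization of 512, keeping the value within
 * [0..capacity_orig] as described in the comment above.
 */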
5574 
5575 static inline int task_util(struct task_struct *p)
5576 {
5577 	return p->se.avg.util_avg;
5578 }
5579 
5580 /*
5581  * Disable WAKE_AFFINE in the case where task @p doesn't fit in the
5582  * capacity of either the waking CPU @cpu or the previous CPU @prev_cpu.
5583  *
5584  * In that case WAKE_AFFINE doesn't make sense and we'll let
5585  * BALANCE_WAKE sort things out.
5586  */
5587 static int wake_cap(struct task_struct *p, int cpu, int prev_cpu)
5588 {
5589 	long min_cap, max_cap;
5590 
5591 	min_cap = min(capacity_orig_of(prev_cpu), capacity_orig_of(cpu));
5592 	max_cap = cpu_rq(cpu)->rd->max_cpu_capacity;
5593 
5594 	/* Minimum capacity is close to max, no need to abort wake_affine */
5595 	if (max_cap - min_cap < max_cap >> 3)
5596 		return 0;
5597 
5598 	return min_cap * 1024 < task_util(p) * capacity_margin;
5599 }
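/*
 * Worked example, assuming capacity_margin is 1280 (a 1.25x headroom factor
 * in SCHED_CAPACITY_SCALE units): for a task with task_util(p) == 300 the
 * condition becomes min_cap * 1024 < 300 * 1280, so wake_affine is disabled
 * whenever the smaller of the two CPUs offers less than 375 units of
 * capacity, i.e. the task plus its margin would not fit there.
 */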
5600 
5601 /*
5602  * select_task_rq_fair: Select target runqueue for the waking task in domains
5603  * that have the 'sd_flag' flag set. In practice, this is SD_BALANCE_WAKE,
5604  * SD_BALANCE_FORK, or SD_BALANCE_EXEC.
5605  *
5606  * Balances load by selecting the idlest cpu in the idlest group, or under
5607  * certain conditions an idle sibling cpu if the domain has SD_WAKE_AFFINE set.
5608  *
5609  * Returns the target cpu number.
5610  *
5611  * preempt must be disabled.
5612  */
5613 static int
5614 select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_flags)
5615 {
5616 	struct sched_domain *tmp, *affine_sd = NULL, *sd = NULL;
5617 	int cpu = smp_processor_id();
5618 	int new_cpu = prev_cpu;
5619 	int want_affine = 0;
5620 	int sync = wake_flags & WF_SYNC;
5621 
5622 	if (sd_flag & SD_BALANCE_WAKE) {
5623 		record_wakee(p);
5624 		want_affine = !wake_wide(p) && !wake_cap(p, cpu, prev_cpu)
5625 			      && cpumask_test_cpu(cpu, tsk_cpus_allowed(p));
5626 	}
5627 
5628 	rcu_read_lock();
5629 	for_each_domain(cpu, tmp) {
5630 		if (!(tmp->flags & SD_LOAD_BALANCE))
5631 			break;
5632 
5633 		/*
5634 		 * If both cpu and prev_cpu are part of this domain,
5635 		 * cpu is a valid SD_WAKE_AFFINE target.
5636 		 */
5637 		if (want_affine && (tmp->flags & SD_WAKE_AFFINE) &&
5638 		    cpumask_test_cpu(prev_cpu, sched_domain_span(tmp))) {
5639 			affine_sd = tmp;
5640 			break;
5641 		}
5642 
5643 		if (tmp->flags & sd_flag)
5644 			sd = tmp;
5645 		else if (!want_affine)
5646 			break;
5647 	}
5648 
5649 	if (affine_sd) {
5650 		sd = NULL; /* Prefer wake_affine over balance flags */
5651 		if (cpu != prev_cpu && wake_affine(affine_sd, p, prev_cpu, sync))
5652 			new_cpu = cpu;
5653 	}
5654 
5655 	if (!sd) {
5656 		if (sd_flag & SD_BALANCE_WAKE) /* XXX always ? */
5657 			new_cpu = select_idle_sibling(p, prev_cpu, new_cpu);
5658 
5659 	} else while (sd) {
5660 		struct sched_group *group;
5661 		int weight;
5662 
5663 		if (!(sd->flags & sd_flag)) {
5664 			sd = sd->child;
5665 			continue;
5666 		}
5667 
5668 		group = find_idlest_group(sd, p, cpu, sd_flag);
5669 		if (!group) {
5670 			sd = sd->child;
5671 			continue;
5672 		}
5673 
5674 		new_cpu = find_idlest_cpu(group, p, cpu);
5675 		if (new_cpu == -1 || new_cpu == cpu) {
5676 			/* Now try balancing at a lower domain level of cpu */
5677 			sd = sd->child;
5678 			continue;
5679 		}
5680 
5681 		/* Now try balancing at a lower domain level of new_cpu */
5682 		cpu = new_cpu;
5683 		weight = sd->span_weight;
5684 		sd = NULL;
5685 		for_each_domain(cpu, tmp) {
5686 			if (weight <= tmp->span_weight)
5687 				break;
5688 			if (tmp->flags & sd_flag)
5689 				sd = tmp;
5690 		}
5691 		/* while loop will break here if sd == NULL */
5692 	}
5693 	rcu_read_unlock();
5694 
5695 	return new_cpu;
5696 }
5697 
5698 /*
5699  * Called immediately before a task is migrated to a new cpu; task_cpu(p) and
5700  * cfs_rq_of(p) references at time of call are still valid and identify the
5701  * previous cpu. The caller guarantees p->pi_lock or task_rq(p)->lock is held.
5702  */
5703 static void migrate_task_rq_fair(struct task_struct *p)
5704 {
5705 	/*
5706 	 * As blocked tasks retain absolute vruntime the migration needs to
5707 	 * deal with this by subtracting the old and adding the new
5708 	 * min_vruntime -- the latter is done by enqueue_entity() when placing
5709 	 * the task on the new runqueue.
5710 	 */
5711 	if (p->state == TASK_WAKING) {
5712 		struct sched_entity *se = &p->se;
5713 		struct cfs_rq *cfs_rq = cfs_rq_of(se);
5714 		u64 min_vruntime;
5715 
5716 #ifndef CONFIG_64BIT
5717 		u64 min_vruntime_copy;
5718 
5719 		do {
5720 			min_vruntime_copy = cfs_rq->min_vruntime_copy;
5721 			smp_rmb();
5722 			min_vruntime = cfs_rq->min_vruntime;
5723 		} while (min_vruntime != min_vruntime_copy);
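		/*
		 * On 32-bit, min_vruntime cannot be read atomically; the loop
		 * above pairs with the writer, which is assumed to publish
		 * min_vruntime first and min_vruntime_copy after an smp_wmb(),
		 * so we retry whenever a torn 64-bit read is observed (the
		 * value and its copy disagree).
		 */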
5724 #else
5725 		min_vruntime = cfs_rq->min_vruntime;
5726 #endif
5727 
5728 		se->vruntime -= min_vruntime;
5729 	}
5730 
5731 	/*
5732 	 * We are supposed to update the task to "current" time, so that it is up
5733 	 * to date and ready to go on the new CPU/cfs_rq. But we have difficulty
5734 	 * in getting what the current time is, so simply throw away the
5735 	 * out-of-date time. This will result in the wakee task being less
5736 	 * decayed, but giving the wakee more load is not a bad trade-off.
5737 	 */
5738 	remove_entity_load_avg(&p->se);
5739 
5740 	/* Tell new CPU we are migrated */
5741 	p->se.avg.last_update_time = 0;
5742 
5743 	/* We have migrated, no longer consider this task hot */
5744 	p->se.exec_start = 0;
5745 }
5746 
5747 static void task_dead_fair(struct task_struct *p)
5748 {
5749 	remove_entity_load_avg(&p->se);
5750 }
5751 #endif /* CONFIG_SMP */
5752 
5753 static unsigned long
5754 wakeup_gran(struct sched_entity *curr, struct sched_entity *se)
5755 {
5756 	unsigned long gran = sysctl_sched_wakeup_granularity;
5757 
5758 	/*
5759 	 * Since 'curr' is running now, convert the gran from real-time
5760 	 * to virtual-time in its units.
5761 	 *
5762 	 * By using 'se' instead of 'curr' we penalize light tasks, so
5763 	 * they get preempted more easily. That is, if 'se' < 'curr' then
5764 	 * the resulting gran will be larger, therefore penalizing the
5765 	 * lighter task; if, on the other hand, 'se' > 'curr' then the
5766 	 * resulting gran will be smaller, again penalizing the lighter task.
5767 	 *
5768 	 * This is especially important for buddies when the leftmost
5769 	 * task is higher priority than the buddy.
5770 	 */
5771 	return calc_delta_fair(gran, se);
5772 }
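/*
 * Illustration of the scaling above: calc_delta_fair() scales 'gran' by
 * NICE_0_LOAD / se->load.weight. With the default 1ms wakeup granularity, a
 * nice-0 wakee (weight 1024) must have a vruntime more than 1ms below
 * curr's to preempt, while a nice +5 wakee (weight 335, per
 * sched_prio_to_weight[]) needs roughly 3ms; the lighter task is penalized
 * exactly as described above.
 */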
5773 
5774 /*
5775  * Should 'se' preempt 'curr'.
5776  *
5777  *             |s1
5778  *        |s2
5779  *   |s3
5780  *         g
5781  *      |<--->|c
5782  *
5783  *  w(c, s1) = -1
5784  *  w(c, s2) =  0
5785  *  w(c, s3) =  1
5786  *
5787  */
5788 static int
5789 wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se)
5790 {
5791 	s64 gran, vdiff = curr->vruntime - se->vruntime;
5792 
5793 	if (vdiff <= 0)
5794 		return -1;
5795 
5796 	gran = wakeup_gran(curr, se);
5797 	if (vdiff > gran)
5798 		return 1;
5799 
5800 	return 0;
5801 }
5802 
5803 static void set_last_buddy(struct sched_entity *se)
5804 {
5805 	if (entity_is_task(se) && unlikely(task_of(se)->policy == SCHED_IDLE))
5806 		return;
5807 
5808 	for_each_sched_entity(se)
5809 		cfs_rq_of(se)->last = se;
5810 }
5811 
5812 static void set_next_buddy(struct sched_entity *se)
5813 {
5814 	if (entity_is_task(se) && unlikely(task_of(se)->policy == SCHED_IDLE))
5815 		return;
5816 
5817 	for_each_sched_entity(se)
5818 		cfs_rq_of(se)->next = se;
5819 }
5820 
5821 static void set_skip_buddy(struct sched_entity *se)
5822 {
5823 	for_each_sched_entity(se)
5824 		cfs_rq_of(se)->skip = se;
5825 }
5826 
5827 /*
5828  * Preempt the current task with a newly woken task if needed:
5829  */
5830 static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_flags)
5831 {
5832 	struct task_struct *curr = rq->curr;
5833 	struct sched_entity *se = &curr->se, *pse = &p->se;
5834 	struct cfs_rq *cfs_rq = task_cfs_rq(curr);
5835 	int scale = cfs_rq->nr_running >= sched_nr_latency;
5836 	int next_buddy_marked = 0;
5837 
5838 	if (unlikely(se == pse))
5839 		return;
5840 
5841 	/*
5842 	 * This is possible from callers such as attach_tasks(), in which we
5843 	 * unconditionally check_preempt_curr() after an enqueue (which may have
5844 	 * led to a throttle).  This both saves work and prevents false
5845 	 * next-buddy nomination below.
5846 	 */
5847 	if (unlikely(throttled_hierarchy(cfs_rq_of(pse))))
5848 		return;
5849 
5850 	if (sched_feat(NEXT_BUDDY) && scale && !(wake_flags & WF_FORK)) {
5851 		set_next_buddy(pse);
5852 		next_buddy_marked = 1;
5853 	}
5854 
5855 	/*
5856 	 * We can come here with TIF_NEED_RESCHED already set from new task
5857 	 * wake up path.
5858 	 *
5859 	 * Note: this also catches the edge-case of curr being in a throttled
5860 	 * group (e.g. via set_curr_task), since update_curr() (in the
5861 	 * enqueue of curr) will have resulted in resched being set.  This
5862 	 * prevents us from potentially nominating it as a false LAST_BUDDY
5863 	 * below.
5864 	 */
5865 	if (test_tsk_need_resched(curr))
5866 		return;
5867 
5868 	/* Idle tasks are by definition preempted by non-idle tasks. */
5869 	if (unlikely(curr->policy == SCHED_IDLE) &&
5870 	    likely(p->policy != SCHED_IDLE))
5871 		goto preempt;
5872 
5873 	/*
5874 	 * Batch and idle tasks do not preempt non-idle tasks (their preemption
5875 	 * is driven by the tick):
5876 	 */
5877 	if (unlikely(p->policy != SCHED_NORMAL) || !sched_feat(WAKEUP_PREEMPTION))
5878 		return;
5879 
5880 	find_matching_se(&se, &pse);
5881 	update_curr(cfs_rq_of(se));
5882 	BUG_ON(!pse);
5883 	if (wakeup_preempt_entity(se, pse) == 1) {
5884 		/*
5885 		 * Bias pick_next to pick the sched entity that is
5886 		 * triggering this preemption.
5887 		 */
5888 		if (!next_buddy_marked)
5889 			set_next_buddy(pse);
5890 		goto preempt;
5891 	}
5892 
5893 	return;
5894 
5895 preempt:
5896 	resched_curr(rq);
5897 	/*
5898 	 * Only set the backward buddy when the current task is still
5899 	 * on the rq. This can happen when a wakeup gets interleaved
5900 	 * with schedule on the ->pre_schedule() or idle_balance()
5901 	 * point, either of which can drop the rq lock.
5902 	 *
5903 	 * Also, during early boot the idle thread is in the fair class,
5904 	 * for obvious reasons it's a bad idea to schedule back to it.
5905 	 */
5906 	if (unlikely(!se->on_rq || curr == rq->idle))
5907 		return;
5908 
5909 	if (sched_feat(LAST_BUDDY) && scale && entity_is_task(se))
5910 		set_last_buddy(se);
5911 }
5912 
5913 static struct task_struct *
5914 pick_next_task_fair(struct rq *rq, struct task_struct *prev, struct pin_cookie cookie)
5915 {
5916 	struct cfs_rq *cfs_rq = &rq->cfs;
5917 	struct sched_entity *se;
5918 	struct task_struct *p;
5919 	int new_tasks;
5920 
5921 again:
5922 #ifdef CONFIG_FAIR_GROUP_SCHED
5923 	if (!cfs_rq->nr_running)
5924 		goto idle;
5925 
5926 	if (prev->sched_class != &fair_sched_class)
5927 		goto simple;
5928 
5929 	/*
5930 	 * Because of the set_next_buddy() in dequeue_task_fair() it is rather
5931 	 * likely that a next task is from the same cgroup as the current.
5932 	 *
5933 	 * Therefore attempt to avoid putting and setting the entire cgroup
5934 	 * hierarchy, only change the part that actually changes.
5935 	 */
5936 
5937 	do {
5938 		struct sched_entity *curr = cfs_rq->curr;
5939 
5940 		/*
5941 		 * Since we got here without doing put_prev_entity() we also
5942 		 * have to consider cfs_rq->curr. If it is still a runnable
5943 		 * entity, update_curr() will update its vruntime, otherwise
5944 		 * forget we've ever seen it.
5945 		 */
5946 		if (curr) {
5947 			if (curr->on_rq)
5948 				update_curr(cfs_rq);
5949 			else
5950 				curr = NULL;
5951 
5952 			/*
5953 			 * This call to check_cfs_rq_runtime() will do the
5954 			 * throttle and dequeue its entity in the parent(s).
5955 			 * Therefore the 'simple' nr_running test will indeed
5956 			 * be correct.
5957 			 */
5958 			if (unlikely(check_cfs_rq_runtime(cfs_rq)))
5959 				goto simple;
5960 		}
5961 
5962 		se = pick_next_entity(cfs_rq, curr);
5963 		cfs_rq = group_cfs_rq(se);
5964 	} while (cfs_rq);
5965 
5966 	p = task_of(se);
5967 
5968 	/*
5969 	 * Since we haven't yet done put_prev_entity and if the selected task
5970 	 * is a different task than we started out with, try and touch the
5971 	 * least amount of cfs_rqs.
5972 	 */
5973 	if (prev != p) {
5974 		struct sched_entity *pse = &prev->se;
5975 
5976 		while (!(cfs_rq = is_same_group(se, pse))) {
5977 			int se_depth = se->depth;
5978 			int pse_depth = pse->depth;
5979 
5980 			if (se_depth <= pse_depth) {
5981 				put_prev_entity(cfs_rq_of(pse), pse);
5982 				pse = parent_entity(pse);
5983 			}
5984 			if (se_depth >= pse_depth) {
5985 				set_next_entity(cfs_rq_of(se), se);
5986 				se = parent_entity(se);
5987 			}
5988 		}
5989 
5990 		put_prev_entity(cfs_rq, pse);
5991 		set_next_entity(cfs_rq, se);
5992 	}
5993 
5994 	if (hrtick_enabled(rq))
5995 		hrtick_start_fair(rq, p);
5996 
5997 	return p;
5998 simple:
5999 	cfs_rq = &rq->cfs;
6000 #endif
6001 
6002 	if (!cfs_rq->nr_running)
6003 		goto idle;
6004 
6005 	put_prev_task(rq, prev);
6006 
6007 	do {
6008 		se = pick_next_entity(cfs_rq, NULL);
6009 		set_next_entity(cfs_rq, se);
6010 		cfs_rq = group_cfs_rq(se);
6011 	} while (cfs_rq);
6012 
6013 	p = task_of(se);
6014 
6015 	if (hrtick_enabled(rq))
6016 		hrtick_start_fair(rq, p);
6017 
6018 	return p;
6019 
6020 idle:
6021 	/*
6022 	 * This is OK, because current is on_cpu, which avoids it being picked
6023 	 * for load-balance, and preemption/IRQs are still disabled, avoiding
6024 	 * further scheduler activity on it; we are also being very careful to
6025 	 * re-start the picking loop.
6026 	 */
6027 	lockdep_unpin_lock(&rq->lock, cookie);
6028 	new_tasks = idle_balance(rq);
6029 	lockdep_repin_lock(&rq->lock, cookie);
6030 	/*
6031 	 * Because idle_balance() releases (and re-acquires) rq->lock, it is
6032 	 * possible for any higher priority task to appear. In that case we
6033 	 * must re-start the pick_next_entity() loop.
6034 	 */
6035 	if (new_tasks < 0)
6036 		return RETRY_TASK;
6037 
6038 	if (new_tasks > 0)
6039 		goto again;
6040 
6041 	return NULL;
6042 }
6043 
6044 /*
6045  * Account for a descheduled task:
6046  */
6047 static void put_prev_task_fair(struct rq *rq, struct task_struct *prev)
6048 {
6049 	struct sched_entity *se = &prev->se;
6050 	struct cfs_rq *cfs_rq;
6051 
6052 	for_each_sched_entity(se) {
6053 		cfs_rq = cfs_rq_of(se);
6054 		put_prev_entity(cfs_rq, se);
6055 	}
6056 }
6057 
6058 /*
6059  * sched_yield() is very simple
6060  *
6061  * The magic of dealing with the ->skip buddy is in pick_next_entity.
6062  */
6063 static void yield_task_fair(struct rq *rq)
6064 {
6065 	struct task_struct *curr = rq->curr;
6066 	struct cfs_rq *cfs_rq = task_cfs_rq(curr);
6067 	struct sched_entity *se = &curr->se;
6068 
6069 	/*
6070 	 * Are we the only task in the tree?
6071 	 */
6072 	if (unlikely(rq->nr_running == 1))
6073 		return;
6074 
6075 	clear_buddies(cfs_rq, se);
6076 
6077 	if (curr->policy != SCHED_BATCH) {
6078 		update_rq_clock(rq);
6079 		/*
6080 		 * Update run-time statistics of the 'current'.
6081 		 */
6082 		update_curr(cfs_rq);
6083 		/*
6084 		 * Tell update_rq_clock() that we've just updated,
6085 		 * so we don't do microscopic update in schedule()
6086 		 * and double the fastpath cost.
6087 		 */
6088 		rq_clock_skip_update(rq, true);
6089 	}
6090 
6091 	set_skip_buddy(se);
6092 }
6093 
6094 static bool yield_to_task_fair(struct rq *rq, struct task_struct *p, bool preempt)
6095 {
6096 	struct sched_entity *se = &p->se;
6097 
6098 	/* throttled hierarchies are not runnable */
6099 	if (!se->on_rq || throttled_hierarchy(cfs_rq_of(se)))
6100 		return false;
6101 
6102 	/* Tell the scheduler that we'd really like pse to run next. */
6103 	set_next_buddy(se);
6104 
6105 	yield_task_fair(rq);
6106 
6107 	return true;
6108 }
6109 
6110 #ifdef CONFIG_SMP
6111 /**************************************************
6112  * Fair scheduling class load-balancing methods.
6113  *
6114  * BASICS
6115  *
6116  * The purpose of load-balancing is to achieve the same basic fairness the
6117  * per-cpu scheduler provides, namely provide a proportional amount of compute
6118  * time to each task. This is expressed in the following equation:
6119  *
6120  *   W_i,n/P_i == W_j,n/P_j for all i,j                               (1)
6121  *
6122  * Where W_i,n is the n-th weight average for cpu i. The instantaneous weight
6123  * W_i,0 is defined as:
6124  *
6125  *   W_i,0 = \Sum_j w_i,j                                             (2)
6126  *
6127  * Where w_i,j is the weight of the j-th runnable task on cpu i. This weight
6128  * is derived from the nice value as per sched_prio_to_weight[].
6129  *
6130  * The weight average is an exponential decay average of the instantaneous
6131  * weight:
6132  *
6133  *   W'_i,n = (2^n - 1) / 2^n * W_i,n + 1 / 2^n * W_i,0               (3)
6134  *
6135  * C_i is the compute capacity of cpu i, typically it is the
6136  * fraction of 'recent' time available for SCHED_OTHER task execution. But it
6137  * can also include other factors [XXX].
6138  *
6139  * To achieve this balance we define a measure of imbalance which follows
6140  * directly from (1):
6141  *
6142  *   imb_i,j = max{ avg(W/C), W_i/C_i } - min{ avg(W/C), W_j/C_j }    (4)
6143  *
6144  * We then move tasks around to minimize the imbalance. In the continuous
6145  * function space it is obvious this converges, in the discrete case we get
6146  * a few fun cases generally called infeasible weight scenarios.
6147  *
6148  * [XXX expand on:
6149  *     - infeasible weights;
6150  *     - local vs global optima in the discrete case. ]
6151  *
6152  *
6153  * SCHED DOMAINS
6154  *
6155  * In order to solve the imbalance equation (4), and avoid the obvious O(n^2)
6156  * for all i,j solution, we create a tree of cpus that follows the hardware
6157  * topology where each level pairs two lower groups (or better). This results
6158  * in O(log n) layers. Furthermore we reduce the number of cpus going up the
6159  * tree to only the first of the previous level and we decrease the frequency
6160  * of load-balance at each level inv. proportional to the number of cpus in
6161  * the groups.
6162  *
6163  * This yields:
6164  *
6165  *     log_2 n     1     n
6166  *   \Sum       { --- * --- * 2^i } = O(n)                            (5)
6167  *     i = 0      2^i   2^i
6168  *                               `- size of each group
6169  *         |         |     `- number of cpus doing load-balance
6170  *         |         `- freq
6171  *         `- sum over all levels
6172  *
6173  * Coupled with a limit on how many tasks we can migrate every balance pass,
6174  * this makes (5) the runtime complexity of the balancer.
6175  *
6176  * An important property here is that each CPU is still (indirectly) connected
6177  * to every other cpu in at most O(log n) steps:
6178  *
6179  * The adjacency matrix of the resulting graph is given by:
6180  *
6181  *             log_2 n
6182  *   A_i,j = \Union     (i % 2^k == 0) && i / 2^(k+1) == j / 2^(k+1)  (6)
6183  *             k = 0
6184  *
6185  * And you'll find that:
6186  *
6187  *   A^(log_2 n)_i,j != 0  for all i,j                                (7)
6188  *
6189  * Showing there's indeed a path between every cpu in at most O(log n) steps.
6190  * The task movement gives a factor of O(m), giving a convergence complexity
6191  * of:
6192  *
6193  *   O(nm log n),  n := nr_cpus, m := nr_tasks                        (8)
6194  *
6195  *
6196  * WORK CONSERVING
6197  *
6198  * In order to avoid CPUs going idle while there's still work to do, new idle
6199  * balancing is more aggressive and has the newly idle cpu iterate up the domain
6200  * tree itself instead of relying on other CPUs to bring it work.
6201  *
6202  * This adds some complexity to both (5) and (8) but it reduces the total idle
6203  * time.
6204  *
6205  * [XXX more?]
6206  *
6207  *
6208  * CGROUPS
6209  *
6210  * Cgroups make a horror show out of (2), instead of a simple sum we get:
6211  *
6212  *                                s_k,i
6213  *   W_i,0 = \Sum_j \Prod_k w_k * -----                               (9)
6214  *                                 S_k
6215  *
6216  * Where
6217  *
6218  *   s_k,i = \Sum_j w_i,j,k  and  S_k = \Sum_i s_k,i                 (10)
6219  *
6220  * w_i,j,k is the weight of the j-th runnable task in the k-th cgroup on cpu i.
6221  *
6222  * The big problem is S_k, it's a global sum needed to compute a local (W_i)
6223  * property.
6224  *
6225  * [XXX write more on how we solve this.. _after_ merging pjt's patches that
6226  *      rewrite all of this once again.]
6227  */
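/*
 * A small worked example of (1) and (4): take two CPUs with equal capacity
 * C_0 = C_1 = 1024 and instantaneous weights W_0 = 3072, W_1 = 1024. Then
 * avg(W/C) = 2 and imb_0,1 = max(2, 3) - min(2, 1) = 2. Moving 1024 worth of
 * weight from cpu0 to cpu1 gives W_0/C_0 = W_1/C_1 = 2 and the imbalance
 * drops to 0, which is the fixed point that (1) describes.
 */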
6228 
6229 static unsigned long __read_mostly max_load_balance_interval = HZ/10;
6230 
6231 enum fbq_type { regular, remote, all };
6232 
6233 #define LBF_ALL_PINNED	0x01
6234 #define LBF_NEED_BREAK	0x02
6235 #define LBF_DST_PINNED  0x04
6236 #define LBF_SOME_PINNED	0x08
6237 
6238 struct lb_env {
6239 	struct sched_domain	*sd;
6240 
6241 	struct rq		*src_rq;
6242 	int			src_cpu;
6243 
6244 	int			dst_cpu;
6245 	struct rq		*dst_rq;
6246 
6247 	struct cpumask		*dst_grpmask;
6248 	int			new_dst_cpu;
6249 	enum cpu_idle_type	idle;
6250 	long			imbalance;
6251 	/* The set of CPUs under consideration for load-balancing */
6252 	struct cpumask		*cpus;
6253 
6254 	unsigned int		flags;
6255 
6256 	unsigned int		loop;
6257 	unsigned int		loop_break;
6258 	unsigned int		loop_max;
6259 
6260 	enum fbq_type		fbq_type;
6261 	struct list_head	tasks;
6262 };
6263 
6264 /*
6265  * Is this task likely cache-hot:
6266  */
6267 static int task_hot(struct task_struct *p, struct lb_env *env)
6268 {
6269 	s64 delta;
6270 
6271 	lockdep_assert_held(&env->src_rq->lock);
6272 
6273 	if (p->sched_class != &fair_sched_class)
6274 		return 0;
6275 
6276 	if (unlikely(p->policy == SCHED_IDLE))
6277 		return 0;
6278 
6279 	/*
6280 	 * Buddy candidates are cache hot:
6281 	 */
6282 	if (sched_feat(CACHE_HOT_BUDDY) && env->dst_rq->nr_running &&
6283 			(&p->se == cfs_rq_of(&p->se)->next ||
6284 			 &p->se == cfs_rq_of(&p->se)->last))
6285 		return 1;
6286 
6287 	if (sysctl_sched_migration_cost == -1)
6288 		return 1;
6289 	if (sysctl_sched_migration_cost == 0)
6290 		return 0;
6291 
6292 	delta = rq_clock_task(env->src_rq) - p->se.exec_start;
6293 
6294 	return delta < (s64)sysctl_sched_migration_cost;
6295 }
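/*
 * Aside from the buddy check, the hot/cold decision above reduces to
 * sysctl_sched_migration_cost: -1 treats every task as cache hot, 0 treats
 * none as hot, and otherwise a task is hot if it last ran on src_rq less
 * than sysctl_sched_migration_cost (500us by default) ago.
 */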
6296 
6297 #ifdef CONFIG_NUMA_BALANCING
6298 /*
6299  * Returns 1, if task migration degrades locality
6300  * Returns 0, if task migration improves locality i.e migration preferred.
6301  * Returns -1, if task migration is not affected by locality.
6302  */
6303 static int migrate_degrades_locality(struct task_struct *p, struct lb_env *env)
6304 {
6305 	struct numa_group *numa_group = rcu_dereference(p->numa_group);
6306 	unsigned long src_faults, dst_faults;
6307 	int src_nid, dst_nid;
6308 
6309 	if (!static_branch_likely(&sched_numa_balancing))
6310 		return -1;
6311 
6312 	if (!p->numa_faults || !(env->sd->flags & SD_NUMA))
6313 		return -1;
6314 
6315 	src_nid = cpu_to_node(env->src_cpu);
6316 	dst_nid = cpu_to_node(env->dst_cpu);
6317 
6318 	if (src_nid == dst_nid)
6319 		return -1;
6320 
6321 	/* Migrating away from the preferred node is always bad. */
6322 	if (src_nid == p->numa_preferred_nid) {
6323 		if (env->src_rq->nr_running > env->src_rq->nr_preferred_running)
6324 			return 1;
6325 		else
6326 			return -1;
6327 	}
6328 
6329 	/* Encourage migration to the preferred node. */
6330 	if (dst_nid == p->numa_preferred_nid)
6331 		return 0;
6332 
6333 	if (numa_group) {
6334 		src_faults = group_faults(p, src_nid);
6335 		dst_faults = group_faults(p, dst_nid);
6336 	} else {
6337 		src_faults = task_faults(p, src_nid);
6338 		dst_faults = task_faults(p, dst_nid);
6339 	}
6340 
6341 	return dst_faults < src_faults;
6342 }
6343 
6344 #else
6345 static inline int migrate_degrades_locality(struct task_struct *p,
6346 					     struct lb_env *env)
6347 {
6348 	return -1;
6349 }
6350 #endif
6351 
6352 /*
6353  * can_migrate_task - may task p from runqueue rq be migrated to this_cpu?
6354  */
6355 static
6356 int can_migrate_task(struct task_struct *p, struct lb_env *env)
6357 {
6358 	int tsk_cache_hot;
6359 
6360 	lockdep_assert_held(&env->src_rq->lock);
6361 
6362 	/*
6363 	 * We do not migrate tasks that are:
6364 	 * 1) throttled_lb_pair, or
6365 	 * 2) cannot be migrated to this CPU due to cpus_allowed, or
6366 	 * 3) running (obviously), or
6367 	 * 4) are cache-hot on their current CPU.
6368 	 */
6369 	if (throttled_lb_pair(task_group(p), env->src_cpu, env->dst_cpu))
6370 		return 0;
6371 
6372 	if (!cpumask_test_cpu(env->dst_cpu, tsk_cpus_allowed(p))) {
6373 		int cpu;
6374 
6375 		schedstat_inc(p->se.statistics.nr_failed_migrations_affine);
6376 
6377 		env->flags |= LBF_SOME_PINNED;
6378 
6379 		/*
6380 		 * Remember if this task can be migrated to any other cpu in
6381 		 * our sched_group. We may want to revisit it if we couldn't
6382 		 * meet load balance goals by pulling other tasks on src_cpu.
6383 		 *
6384 		 * Also avoid computing new_dst_cpu if we have already computed
6385 		 * one in current iteration.
6386 		 */
6387 		if (!env->dst_grpmask || (env->flags & LBF_DST_PINNED))
6388 			return 0;
6389 
6390 		/* Prevent re-selecting dst_cpu via env's cpus */
6391 		for_each_cpu_and(cpu, env->dst_grpmask, env->cpus) {
6392 			if (cpumask_test_cpu(cpu, tsk_cpus_allowed(p))) {
6393 				env->flags |= LBF_DST_PINNED;
6394 				env->new_dst_cpu = cpu;
6395 				break;
6396 			}
6397 		}
6398 
6399 		return 0;
6400 	}
6401 
6402 	/* Record that we found at least one task that could run on dst_cpu */
6403 	env->flags &= ~LBF_ALL_PINNED;
6404 
6405 	if (task_running(env->src_rq, p)) {
6406 		schedstat_inc(p->se.statistics.nr_failed_migrations_running);
6407 		return 0;
6408 	}
6409 
6410 	/*
6411 	 * Aggressive migration if:
6412 	 * 1) destination numa is preferred
6413 	 * 2) task is cache cold, or
6414 	 * 3) too many balance attempts have failed.
6415 	 */
6416 	tsk_cache_hot = migrate_degrades_locality(p, env);
6417 	if (tsk_cache_hot == -1)
6418 		tsk_cache_hot = task_hot(p, env);
6419 
6420 	if (tsk_cache_hot <= 0 ||
6421 	    env->sd->nr_balance_failed > env->sd->cache_nice_tries) {
6422 		if (tsk_cache_hot == 1) {
6423 			schedstat_inc(env->sd->lb_hot_gained[env->idle]);
6424 			schedstat_inc(p->se.statistics.nr_forced_migrations);
6425 		}
6426 		return 1;
6427 	}
6428 
6429 	schedstat_inc(p->se.statistics.nr_failed_migrations_hot);
6430 	return 0;
6431 }
6432 
6433 /*
6434  * detach_task() -- detach the task for the migration specified in env
6435  */
6436 static void detach_task(struct task_struct *p, struct lb_env *env)
6437 {
6438 	lockdep_assert_held(&env->src_rq->lock);
6439 
6440 	p->on_rq = TASK_ON_RQ_MIGRATING;
6441 	deactivate_task(env->src_rq, p, 0);
6442 	set_task_cpu(p, env->dst_cpu);
6443 }
6444 
6445 /*
6446  * detach_one_task() -- tries to dequeue exactly one task from env->src_rq, as
6447  * part of active balancing operations within "domain".
6448  *
6449  * Returns a task if successful and NULL otherwise.
6450  */
6451 static struct task_struct *detach_one_task(struct lb_env *env)
6452 {
6453 	struct task_struct *p, *n;
6454 
6455 	lockdep_assert_held(&env->src_rq->lock);
6456 
6457 	list_for_each_entry_safe(p, n, &env->src_rq->cfs_tasks, se.group_node) {
6458 		if (!can_migrate_task(p, env))
6459 			continue;
6460 
6461 		detach_task(p, env);
6462 
6463 		/*
6464 		 * Right now, this is only the second place where
6465 		 * lb_gained[env->idle] is updated (other is detach_tasks)
6466 		 * so we can safely collect stats here rather than
6467 		 * inside detach_tasks().
6468 		 */
6469 		schedstat_inc(env->sd->lb_gained[env->idle]);
6470 		return p;
6471 	}
6472 	return NULL;
6473 }
6474 
6475 static const unsigned int sched_nr_migrate_break = 32;
6476 
6477 /*
6478  * detach_tasks() -- tries to detach up to imbalance weighted load from
6479  * busiest_rq, as part of a balancing operation within domain "sd".
6480  *
6481  * Returns number of detached tasks if successful and 0 otherwise.
6482  */
6483 static int detach_tasks(struct lb_env *env)
6484 {
6485 	struct list_head *tasks = &env->src_rq->cfs_tasks;
6486 	struct task_struct *p;
6487 	unsigned long load;
6488 	int detached = 0;
6489 
6490 	lockdep_assert_held(&env->src_rq->lock);
6491 
6492 	if (env->imbalance <= 0)
6493 		return 0;
6494 
6495 	while (!list_empty(tasks)) {
6496 		/*
6497 		 * We don't want to steal all the tasks; otherwise we may be treated
6498 		 * likewise, which could at worst lead to a livelock crash.
6499 		 */
6500 		if (env->idle != CPU_NOT_IDLE && env->src_rq->nr_running <= 1)
6501 			break;
6502 
6503 		p = list_first_entry(tasks, struct task_struct, se.group_node);
6504 
6505 		env->loop++;
6506 		/* We've more or less seen every task there is, call it quits */
6507 		if (env->loop > env->loop_max)
6508 			break;
6509 
6510 		/* take a breather every nr_migrate tasks */
6511 		if (env->loop > env->loop_break) {
6512 			env->loop_break += sched_nr_migrate_break;
6513 			env->flags |= LBF_NEED_BREAK;
6514 			break;
6515 		}
6516 
6517 		if (!can_migrate_task(p, env))
6518 			goto next;
6519 
6520 		load = task_h_load(p);
6521 
6522 		if (sched_feat(LB_MIN) && load < 16 && !env->sd->nr_balance_failed)
6523 			goto next;
6524 
6525 		if ((load / 2) > env->imbalance)
6526 			goto next;
6527 
6528 		detach_task(p, env);
6529 		list_add(&p->se.group_node, &env->tasks);
6530 
6531 		detached++;
6532 		env->imbalance -= load;
6533 
6534 #ifdef CONFIG_PREEMPT
6535 		/*
6536 		 * NEWIDLE balancing is a source of latency, so preemptible
6537 		 * kernels will stop after the first task is detached to minimize
6538 		 * the critical section.
6539 		 */
6540 		if (env->idle == CPU_NEWLY_IDLE)
6541 			break;
6542 #endif
6543 
6544 		/*
6545 		 * We only want to steal up to the prescribed amount of
6546 		 * weighted load.
6547 		 */
6548 		if (env->imbalance <= 0)
6549 			break;
6550 
6551 		continue;
6552 next:
6553 		list_move_tail(&p->se.group_node, tasks);
6554 	}
6555 
6556 	/*
6557 	 * Right now, this is one of only two places we collect this stat
6558 	 * so we can safely collect detach_one_task() stats here rather
6559 	 * than inside detach_one_task().
6560 	 */
6561 	schedstat_add(env->sd->lb_gained[env->idle], detached);
6562 
6563 	return detached;
6564 }
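/*
 * Notes on the heuristics above: a task is skipped when half its h_load
 * exceeds the remaining imbalance, so a single move can overshoot the
 * remaining imbalance by at most that imbalance itself; LB_MIN optionally
 * skips very light tasks (h_load < 16) until balancing has actually failed;
 * and the scan takes a breather (LBF_NEED_BREAK) every
 * sched_nr_migrate_break (32) tasks so the src_rq lock is not held across
 * arbitrarily long task lists.
 */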
6565 
6566 /*
6567  * attach_task() -- attach the task detached by detach_task() to its new rq.
6568  */
6569 static void attach_task(struct rq *rq, struct task_struct *p)
6570 {
6571 	lockdep_assert_held(&rq->lock);
6572 
6573 	BUG_ON(task_rq(p) != rq);
6574 	activate_task(rq, p, 0);
6575 	p->on_rq = TASK_ON_RQ_QUEUED;
6576 	check_preempt_curr(rq, p, 0);
6577 }
6578 
6579 /*
6580  * attach_one_task() -- attaches the task returned from detach_one_task() to
6581  * its new rq.
6582  */
6583 static void attach_one_task(struct rq *rq, struct task_struct *p)
6584 {
6585 	raw_spin_lock(&rq->lock);
6586 	attach_task(rq, p);
6587 	raw_spin_unlock(&rq->lock);
6588 }
6589 
6590 /*
6591  * attach_tasks() -- attaches all tasks detached by detach_tasks() to their
6592  * new rq.
6593  */
6594 static void attach_tasks(struct lb_env *env)
6595 {
6596 	struct list_head *tasks = &env->tasks;
6597 	struct task_struct *p;
6598 
6599 	raw_spin_lock(&env->dst_rq->lock);
6600 
6601 	while (!list_empty(tasks)) {
6602 		p = list_first_entry(tasks, struct task_struct, se.group_node);
6603 		list_del_init(&p->se.group_node);
6604 
6605 		attach_task(env->dst_rq, p);
6606 	}
6607 
6608 	raw_spin_unlock(&env->dst_rq->lock);
6609 }
6610 
6611 #ifdef CONFIG_FAIR_GROUP_SCHED
6612 static void update_blocked_averages(int cpu)
6613 {
6614 	struct rq *rq = cpu_rq(cpu);
6615 	struct cfs_rq *cfs_rq;
6616 	unsigned long flags;
6617 
6618 	raw_spin_lock_irqsave(&rq->lock, flags);
6619 	update_rq_clock(rq);
6620 
6621 	/*
6622 	 * Iterates the task_group tree in a bottom up fashion, see
6623 	 * list_add_leaf_cfs_rq() for details.
6624 	 */
6625 	for_each_leaf_cfs_rq(rq, cfs_rq) {
6626 		/* throttled entities do not contribute to load */
6627 		if (throttled_hierarchy(cfs_rq))
6628 			continue;
6629 
6630 		if (update_cfs_rq_load_avg(cfs_rq_clock_task(cfs_rq), cfs_rq, true))
6631 			update_tg_load_avg(cfs_rq, 0);
6632 	}
6633 	raw_spin_unlock_irqrestore(&rq->lock, flags);
6634 }
6635 
6636 /*
6637  * Compute the hierarchical load factor for cfs_rq and all its ascendants.
6638  * This needs to be done in a top-down fashion because the load of a child
6639  * group is a fraction of its parents load.
6640  */
6641 static void update_cfs_rq_h_load(struct cfs_rq *cfs_rq)
6642 {
6643 	struct rq *rq = rq_of(cfs_rq);
6644 	struct sched_entity *se = cfs_rq->tg->se[cpu_of(rq)];
6645 	unsigned long now = jiffies;
6646 	unsigned long load;
6647 
6648 	if (cfs_rq->last_h_load_update == now)
6649 		return;
6650 
6651 	cfs_rq->h_load_next = NULL;
6652 	for_each_sched_entity(se) {
6653 		cfs_rq = cfs_rq_of(se);
6654 		cfs_rq->h_load_next = se;
6655 		if (cfs_rq->last_h_load_update == now)
6656 			break;
6657 	}
6658 
6659 	if (!se) {
6660 		cfs_rq->h_load = cfs_rq_load_avg(cfs_rq);
6661 		cfs_rq->last_h_load_update = now;
6662 	}
6663 
6664 	while ((se = cfs_rq->h_load_next) != NULL) {
6665 		load = cfs_rq->h_load;
6666 		load = div64_ul(load * se->avg.load_avg,
6667 			cfs_rq_load_avg(cfs_rq) + 1);
6668 		cfs_rq = group_cfs_rq(se);
6669 		cfs_rq->h_load = load;
6670 		cfs_rq->last_h_load_update = now;
6671 	}
6672 }
6673 
6674 static unsigned long task_h_load(struct task_struct *p)
6675 {
6676 	struct cfs_rq *cfs_rq = task_cfs_rq(p);
6677 
6678 	update_cfs_rq_h_load(cfs_rq);
6679 	return div64_ul(p->se.avg.load_avg * cfs_rq->h_load,
6680 			cfs_rq_load_avg(cfs_rq) + 1);
6681 }
6682 #else
6683 static inline void update_blocked_averages(int cpu)
6684 {
6685 	struct rq *rq = cpu_rq(cpu);
6686 	struct cfs_rq *cfs_rq = &rq->cfs;
6687 	unsigned long flags;
6688 
6689 	raw_spin_lock_irqsave(&rq->lock, flags);
6690 	update_rq_clock(rq);
6691 	update_cfs_rq_load_avg(cfs_rq_clock_task(cfs_rq), cfs_rq, true);
6692 	raw_spin_unlock_irqrestore(&rq->lock, flags);
6693 }
6694 
6695 static unsigned long task_h_load(struct task_struct *p)
6696 {
6697 	return p->se.avg.load_avg;
6698 }
6699 #endif
6700 
6701 /********** Helpers for find_busiest_group ************************/
6702 
6703 enum group_type {
6704 	group_other = 0,
6705 	group_imbalanced,
6706 	group_overloaded,
6707 };
6708 
6709 /*
6710  * sg_lb_stats - stats of a sched_group required for load_balancing
6711  */
6712 struct sg_lb_stats {
6713 	unsigned long avg_load; /*Avg load across the CPUs of the group */
6714 	unsigned long group_load; /* Total load over the CPUs of the group */
6715 	unsigned long sum_weighted_load; /* Weighted load of group's tasks */
6716 	unsigned long load_per_task;
6717 	unsigned long group_capacity;
6718 	unsigned long group_util; /* Total utilization of the group */
6719 	unsigned int sum_nr_running; /* Nr tasks running in the group */
6720 	unsigned int idle_cpus;
6721 	unsigned int group_weight;
6722 	enum group_type group_type;
6723 	int group_no_capacity;
6724 #ifdef CONFIG_NUMA_BALANCING
6725 	unsigned int nr_numa_running;
6726 	unsigned int nr_preferred_running;
6727 #endif
6728 };
6729 
6730 /*
6731  * sd_lb_stats - Structure to store the statistics of a sched_domain
6732  *		 during load balancing.
6733  */
6734 struct sd_lb_stats {
6735 	struct sched_group *busiest;	/* Busiest group in this sd */
6736 	struct sched_group *local;	/* Local group in this sd */
6737 	unsigned long total_load;	/* Total load of all groups in sd */
6738 	unsigned long total_capacity;	/* Total capacity of all groups in sd */
6739 	unsigned long avg_load;	/* Average load across all groups in sd */
6740 
6741 	struct sg_lb_stats busiest_stat;/* Statistics of the busiest group */
6742 	struct sg_lb_stats local_stat;	/* Statistics of the local group */
6743 };
6744 
6745 static inline void init_sd_lb_stats(struct sd_lb_stats *sds)
6746 {
6747 	/*
6748 	 * Skimp on the clearing to avoid duplicate work. We can avoid clearing
6749 	 * local_stat because update_sg_lb_stats() does a full clear/assignment.
6750 	 * We must however clear busiest_stat::avg_load because
6751 	 * update_sd_pick_busiest() reads this before assignment.
6752 	 */
6753 	*sds = (struct sd_lb_stats){
6754 		.busiest = NULL,
6755 		.local = NULL,
6756 		.total_load = 0UL,
6757 		.total_capacity = 0UL,
6758 		.busiest_stat = {
6759 			.avg_load = 0UL,
6760 			.sum_nr_running = 0,
6761 			.group_type = group_other,
6762 		},
6763 	};
6764 }
6765 
6766 /**
6767  * get_sd_load_idx - Obtain the load index for a given sched domain.
6768  * @sd: The sched_domain whose load_idx is to be obtained.
6769  * @idle: The idle status of the CPU for whose sd load_idx is obtained.
6770  *
6771  * Return: The load index.
6772  */
6773 static inline int get_sd_load_idx(struct sched_domain *sd,
6774 					enum cpu_idle_type idle)
6775 {
6776 	int load_idx;
6777 
6778 	switch (idle) {
6779 	case CPU_NOT_IDLE:
6780 		load_idx = sd->busy_idx;
6781 		break;
6782 
6783 	case CPU_NEWLY_IDLE:
6784 		load_idx = sd->newidle_idx;
6785 		break;
6786 	default:
6787 		load_idx = sd->idle_idx;
6788 		break;
6789 	}
6790 
6791 	return load_idx;
6792 }
6793 
6794 static unsigned long scale_rt_capacity(int cpu)
6795 {
6796 	struct rq *rq = cpu_rq(cpu);
6797 	u64 total, used, age_stamp, avg;
6798 	s64 delta;
6799 
6800 	/*
6801 	 * Since we're reading these variables without serialization make sure
6802 	 * we read them once before doing sanity checks on them.
6803 	 */
6804 	age_stamp = READ_ONCE(rq->age_stamp);
6805 	avg = READ_ONCE(rq->rt_avg);
6806 	delta = __rq_clock_broken(rq) - age_stamp;
6807 
6808 	if (unlikely(delta < 0))
6809 		delta = 0;
6810 
6811 	total = sched_avg_period() + delta;
6812 
6813 	used = div_u64(avg, total);
6814 
6815 	if (likely(used < SCHED_CAPACITY_SCALE))
6816 		return SCHED_CAPACITY_SCALE - used;
6817 
6818 	return 1;
6819 }
6820 
6821 static void update_cpu_capacity(struct sched_domain *sd, int cpu)
6822 {
6823 	unsigned long capacity = arch_scale_cpu_capacity(sd, cpu);
6824 	struct sched_group *sdg = sd->groups;
6825 
6826 	cpu_rq(cpu)->cpu_capacity_orig = capacity;
6827 
6828 	capacity *= scale_rt_capacity(cpu);
6829 	capacity >>= SCHED_CAPACITY_SHIFT;
6830 
6831 	if (!capacity)
6832 		capacity = 1;
6833 
6834 	cpu_rq(cpu)->cpu_capacity = capacity;
6835 	sdg->sgc->capacity = capacity;
6836 }
6837 
6838 void update_group_capacity(struct sched_domain *sd, int cpu)
6839 {
6840 	struct sched_domain *child = sd->child;
6841 	struct sched_group *group, *sdg = sd->groups;
6842 	unsigned long capacity;
6843 	unsigned long interval;
6844 
6845 	interval = msecs_to_jiffies(sd->balance_interval);
6846 	interval = clamp(interval, 1UL, max_load_balance_interval);
6847 	sdg->sgc->next_update = jiffies + interval;
6848 
6849 	if (!child) {
6850 		update_cpu_capacity(sd, cpu);
6851 		return;
6852 	}
6853 
6854 	capacity = 0;
6855 
6856 	if (child->flags & SD_OVERLAP) {
6857 		/*
6858 		 * SD_OVERLAP domains cannot assume that child groups
6859 		 * span the current group.
6860 		 */
6861 
6862 		for_each_cpu(cpu, sched_group_cpus(sdg)) {
6863 			struct sched_group_capacity *sgc;
6864 			struct rq *rq = cpu_rq(cpu);
6865 
6866 			/*
6867 			 * build_sched_domains() -> init_sched_groups_capacity()
6868 			 * gets here before we've attached the domains to the
6869 			 * runqueues.
6870 			 *
6871 			 * Use capacity_of(), which is set irrespective of domains
6872 			 * in update_cpu_capacity().
6873 			 *
6874 			 * This avoids capacity from being 0 and
6875 			 * causing divide-by-zero issues on boot.
6876 			 */
6877 			if (unlikely(!rq->sd)) {
6878 				capacity += capacity_of(cpu);
6879 				continue;
6880 			}
6881 
6882 			sgc = rq->sd->groups->sgc;
6883 			capacity += sgc->capacity;
6884 		}
6885 	} else  {
6886 		/*
6887 		 * !SD_OVERLAP domains can assume that child groups
6888 		 * span the current group.
6889 		 */
6890 
6891 		group = child->groups;
6892 		do {
6893 			capacity += group->sgc->capacity;
6894 			group = group->next;
6895 		} while (group != child->groups);
6896 	}
6897 
6898 	sdg->sgc->capacity = capacity;
6899 }
6900 
6901 /*
6902  * Check whether the capacity of the rq has been noticeably reduced by side
6903  * activity. The imbalance_pct is used for the threshold.
6904  * Return true if the capacity is reduced.
6905  */
6906 static inline int
6907 check_cpu_capacity(struct rq *rq, struct sched_domain *sd)
6908 {
6909 	return ((rq->cpu_capacity * sd->imbalance_pct) <
6910 				(rq->cpu_capacity_orig * 100));
6911 }
6912 
6913 /*
6914  * Group imbalance indicates (and tries to solve) the problem where balancing
6915  * groups is inadequate due to tsk_cpus_allowed() constraints.
6916  *
6917  * Imagine a situation of two groups of 4 cpus each and 4 tasks each with a
6918  * cpumask covering 1 cpu of the first group and 3 cpus of the second group.
6919  * Something like:
6920  *
6921  * 	{ 0 1 2 3 } { 4 5 6 7 }
6922  * 	        *     * * *
6923  *
6924  * If we were to balance group-wise we'd place two tasks in the first group and
6925  * two tasks in the second group. Clearly this is undesired as it will overload
6926  * cpu 3 and leave one of the cpus in the second group unused.
6927  *
6928  * The current solution to this issue is detecting the skew in the first group
6929  * by noticing the lower domain failed to reach balance and had difficulty
6930  * moving tasks due to affinity constraints.
6931  *
6932  * When this is so detected; this group becomes a candidate for busiest; see
6933  * update_sd_pick_busiest(). And calculate_imbalance() and
6934  * find_busiest_group() avoid some of the usual balance conditions to allow it
6935  * to create an effective group imbalance.
6936  *
6937  * This is a somewhat tricky proposition since the next run might not find the
6938  * group imbalance and decide the groups need to be balanced again. A most
6939  * subtle and fragile situation.
6940  */
6941 
6942 static inline int sg_imbalanced(struct sched_group *group)
6943 {
6944 	return group->sgc->imbalance;
6945 }
6946 
6947 /*
6948  * group_has_capacity returns true if the group has spare capacity that could
6949  * be used by some tasks.
6950  * We consider that a group has spare capacity if the number of tasks is
6951  * smaller than the number of CPUs or if the utilization is lower than the
6952  * available capacity for CFS tasks.
6953  * For the latter, we use a threshold to stabilize the state, to take into
6954  * account the variance of the tasks' load and to return true if the available
6955  * capacity is meaningful for the load balancer.
6956  * As an example, an available capacity of 1% can appear but it doesn't bring
6957  * any benefit to the load balance.
6958  */
6959 static inline bool
6960 group_has_capacity(struct lb_env *env, struct sg_lb_stats *sgs)
6961 {
6962 	if (sgs->sum_nr_running < sgs->group_weight)
6963 		return true;
6964 
6965 	if ((sgs->group_capacity * 100) >
6966 			(sgs->group_util * env->sd->imbalance_pct))
6967 		return true;
6968 
6969 	return false;
6970 }
6971 
6972 /*
6973  *  group_is_overloaded returns true if the group has more tasks than it can
6974  *  handle.
6975  *  group_is_overloaded is not equal to !group_has_capacity because a group
6976  *  with exactly the right number of tasks has no spare capacity left but is
6977  *  not overloaded, so both group_has_capacity and group_is_overloaded return
6978  *  false.
6979  */
6980 static inline bool
6981 group_is_overloaded(struct lb_env *env, struct sg_lb_stats *sgs)
6982 {
6983 	if (sgs->sum_nr_running <= sgs->group_weight)
6984 		return false;
6985 
6986 	if ((sgs->group_capacity * 100) <
6987 			(sgs->group_util * env->sd->imbalance_pct))
6988 		return true;
6989 
6990 	return false;
6991 }
6992 
6993 static inline enum
6994 group_type group_classify(struct sched_group *group,
6995 			  struct sg_lb_stats *sgs)
6996 {
6997 	if (sgs->group_no_capacity)
6998 		return group_overloaded;
6999 
7000 	if (sg_imbalanced(group))
7001 		return group_imbalanced;
7002 
7003 	return group_other;
7004 }
7005 
7006 /**
7007  * update_sg_lb_stats - Update sched_group's statistics for load balancing.
7008  * @env: The load balancing environment.
7009  * @group: sched_group whose statistics are to be updated.
7010  * @load_idx: Load index of sched_domain of this_cpu for load calc.
7011  * @local_group: Does group contain this_cpu.
7012  * @sgs: variable to hold the statistics for this group.
7013  * @overload: Indicate more than one runnable task for any CPU.
7014  */
7015 static inline void update_sg_lb_stats(struct lb_env *env,
7016 			struct sched_group *group, int load_idx,
7017 			int local_group, struct sg_lb_stats *sgs,
7018 			bool *overload)
7019 {
7020 	unsigned long load;
7021 	int i, nr_running;
7022 
7023 	memset(sgs, 0, sizeof(*sgs));
7024 
7025 	for_each_cpu_and(i, sched_group_cpus(group), env->cpus) {
7026 		struct rq *rq = cpu_rq(i);
7027 
7028 		/* Bias balancing toward cpus of our domain */
7029 		if (local_group)
7030 			load = target_load(i, load_idx);
7031 		else
7032 			load = source_load(i, load_idx);
7033 
7034 		sgs->group_load += load;
7035 		sgs->group_util += cpu_util(i);
7036 		sgs->sum_nr_running += rq->cfs.h_nr_running;
7037 
7038 		nr_running = rq->nr_running;
7039 		if (nr_running > 1)
7040 			*overload = true;
7041 
7042 #ifdef CONFIG_NUMA_BALANCING
7043 		sgs->nr_numa_running += rq->nr_numa_running;
7044 		sgs->nr_preferred_running += rq->nr_preferred_running;
7045 #endif
7046 		sgs->sum_weighted_load += weighted_cpuload(i);
7047 		/*
7048 		 * No need to call idle_cpu() if nr_running is not 0
7049 		 */
7050 		if (!nr_running && idle_cpu(i))
7051 			sgs->idle_cpus++;
7052 	}
7053 
7054 	/* Adjust by relative CPU capacity of the group */
7055 	sgs->group_capacity = group->sgc->capacity;
7056 	sgs->avg_load = (sgs->group_load*SCHED_CAPACITY_SCALE) / sgs->group_capacity;
7057 
7058 	if (sgs->sum_nr_running)
7059 		sgs->load_per_task = sgs->sum_weighted_load / sgs->sum_nr_running;
7060 
7061 	sgs->group_weight = group->group_weight;
7062 
7063 	sgs->group_no_capacity = group_is_overloaded(env, sgs);
7064 	sgs->group_type = group_classify(group, sgs);
7065 }
7066 
7067 /**
7068  * update_sd_pick_busiest - return 1 on busiest group
7069  * @env: The load balancing environment.
7070  * @sds: sched_domain statistics
7071  * @sg: sched_group candidate to be checked for being the busiest
7072  * @sgs: sched_group statistics
7073  *
7074  * Determine if @sg is a busier group than the previously selected
7075  * busiest group.
7076  *
7077  * Return: %true if @sg is a busier group than the previously selected
7078  * busiest group. %false otherwise.
7079  */
7080 static bool update_sd_pick_busiest(struct lb_env *env,
7081 				   struct sd_lb_stats *sds,
7082 				   struct sched_group *sg,
7083 				   struct sg_lb_stats *sgs)
7084 {
7085 	struct sg_lb_stats *busiest = &sds->busiest_stat;
7086 
7087 	if (sgs->group_type > busiest->group_type)
7088 		return true;
7089 
7090 	if (sgs->group_type < busiest->group_type)
7091 		return false;
7092 
7093 	if (sgs->avg_load <= busiest->avg_load)
7094 		return false;
7095 
7096 	/* This is the busiest node in its class. */
7097 	if (!(env->sd->flags & SD_ASYM_PACKING))
7098 		return true;
7099 
7100 	/* No ASYM_PACKING if target cpu is already busy */
7101 	if (env->idle == CPU_NOT_IDLE)
7102 		return true;
7103 	/*
7104 	 * ASYM_PACKING needs to move all the work to the lowest
7105 	 * numbered CPUs in the group, therefore mark all groups
7106 	 * higher than ourself as busy.
7107 	 */
7108 	if (sgs->sum_nr_running && env->dst_cpu < group_first_cpu(sg)) {
7109 		if (!sds->busiest)
7110 			return true;
7111 
7112 		/* Prefer to move work from the highest possible cpu */
7113 		if (group_first_cpu(sds->busiest) < group_first_cpu(sg))
7114 			return true;
7115 	}
7116 
7117 	return false;
7118 }
7119 
7120 #ifdef CONFIG_NUMA_BALANCING
7121 static inline enum fbq_type fbq_classify_group(struct sg_lb_stats *sgs)
7122 {
7123 	if (sgs->sum_nr_running > sgs->nr_numa_running)
7124 		return regular;
7125 	if (sgs->sum_nr_running > sgs->nr_preferred_running)
7126 		return remote;
7127 	return all;
7128 }
7129 
7130 static inline enum fbq_type fbq_classify_rq(struct rq *rq)
7131 {
7132 	if (rq->nr_running > rq->nr_numa_running)
7133 		return regular;
7134 	if (rq->nr_running > rq->nr_preferred_running)
7135 		return remote;
7136 	return all;
7137 }
7138 #else
7139 static inline enum fbq_type fbq_classify_group(struct sg_lb_stats *sgs)
7140 {
7141 	return all;
7142 }
7143 
7144 static inline enum fbq_type fbq_classify_rq(struct rq *rq)
7145 {
7146 	return regular;
7147 }
7148 #endif /* CONFIG_NUMA_BALANCING */
7149 
7150 /**
7151  * update_sd_lb_stats - Update sched_domain's statistics for load balancing.
7152  * @env: The load balancing environment.
7153  * @sds: variable to hold the statistics for this sched_domain.
7154  */
7155 static inline void update_sd_lb_stats(struct lb_env *env, struct sd_lb_stats *sds)
7156 {
7157 	struct sched_domain *child = env->sd->child;
7158 	struct sched_group *sg = env->sd->groups;
7159 	struct sg_lb_stats tmp_sgs;
7160 	int load_idx, prefer_sibling = 0;
7161 	bool overload = false;
7162 
7163 	if (child && child->flags & SD_PREFER_SIBLING)
7164 		prefer_sibling = 1;
7165 
7166 	load_idx = get_sd_load_idx(env->sd, env->idle);
7167 
7168 	do {
7169 		struct sg_lb_stats *sgs = &tmp_sgs;
7170 		int local_group;
7171 
7172 		local_group = cpumask_test_cpu(env->dst_cpu, sched_group_cpus(sg));
7173 		if (local_group) {
7174 			sds->local = sg;
7175 			sgs = &sds->local_stat;
7176 
7177 			if (env->idle != CPU_NEWLY_IDLE ||
7178 			    time_after_eq(jiffies, sg->sgc->next_update))
7179 				update_group_capacity(env->sd, env->dst_cpu);
7180 		}
7181 
7182 		update_sg_lb_stats(env, sg, load_idx, local_group, sgs,
7183 						&overload);
7184 
7185 		if (local_group)
7186 			goto next_group;
7187 
7188 		/*
7189 		 * In case the child domain prefers tasks to go to siblings
7190 		 * first, lower the sg capacity so that we'll try
7191 		 * and move all the excess tasks away. We lower the capacity
7192 		 * of a group only if the local group has the capacity to fit
7193 		 * these excess tasks. The extra check prevents the case where
7194 		 * you always pull from the heaviest group when it is already
7195 		 * under-utilized (possible when a large-weight task outweighs
7196 		 * the rest of the tasks on the system).
7197 		 */
7198 		if (prefer_sibling && sds->local &&
7199 		    group_has_capacity(env, &sds->local_stat) &&
7200 		    (sgs->sum_nr_running > 1)) {
7201 			sgs->group_no_capacity = 1;
7202 			sgs->group_type = group_classify(sg, sgs);
7203 		}
7204 
7205 		if (update_sd_pick_busiest(env, sds, sg, sgs)) {
7206 			sds->busiest = sg;
7207 			sds->busiest_stat = *sgs;
7208 		}
7209 
7210 next_group:
7211 		/* Now, start updating sd_lb_stats */
7212 		sds->total_load += sgs->group_load;
7213 		sds->total_capacity += sgs->group_capacity;
7214 
7215 		sg = sg->next;
7216 	} while (sg != env->sd->groups);
7217 
7218 	if (env->sd->flags & SD_NUMA)
7219 		env->fbq_type = fbq_classify_group(&sds->busiest_stat);
7220 
7221 	if (!env->sd->parent) {
7222 		/* update overload indicator if we are at root domain */
7223 		if (env->dst_rq->rd->overload != overload)
7224 			env->dst_rq->rd->overload = overload;
7225 	}
7226 
7227 }
7228 
7229 /**
7230  * check_asym_packing - Check to see if the group is packed into the
7231  *			sched domain.
7232  *
7233  * This is primarily intended to be used at the sibling level.  Some
7234  * cores like POWER7 prefer to use lower numbered SMT threads.  In the
7235  * case of POWER7, it can move to lower SMT modes only when higher
7236  * threads are idle.  When in lower SMT modes, the threads will
7237  * perform better since they share less core resources.  Hence when we
7238  * have idle threads, we want them to be the higher ones.
7239  *
7240  * This packing function is run on idle threads.  It checks to see if
7241  * the busiest CPU in this domain (core in the P7 case) has a higher
7242  * CPU number than the packing function is being run on.  Here we are
7243  * assuming a lower CPU number is equivalent to a lower SMT thread
7244  * number.
7245  *
7246  * Return: 1 when packing is required and a task should be moved to
7247  * this CPU.  The amount of the imbalance is returned in *imbalance.
7248  *
7249  * @env: The load balancing environment.
7250  * @sds: Statistics of the sched_domain which is to be packed
7251  */
7252 static int check_asym_packing(struct lb_env *env, struct sd_lb_stats *sds)
7253 {
7254 	int busiest_cpu;
7255 
7256 	if (!(env->sd->flags & SD_ASYM_PACKING))
7257 		return 0;
7258 
7259 	if (env->idle == CPU_NOT_IDLE)
7260 		return 0;
7261 
7262 	if (!sds->busiest)
7263 		return 0;
7264 
7265 	busiest_cpu = group_first_cpu(sds->busiest);
7266 	if (env->dst_cpu > busiest_cpu)
7267 		return 0;
7268 
7269 	env->imbalance = DIV_ROUND_CLOSEST(
7270 		sds->busiest_stat.avg_load * sds->busiest_stat.group_capacity,
7271 		SCHED_CAPACITY_SCALE);
7272 
7273 	return 1;
7274 }
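
/*
 * For example (illustrative numbers): if the busiest group has
 * avg_load = 600 and group_capacity = 2048 (two SMT threads at full
 * capacity), the imbalance handed back is
 * DIV_ROUND_CLOSEST(600 * 2048, SCHED_CAPACITY_SCALE) = 1200 with the
 * usual SCHED_CAPACITY_SCALE of 1024, i.e. the group's unscaled load.
 */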
7275 
7276 /**
7277  * fix_small_imbalance - Calculate the minor imbalance that exists
7278  *			amongst the groups of a sched_domain, during
7279  *			load balancing.
7280  * @env: The load balancing environment.
7281  * @sds: Statistics of the sched_domain whose imbalance is to be calculated.
7282  */
7283 static inline
7284 void fix_small_imbalance(struct lb_env *env, struct sd_lb_stats *sds)
7285 {
7286 	unsigned long tmp, capa_now = 0, capa_move = 0;
7287 	unsigned int imbn = 2;
7288 	unsigned long scaled_busy_load_per_task;
7289 	struct sg_lb_stats *local, *busiest;
7290 
7291 	local = &sds->local_stat;
7292 	busiest = &sds->busiest_stat;
7293 
7294 	if (!local->sum_nr_running)
7295 		local->load_per_task = cpu_avg_load_per_task(env->dst_cpu);
7296 	else if (busiest->load_per_task > local->load_per_task)
7297 		imbn = 1;
7298 
7299 	scaled_busy_load_per_task =
7300 		(busiest->load_per_task * SCHED_CAPACITY_SCALE) /
7301 		busiest->group_capacity;
7302 
7303 	if (busiest->avg_load + scaled_busy_load_per_task >=
7304 	    local->avg_load + (scaled_busy_load_per_task * imbn)) {
7305 		env->imbalance = busiest->load_per_task;
7306 		return;
7307 	}
7308 
7309 	/*
7310 	 * OK, we don't have enough imbalance to justify moving tasks,
7311 	 * however we may be able to increase total CPU capacity used by
7312 	 * moving them.
7313 	 */
7314 
7315 	capa_now += busiest->group_capacity *
7316 			min(busiest->load_per_task, busiest->avg_load);
7317 	capa_now += local->group_capacity *
7318 			min(local->load_per_task, local->avg_load);
7319 	capa_now /= SCHED_CAPACITY_SCALE;
7320 
7321 	/* Amount of load we'd subtract */
7322 	if (busiest->avg_load > scaled_busy_load_per_task) {
7323 		capa_move += busiest->group_capacity *
7324 			    min(busiest->load_per_task,
7325 				busiest->avg_load - scaled_busy_load_per_task);
7326 	}
7327 
7328 	/* Amount of load we'd add */
7329 	if (busiest->avg_load * busiest->group_capacity <
7330 	    busiest->load_per_task * SCHED_CAPACITY_SCALE) {
7331 		tmp = (busiest->avg_load * busiest->group_capacity) /
7332 		      local->group_capacity;
7333 	} else {
7334 		tmp = (busiest->load_per_task * SCHED_CAPACITY_SCALE) /
7335 		      local->group_capacity;
7336 	}
7337 	capa_move += local->group_capacity *
7338 		    min(local->load_per_task, local->avg_load + tmp);
7339 	capa_move /= SCHED_CAPACITY_SCALE;
7340 
7341 	/* Move if we gain throughput */
7342 	if (capa_move > capa_now)
7343 		env->imbalance = busiest->load_per_task;
7344 }
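
/*
 * A worked example for fix_small_imbalance() above (illustrative
 * numbers, SCHED_CAPACITY_SCALE = 1024): with busiest->load_per_task =
 * 512 and busiest->group_capacity = 1024, scaled_busy_load_per_task is
 * 512. If busiest->avg_load = 1200 and local->avg_load = 200, then with
 * the default imbn of 2 the test 1200 + 512 >= 200 + 1024 holds, so the
 * imbalance is set to one whole task's load (512) rather than left at
 * zero.
 */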
7345 
7346 /**
7347  * calculate_imbalance - Calculate the amount of imbalance present within the
7348  *			 groups of a given sched_domain during load balance.
7349  * @env: load balance environment
7350  * @sds: statistics of the sched_domain whose imbalance is to be calculated.
7351  */
7352 static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *sds)
7353 {
7354 	unsigned long max_pull, load_above_capacity = ~0UL;
7355 	struct sg_lb_stats *local, *busiest;
7356 
7357 	local = &sds->local_stat;
7358 	busiest = &sds->busiest_stat;
7359 
7360 	if (busiest->group_type == group_imbalanced) {
7361 		/*
7362 		 * In the group_imb case we cannot rely on group-wide averages
7363 		 * to ensure cpu-load equilibrium, look at wider averages. XXX
7364 		 */
7365 		busiest->load_per_task =
7366 			min(busiest->load_per_task, sds->avg_load);
7367 	}
7368 
7369 	/*
7370 	 * Avg load of busiest sg can be less and avg load of local sg can
7371 	 * be greater than avg load across all sgs of sd because avg load
7372 	 * factors in sg capacity and sgs with smaller group_type are
7373 	 * skipped when updating the busiest sg:
7374 	 */
7375 	if (busiest->avg_load <= sds->avg_load ||
7376 	    local->avg_load >= sds->avg_load) {
7377 		env->imbalance = 0;
7378 		return fix_small_imbalance(env, sds);
7379 	}
7380 
7381 	/*
7382 	 * If there aren't any idle cpus, avoid creating some.
7383 	 */
7384 	if (busiest->group_type == group_overloaded &&
7385 	    local->group_type   == group_overloaded) {
7386 		load_above_capacity = busiest->sum_nr_running * SCHED_CAPACITY_SCALE;
7387 		if (load_above_capacity > busiest->group_capacity) {
7388 			load_above_capacity -= busiest->group_capacity;
7389 			load_above_capacity *= scale_load_down(NICE_0_LOAD);
7390 			load_above_capacity /= busiest->group_capacity;
7391 		} else
7392 			load_above_capacity = ~0UL;
7393 	}
7394 
7395 	/*
7396 	 * We're trying to get all the cpus to the average_load, so we don't
7397 	 * want to push ourselves above the average load, nor do we wish to
7398 	 * reduce the max loaded cpu below the average load. At the same time,
7399 	 * we also don't want to reduce the group load below the group
7400 	 * capacity. Thus we look for the minimum possible imbalance.
7401 	 */
7402 	max_pull = min(busiest->avg_load - sds->avg_load, load_above_capacity);
7403 
7404 	/* How much load to actually move to equalise the imbalance */
7405 	env->imbalance = min(
7406 		max_pull * busiest->group_capacity,
7407 		(sds->avg_load - local->avg_load) * local->group_capacity
7408 	) / SCHED_CAPACITY_SCALE;
7409 
7410 	/*
7411 	 * If *imbalance is less than the average load per runnable task,
7412 	 * there is no guarantee that any tasks will be moved, so have
7413 	 * fix_small_imbalance() consider bumping its value to force at
7414 	 * least one task to be moved.
7415 	 */
7416 	if (env->imbalance < busiest->load_per_task)
7417 		return fix_small_imbalance(env, sds);
7418 }
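
/*
 * A worked example for calculate_imbalance() above (illustrative
 * numbers, all capacities 1024): with sds->avg_load = 500,
 * busiest->avg_load = 700, local->avg_load = 300 and neither group
 * overloaded, max_pull = min(700 - 500, ~0UL) = 200 and
 * env->imbalance = min(200 * 1024, (500 - 300) * 1024) / 1024 = 200,
 * i.e. just enough load to bring both groups to the domain average.
 */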
7419 
7420 /******* find_busiest_group() helpers end here *********************/
7421 
7422 /**
7423  * find_busiest_group - Returns the busiest group within the sched_domain
7424  * if there is an imbalance.
7425  *
7426  * Also calculates the amount of weighted load which should be moved
7427  * to restore balance.
7428  *
7429  * @env: The load balancing environment.
7430  *
7431  * Return:	- The busiest group if imbalance exists.
7432  */
7433 static struct sched_group *find_busiest_group(struct lb_env *env)
7434 {
7435 	struct sg_lb_stats *local, *busiest;
7436 	struct sd_lb_stats sds;
7437 
7438 	init_sd_lb_stats(&sds);
7439 
7440 	/*
7441 	 * Compute the various statistics relevant for load balancing at
7442 	 * this level.
7443 	 */
7444 	update_sd_lb_stats(env, &sds);
7445 	local = &sds.local_stat;
7446 	busiest = &sds.busiest_stat;
7447 
7448 	/* ASYM feature bypasses nice load balance check */
7449 	if (check_asym_packing(env, &sds))
7450 		return sds.busiest;
7451 
7452 	/* There is no busy sibling group to pull tasks from */
7453 	if (!sds.busiest || busiest->sum_nr_running == 0)
7454 		goto out_balanced;
7455 
7456 	sds.avg_load = (SCHED_CAPACITY_SCALE * sds.total_load)
7457 						/ sds.total_capacity;
7458 
7459 	/*
7460 	 * If the busiest group is imbalanced the below checks don't
7461 	 * work because they assume all things are equal, which typically
7462 	 * isn't true due to cpus_allowed constraints and the like.
7463 	 */
7464 	if (busiest->group_type == group_imbalanced)
7465 		goto force_balance;
7466 
7467 	/* SD_BALANCE_NEWIDLE trumps SMP nice when underutilized */
7468 	if (env->idle == CPU_NEWLY_IDLE && group_has_capacity(env, local) &&
7469 	    busiest->group_no_capacity)
7470 		goto force_balance;
7471 
7472 	/*
7473 	 * If the local group is busier than the selected busiest group
7474 	 * don't try and pull any tasks.
7475 	 */
7476 	if (local->avg_load >= busiest->avg_load)
7477 		goto out_balanced;
7478 
7479 	/*
7480 	 * Don't pull any tasks if this group is already above the domain
7481 	 * average load.
7482 	 */
7483 	if (local->avg_load >= sds.avg_load)
7484 		goto out_balanced;
7485 
7486 	if (env->idle == CPU_IDLE) {
7487 		/*
7488 		 * This cpu is idle. If the busiest group is not overloaded
7489 		 * and there is no imbalance between this and busiest group
7490 		 * wrt idle cpus, it is balanced. The imbalance becomes
7491 		 * significant if the diff is greater than 1, otherwise we
7492 		 * might end up just moving the imbalance to another group
7493 		 */
7494 		if ((busiest->group_type != group_overloaded) &&
7495 				(local->idle_cpus <= (busiest->idle_cpus + 1)))
7496 			goto out_balanced;
7497 	} else {
7498 		/*
7499 		 * In the CPU_NEWLY_IDLE, CPU_NOT_IDLE cases, use
7500 		 * imbalance_pct to be conservative.
7501 		 */
7502 		if (100 * busiest->avg_load <=
7503 				env->sd->imbalance_pct * local->avg_load)
7504 			goto out_balanced;
7505 	}
7506 
7507 force_balance:
7508 	/* Looks like there is an imbalance. Compute it */
7509 	calculate_imbalance(env, &sds);
7510 	return sds.busiest;
7511 
7512 out_balanced:
7513 	env->imbalance = 0;
7514 	return NULL;
7515 }
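
/*
 * Note on the imbalance_pct check above (illustrative value): with a
 * typical sd->imbalance_pct of 125, a busy or newly idle cpu only
 * declares an imbalance when 100 * busiest->avg_load >
 * 125 * local->avg_load, i.e. when the busiest group is more than 25%
 * busier than the local one.
 */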
7516 
7517 /*
7518  * find_busiest_queue - find the busiest runqueue among the cpus in group.
7519  */
7520 static struct rq *find_busiest_queue(struct lb_env *env,
7521 				     struct sched_group *group)
7522 {
7523 	struct rq *busiest = NULL, *rq;
7524 	unsigned long busiest_load = 0, busiest_capacity = 1;
7525 	int i;
7526 
7527 	for_each_cpu_and(i, sched_group_cpus(group), env->cpus) {
7528 		unsigned long capacity, wl;
7529 		enum fbq_type rt;
7530 
7531 		rq = cpu_rq(i);
7532 		rt = fbq_classify_rq(rq);
7533 
7534 		/*
7535 		 * We classify groups/runqueues into three groups:
7536 		 *  - regular: there are !numa tasks
7537 		 *  - remote:  there are numa tasks that run on the 'wrong' node
7538 		 *  - all:     there is no distinction
7539 		 *
7540 		 * In order to avoid migrating ideally placed numa tasks,
7541 		 * ignore those when there are better options.
7542 		 *
7543 		 * If we ignore the actual busiest queue to migrate another
7544 		 * task, the next balance pass can still reduce the busiest
7545 		 * queue by moving tasks around inside the node.
7546 		 *
7547 		 * If we cannot move enough load due to this classification
7548 		 * the next pass will adjust the group classification and
7549 		 * allow migration of more tasks.
7550 		 *
7551 		 * Both cases only affect the total convergence complexity.
7552 		 */
7553 		if (rt > env->fbq_type)
7554 			continue;
7555 
7556 		capacity = capacity_of(i);
7557 
7558 		wl = weighted_cpuload(i);
7559 
7560 		/*
7561 		 * When comparing with imbalance, use weighted_cpuload()
7562 		 * which is not scaled with the cpu capacity.
7563 		 */
7564 
7565 		if (rq->nr_running == 1 && wl > env->imbalance &&
7566 		    !check_cpu_capacity(rq, env->sd))
7567 			continue;
7568 
7569 		/*
7570 		 * For the load comparisons with the other cpu's, consider
7571 		 * the weighted_cpuload() scaled with the cpu capacity, so
7572 		 * that the load can be moved away from the cpu that is
7573 		 * potentially running at a lower capacity.
7574 		 *
7575 		 * Thus we're looking for max(wl_i / capacity_i), crosswise
7576 		 * multiplication to rid ourselves of the division works out
7577 		 * to: wl_i * capacity_j > wl_j * capacity_i;  where j is
7578 		 * our previous maximum.
7579 		 */
7580 		if (wl * busiest_capacity > busiest_load * capacity) {
7581 			busiest_load = wl;
7582 			busiest_capacity = capacity;
7583 			busiest = rq;
7584 		}
7585 	}
7586 
7587 	return busiest;
7588 }
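
/*
 * Crosswise-multiplication example for the comparison above
 * (illustrative numbers): a cpu with wl = 600 and capacity = 512 is
 * busier relative to its capacity than one with wl = 800 and
 * capacity = 1024, since 600 * 1024 = 614400 > 800 * 512 = 409600,
 * even though its absolute load is lower.
 */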
7589 
7590 /*
7591  * Max backoff if we encounter pinned tasks. Pretty arbitrary value, but
7592  * it does not matter much so long as it is large enough.
7593  */
7594 #define MAX_PINNED_INTERVAL	512
7595 
7596 static int need_active_balance(struct lb_env *env)
7597 {
7598 	struct sched_domain *sd = env->sd;
7599 
7600 	if (env->idle == CPU_NEWLY_IDLE) {
7601 
7602 		/*
7603 		 * ASYM_PACKING needs to force migrate tasks from busy but
7604 		 * higher numbered CPUs in order to pack all tasks in the
7605 		 * lowest numbered CPUs.
7606 		 */
7607 		if ((sd->flags & SD_ASYM_PACKING) && env->src_cpu > env->dst_cpu)
7608 			return 1;
7609 	}
7610 
7611 	/*
7612 	 * The dst_cpu is idle and the src_cpu CPU has only 1 CFS task.
7613 	 * It's worth migrating the task if the src_cpu's capacity is reduced
7614 	 * because of other sched_class or IRQs if more capacity stays
7615 	 * available on dst_cpu.
7616 	 */
7617 	if ((env->idle != CPU_NOT_IDLE) &&
7618 	    (env->src_rq->cfs.h_nr_running == 1)) {
7619 		if ((check_cpu_capacity(env->src_rq, sd)) &&
7620 		    (capacity_of(env->src_cpu)*sd->imbalance_pct < capacity_of(env->dst_cpu)*100))
7621 			return 1;
7622 	}
7623 
7624 	return unlikely(sd->nr_balance_failed > sd->cache_nice_tries+2);
7625 }
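
/*
 * Reduced-capacity example for the check above (illustrative value):
 * with sd->imbalance_pct = 125, an idle dst_cpu pulls the single task
 * off src_cpu only when capacity_of(src_cpu) * 125 <
 * capacity_of(dst_cpu) * 100, i.e. when the source cpu is left with
 * less than 80% of the destination's capacity because of RT tasks or
 * IRQs.
 */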
7626 
7627 static int active_load_balance_cpu_stop(void *data);
7628 
7629 static int should_we_balance(struct lb_env *env)
7630 {
7631 	struct sched_group *sg = env->sd->groups;
7632 	struct cpumask *sg_cpus, *sg_mask;
7633 	int cpu, balance_cpu = -1;
7634 
7635 	/*
7636 	 * In the newly idle case, we will allow all the cpus
7637 	 * to do the newly idle load balance.
7638 	 */
7639 	if (env->idle == CPU_NEWLY_IDLE)
7640 		return 1;
7641 
7642 	sg_cpus = sched_group_cpus(sg);
7643 	sg_mask = sched_group_mask(sg);
7644 	/* Try to find first idle cpu */
7645 	for_each_cpu_and(cpu, sg_cpus, env->cpus) {
7646 		if (!cpumask_test_cpu(cpu, sg_mask) || !idle_cpu(cpu))
7647 			continue;
7648 
7649 		balance_cpu = cpu;
7650 		break;
7651 	}
7652 
7653 	if (balance_cpu == -1)
7654 		balance_cpu = group_balance_cpu(sg);
7655 
7656 	/*
7657 	 * First idle cpu or the first cpu(busiest) in this sched group
7658 	 * is eligible for doing load balancing at this and above domains.
7659 	 */
7660 	return balance_cpu == env->dst_cpu;
7661 }
7662 
7663 /*
7664  * Check this_cpu to ensure it is balanced within domain. Attempt to move
7665  * tasks if there is an imbalance.
7666  */
7667 static int load_balance(int this_cpu, struct rq *this_rq,
7668 			struct sched_domain *sd, enum cpu_idle_type idle,
7669 			int *continue_balancing)
7670 {
7671 	int ld_moved, cur_ld_moved, active_balance = 0;
7672 	struct sched_domain *sd_parent = sd->parent;
7673 	struct sched_group *group;
7674 	struct rq *busiest;
7675 	unsigned long flags;
7676 	struct cpumask *cpus = this_cpu_cpumask_var_ptr(load_balance_mask);
7677 
7678 	struct lb_env env = {
7679 		.sd		= sd,
7680 		.dst_cpu	= this_cpu,
7681 		.dst_rq		= this_rq,
7682 		.dst_grpmask    = sched_group_cpus(sd->groups),
7683 		.idle		= idle,
7684 		.loop_break	= sched_nr_migrate_break,
7685 		.cpus		= cpus,
7686 		.fbq_type	= all,
7687 		.tasks		= LIST_HEAD_INIT(env.tasks),
7688 	};
7689 
7690 	/*
7691 	 * For NEWLY_IDLE load_balancing, we don't need to consider
7692 	 * other cpus in our group
7693 	 */
7694 	if (idle == CPU_NEWLY_IDLE)
7695 		env.dst_grpmask = NULL;
7696 
7697 	cpumask_copy(cpus, cpu_active_mask);
7698 
7699 	schedstat_inc(sd->lb_count[idle]);
7700 
7701 redo:
7702 	if (!should_we_balance(&env)) {
7703 		*continue_balancing = 0;
7704 		goto out_balanced;
7705 	}
7706 
7707 	group = find_busiest_group(&env);
7708 	if (!group) {
7709 		schedstat_inc(sd->lb_nobusyg[idle]);
7710 		goto out_balanced;
7711 	}
7712 
7713 	busiest = find_busiest_queue(&env, group);
7714 	if (!busiest) {
7715 		schedstat_inc(sd->lb_nobusyq[idle]);
7716 		goto out_balanced;
7717 	}
7718 
7719 	BUG_ON(busiest == env.dst_rq);
7720 
7721 	schedstat_add(sd->lb_imbalance[idle], env.imbalance);
7722 
7723 	env.src_cpu = busiest->cpu;
7724 	env.src_rq = busiest;
7725 
7726 	ld_moved = 0;
7727 	if (busiest->nr_running > 1) {
7728 		/*
7729 		 * Attempt to move tasks. If find_busiest_group has found
7730 		 * an imbalance but busiest->nr_running <= 1, the group is
7731 		 * still unbalanced. ld_moved simply stays zero, so it is
7732 		 * correctly treated as an imbalance.
7733 		 */
7734 		env.flags |= LBF_ALL_PINNED;
7735 		env.loop_max  = min(sysctl_sched_nr_migrate, busiest->nr_running);
7736 
7737 more_balance:
7738 		raw_spin_lock_irqsave(&busiest->lock, flags);
7739 
7740 		/*
7741 		 * cur_ld_moved - load moved in current iteration
7742 		 * ld_moved     - cumulative load moved across iterations
7743 		 */
7744 		cur_ld_moved = detach_tasks(&env);
7745 
7746 		/*
7747 		 * We've detached some tasks from busiest_rq. Every
7748 		 * task is marked "TASK_ON_RQ_MIGRATING", so we can safely
7749 		 * unlock busiest->lock, and we are able to be sure
7750 		 * that nobody can manipulate the tasks in parallel.
7751 		 * See task_rq_lock() family for the details.
7752 		 */
7753 
7754 		raw_spin_unlock(&busiest->lock);
7755 
7756 		if (cur_ld_moved) {
7757 			attach_tasks(&env);
7758 			ld_moved += cur_ld_moved;
7759 		}
7760 
7761 		local_irq_restore(flags);
7762 
7763 		if (env.flags & LBF_NEED_BREAK) {
7764 			env.flags &= ~LBF_NEED_BREAK;
7765 			goto more_balance;
7766 		}
7767 
7768 		/*
7769 		 * Revisit (affine) tasks on src_cpu that couldn't be moved to
7770 		 * us and move them to an alternate dst_cpu in our sched_group
7771 		 * where they can run. The upper limit on how many times we
7772 		 * iterate on the same src_cpu depends on the number of cpus in our
7773 		 * sched_group.
7774 		 *
7775 		 * This changes load balance semantics a bit on who can move
7776 		 * load to a given_cpu. In addition to the given_cpu itself
7777 		 * (or an ilb_cpu acting on its behalf where given_cpu is
7778 		 * nohz-idle), we now have balance_cpu in a position to move
7779 		 * load to given_cpu. In rare situations, this may cause
7780 		 * conflicts (balance_cpu and given_cpu/ilb_cpu deciding
7781 		 * _independently_ and at _same_ time to move some load to
7782 		 * given_cpu) causing exceess load to be moved to given_cpu.
7783 		 * This however should not happen so much in practice and
7784 		 * moreover subsequent load balance cycles should correct the
7785 		 * excess load moved.
7786 		 */
7787 		if ((env.flags & LBF_DST_PINNED) && env.imbalance > 0) {
7788 
7789 			/* Prevent to re-select dst_cpu via env's cpus */
7790 			cpumask_clear_cpu(env.dst_cpu, env.cpus);
7791 
7792 			env.dst_rq	 = cpu_rq(env.new_dst_cpu);
7793 			env.dst_cpu	 = env.new_dst_cpu;
7794 			env.flags	&= ~LBF_DST_PINNED;
7795 			env.loop	 = 0;
7796 			env.loop_break	 = sched_nr_migrate_break;
7797 
7798 			/*
7799 			 * Go back to "more_balance" rather than "redo" since we
7800 			 * need to continue with same src_cpu.
7801 			 */
7802 			goto more_balance;
7803 		}
7804 
7805 		/*
7806 		 * We failed to reach balance because of affinity.
7807 		 */
7808 		if (sd_parent) {
7809 			int *group_imbalance = &sd_parent->groups->sgc->imbalance;
7810 
7811 			if ((env.flags & LBF_SOME_PINNED) && env.imbalance > 0)
7812 				*group_imbalance = 1;
7813 		}
7814 
7815 		/* All tasks on this runqueue were pinned by CPU affinity */
7816 		if (unlikely(env.flags & LBF_ALL_PINNED)) {
7817 			cpumask_clear_cpu(cpu_of(busiest), cpus);
7818 			if (!cpumask_empty(cpus)) {
7819 				env.loop = 0;
7820 				env.loop_break = sched_nr_migrate_break;
7821 				goto redo;
7822 			}
7823 			goto out_all_pinned;
7824 		}
7825 	}
7826 
7827 	if (!ld_moved) {
7828 		schedstat_inc(sd->lb_failed[idle]);
7829 		/*
7830 		 * Increment the failure counter only on periodic balance.
7831 		 * We do not want newidle balance, which can be very
7832 		 * frequent, pollute the failure counter causing
7833 		 * excessive cache_hot migrations and active balances.
7834 		 */
7835 		if (idle != CPU_NEWLY_IDLE)
7836 			sd->nr_balance_failed++;
7837 
7838 		if (need_active_balance(&env)) {
7839 			raw_spin_lock_irqsave(&busiest->lock, flags);
7840 
7841 			/* don't kick the active_load_balance_cpu_stop,
7842 			 * if the curr task on busiest cpu can't be
7843 			 * moved to this_cpu
7844 			 */
7845 			if (!cpumask_test_cpu(this_cpu,
7846 					tsk_cpus_allowed(busiest->curr))) {
7847 				raw_spin_unlock_irqrestore(&busiest->lock,
7848 							    flags);
7849 				env.flags |= LBF_ALL_PINNED;
7850 				goto out_one_pinned;
7851 			}
7852 
7853 			/*
7854 			 * ->active_balance synchronizes accesses to
7855 			 * ->active_balance_work.  Once set, it's cleared
7856 			 * only after active load balance is finished.
7857 			 */
7858 			if (!busiest->active_balance) {
7859 				busiest->active_balance = 1;
7860 				busiest->push_cpu = this_cpu;
7861 				active_balance = 1;
7862 			}
7863 			raw_spin_unlock_irqrestore(&busiest->lock, flags);
7864 
7865 			if (active_balance) {
7866 				stop_one_cpu_nowait(cpu_of(busiest),
7867 					active_load_balance_cpu_stop, busiest,
7868 					&busiest->active_balance_work);
7869 			}
7870 
7871 			/* We've kicked active balancing, force task migration. */
7872 			sd->nr_balance_failed = sd->cache_nice_tries+1;
7873 		}
7874 	} else
7875 		sd->nr_balance_failed = 0;
7876 
7877 	if (likely(!active_balance)) {
7878 		/* We were unbalanced, so reset the balancing interval */
7879 		sd->balance_interval = sd->min_interval;
7880 	} else {
7881 		/*
7882 		 * If we've begun active balancing, start to back off. This
7883 		 * case may not be covered by the all_pinned logic if there
7884 		 * is only 1 task on the busy runqueue (because we don't call
7885 		 * detach_tasks).
7886 		 */
7887 		if (sd->balance_interval < sd->max_interval)
7888 			sd->balance_interval *= 2;
7889 	}
7890 
7891 	goto out;
7892 
7893 out_balanced:
7894 	/*
7895 	 * We reach balance although we may have faced some affinity
7896 	 * constraints. Clear the imbalance flag if it was set.
7897 	 */
7898 	if (sd_parent) {
7899 		int *group_imbalance = &sd_parent->groups->sgc->imbalance;
7900 
7901 		if (*group_imbalance)
7902 			*group_imbalance = 0;
7903 	}
7904 
7905 out_all_pinned:
7906 	/*
7907 	 * We reach balance because all tasks are pinned at this level so
7908 	 * we can't migrate them. Leave the imbalance flag set so the parent
7909 	 * level can try to migrate them.
7910 	 */
7911 	schedstat_inc(sd->lb_balanced[idle]);
7912 
7913 	sd->nr_balance_failed = 0;
7914 
7915 out_one_pinned:
7916 	/* tune up the balancing interval */
7917 	if (((env.flags & LBF_ALL_PINNED) &&
7918 			sd->balance_interval < MAX_PINNED_INTERVAL) ||
7919 			(sd->balance_interval < sd->max_interval))
7920 		sd->balance_interval *= 2;
7921 
7922 	ld_moved = 0;
7923 out:
7924 	return ld_moved;
7925 }
7926 
7927 static inline unsigned long
7928 get_sd_balance_interval(struct sched_domain *sd, int cpu_busy)
7929 {
7930 	unsigned long interval = sd->balance_interval;
7931 
7932 	if (cpu_busy)
7933 		interval *= sd->busy_factor;
7934 
7935 	/* scale ms to jiffies */
7936 	interval = msecs_to_jiffies(interval);
7937 	interval = clamp(interval, 1UL, max_load_balance_interval);
7938 
7939 	return interval;
7940 }
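
/*
 * For example (illustrative values): with sd->balance_interval = 8 (ms)
 * and a typical sd->busy_factor of 32, a busy cpu rebalances roughly
 * every 256ms; msecs_to_jiffies() then converts that to jiffies and the
 * result is clamped to [1, max_load_balance_interval].
 */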
7941 
7942 static inline void
7943 update_next_balance(struct sched_domain *sd, unsigned long *next_balance)
7944 {
7945 	unsigned long interval, next;
7946 
7947 	/* used by idle balance, so cpu_busy = 0 */
7948 	interval = get_sd_balance_interval(sd, 0);
7949 	next = sd->last_balance + interval;
7950 
7951 	if (time_after(*next_balance, next))
7952 		*next_balance = next;
7953 }
7954 
7955 /*
7956  * idle_balance is called by schedule() if this_cpu is about to become
7957  * idle. Attempts to pull tasks from other CPUs.
7958  */
7959 static int idle_balance(struct rq *this_rq)
7960 {
7961 	unsigned long next_balance = jiffies + HZ;
7962 	int this_cpu = this_rq->cpu;
7963 	struct sched_domain *sd;
7964 	int pulled_task = 0;
7965 	u64 curr_cost = 0;
7966 
7967 	/*
7968 	 * We must set idle_stamp _before_ calling idle_balance(), such that we
7969 	 * measure the duration of idle_balance() as idle time.
7970 	 */
7971 	this_rq->idle_stamp = rq_clock(this_rq);
7972 
7973 	if (this_rq->avg_idle < sysctl_sched_migration_cost ||
7974 	    !this_rq->rd->overload) {
7975 		rcu_read_lock();
7976 		sd = rcu_dereference_check_sched_domain(this_rq->sd);
7977 		if (sd)
7978 			update_next_balance(sd, &next_balance);
7979 		rcu_read_unlock();
7980 
7981 		goto out;
7982 	}
7983 
7984 	raw_spin_unlock(&this_rq->lock);
7985 
7986 	update_blocked_averages(this_cpu);
7987 	rcu_read_lock();
7988 	for_each_domain(this_cpu, sd) {
7989 		int continue_balancing = 1;
7990 		u64 t0, domain_cost;
7991 
7992 		if (!(sd->flags & SD_LOAD_BALANCE))
7993 			continue;
7994 
7995 		if (this_rq->avg_idle < curr_cost + sd->max_newidle_lb_cost) {
7996 			update_next_balance(sd, &next_balance);
7997 			break;
7998 		}
7999 
8000 		if (sd->flags & SD_BALANCE_NEWIDLE) {
8001 			t0 = sched_clock_cpu(this_cpu);
8002 
8003 			pulled_task = load_balance(this_cpu, this_rq,
8004 						   sd, CPU_NEWLY_IDLE,
8005 						   &continue_balancing);
8006 
8007 			domain_cost = sched_clock_cpu(this_cpu) - t0;
8008 			if (domain_cost > sd->max_newidle_lb_cost)
8009 				sd->max_newidle_lb_cost = domain_cost;
8010 
8011 			curr_cost += domain_cost;
8012 		}
8013 
8014 		update_next_balance(sd, &next_balance);
8015 
8016 		/*
8017 		 * Stop searching for tasks to pull if there are
8018 		 * now runnable tasks on this rq.
8019 		 */
8020 		if (pulled_task || this_rq->nr_running > 0)
8021 			break;
8022 	}
8023 	rcu_read_unlock();
8024 
8025 	raw_spin_lock(&this_rq->lock);
8026 
8027 	if (curr_cost > this_rq->max_idle_balance_cost)
8028 		this_rq->max_idle_balance_cost = curr_cost;
8029 
8030 	/*
8031 	 * While browsing the domains, we released the rq lock, a task could
8032 	 * have been enqueued in the meantime. Since we're not going idle,
8033 	 * pretend we pulled a task.
8034 	 */
8035 	if (this_rq->cfs.h_nr_running && !pulled_task)
8036 		pulled_task = 1;
8037 
8038 out:
8039 	/* Move the next balance forward */
8040 	if (time_after(this_rq->next_balance, next_balance))
8041 		this_rq->next_balance = next_balance;
8042 
8043 	/* Is there a task of a high priority class? */
8044 	if (this_rq->nr_running != this_rq->cfs.h_nr_running)
8045 		pulled_task = -1;
8046 
8047 	if (pulled_task)
8048 		this_rq->idle_stamp = 0;
8049 
8050 	return pulled_task;
8051 }
8052 
8053 /*
8054  * active_load_balance_cpu_stop is run by cpu stopper. It pushes
8055  * running tasks off the busiest CPU onto idle CPUs. It requires at
8056  * least 1 task to be running on each physical CPU where possible, and
8057  * avoids physical / logical imbalances.
8058  */
8059 static int active_load_balance_cpu_stop(void *data)
8060 {
8061 	struct rq *busiest_rq = data;
8062 	int busiest_cpu = cpu_of(busiest_rq);
8063 	int target_cpu = busiest_rq->push_cpu;
8064 	struct rq *target_rq = cpu_rq(target_cpu);
8065 	struct sched_domain *sd;
8066 	struct task_struct *p = NULL;
8067 
8068 	raw_spin_lock_irq(&busiest_rq->lock);
8069 
8070 	/* make sure the requested cpu hasn't gone down in the meantime */
8071 	if (unlikely(busiest_cpu != smp_processor_id() ||
8072 		     !busiest_rq->active_balance))
8073 		goto out_unlock;
8074 
8075 	/* Is there any task to move? */
8076 	if (busiest_rq->nr_running <= 1)
8077 		goto out_unlock;
8078 
8079 	/*
8080 	 * This condition is "impossible"; if it occurs
8081 	 * we need to fix it. Originally reported by
8082 	 * Bjorn Helgaas on a 128-cpu setup.
8083 	 */
8084 	BUG_ON(busiest_rq == target_rq);
8085 
8086 	/* Search for an sd spanning us and the target CPU. */
8087 	rcu_read_lock();
8088 	for_each_domain(target_cpu, sd) {
8089 		if ((sd->flags & SD_LOAD_BALANCE) &&
8090 		    cpumask_test_cpu(busiest_cpu, sched_domain_span(sd)))
8091 				break;
8092 	}
8093 
8094 	if (likely(sd)) {
8095 		struct lb_env env = {
8096 			.sd		= sd,
8097 			.dst_cpu	= target_cpu,
8098 			.dst_rq		= target_rq,
8099 			.src_cpu	= busiest_rq->cpu,
8100 			.src_rq		= busiest_rq,
8101 			.idle		= CPU_IDLE,
8102 		};
8103 
8104 		schedstat_inc(sd->alb_count);
8105 
8106 		p = detach_one_task(&env);
8107 		if (p) {
8108 			schedstat_inc(sd->alb_pushed);
8109 			/* Active balancing done, reset the failure counter. */
8110 			sd->nr_balance_failed = 0;
8111 		} else {
8112 			schedstat_inc(sd->alb_failed);
8113 		}
8114 	}
8115 	rcu_read_unlock();
8116 out_unlock:
8117 	busiest_rq->active_balance = 0;
8118 	raw_spin_unlock(&busiest_rq->lock);
8119 
8120 	if (p)
8121 		attach_one_task(target_rq, p);
8122 
8123 	local_irq_enable();
8124 
8125 	return 0;
8126 }
8127 
8128 static inline int on_null_domain(struct rq *rq)
8129 {
8130 	return unlikely(!rcu_dereference_sched(rq->sd));
8131 }
8132 
8133 #ifdef CONFIG_NO_HZ_COMMON
8134 /*
8135  * idle load balancing details
8136  * - When one of the busy CPUs notices that there may be an idle rebalancing
8137  *   needed, it will kick the idle load balancer, which then does idle
8138  *   load balancing for all the idle CPUs.
8139  */
8140 static struct {
8141 	cpumask_var_t idle_cpus_mask;
8142 	atomic_t nr_cpus;
8143 	unsigned long next_balance;     /* in jiffy units */
8144 } nohz ____cacheline_aligned;
8145 
8146 static inline int find_new_ilb(void)
8147 {
8148 	int ilb = cpumask_first(nohz.idle_cpus_mask);
8149 
8150 	if (ilb < nr_cpu_ids && idle_cpu(ilb))
8151 		return ilb;
8152 
8153 	return nr_cpu_ids;
8154 }
8155 
8156 /*
8157  * Kick a CPU to do the nohz balancing, if it is time for it. We pick the
8158  * nohz_load_balancer CPU (if there is one), otherwise fall back to any idle
8159  * CPU (if there is one).
8160  */
8161 static void nohz_balancer_kick(void)
8162 {
8163 	int ilb_cpu;
8164 
8165 	nohz.next_balance++;
8166 
8167 	ilb_cpu = find_new_ilb();
8168 
8169 	if (ilb_cpu >= nr_cpu_ids)
8170 		return;
8171 
8172 	if (test_and_set_bit(NOHZ_BALANCE_KICK, nohz_flags(ilb_cpu)))
8173 		return;
8174 	/*
8175 	 * Use smp_send_reschedule() instead of resched_cpu().
8176 	 * This way we generate a sched IPI on the target cpu which
8177 	 * is idle. And the softirq performing nohz idle load balance
8178 	 * will be run before returning from the IPI.
8179 	 */
8180 	smp_send_reschedule(ilb_cpu);
8181 	return;
8182 }
8183 
8184 void nohz_balance_exit_idle(unsigned int cpu)
8185 {
8186 	if (unlikely(test_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu)))) {
8187 		/*
8188 		 * Completely isolated CPUs never add themselves to
8188 		 * nohz.idle_cpus_mask, so we must test before clearing.
8189 		 */
8190 		if (likely(cpumask_test_cpu(cpu, nohz.idle_cpus_mask))) {
8191 			cpumask_clear_cpu(cpu, nohz.idle_cpus_mask);
8192 			atomic_dec(&nohz.nr_cpus);
8193 		}
8194 		clear_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu));
8195 	}
8196 }
8197 
8198 static inline void set_cpu_sd_state_busy(void)
8199 {
8200 	struct sched_domain *sd;
8201 	int cpu = smp_processor_id();
8202 
8203 	rcu_read_lock();
8204 	sd = rcu_dereference(per_cpu(sd_llc, cpu));
8205 
8206 	if (!sd || !sd->nohz_idle)
8207 		goto unlock;
8208 	sd->nohz_idle = 0;
8209 
8210 	atomic_inc(&sd->shared->nr_busy_cpus);
8211 unlock:
8212 	rcu_read_unlock();
8213 }
8214 
8215 void set_cpu_sd_state_idle(void)
8216 {
8217 	struct sched_domain *sd;
8218 	int cpu = smp_processor_id();
8219 
8220 	rcu_read_lock();
8221 	sd = rcu_dereference(per_cpu(sd_llc, cpu));
8222 
8223 	if (!sd || sd->nohz_idle)
8224 		goto unlock;
8225 	sd->nohz_idle = 1;
8226 
8227 	atomic_dec(&sd->shared->nr_busy_cpus);
8228 unlock:
8229 	rcu_read_unlock();
8230 }
8231 
8232 /*
8233  * This routine will record that the cpu is going idle with tick stopped.
8234  * This info will be used in performing idle load balancing in the future.
8235  */
8236 void nohz_balance_enter_idle(int cpu)
8237 {
8238 	/*
8239 	 * If this cpu is going down, then nothing needs to be done.
8240 	 */
8241 	if (!cpu_active(cpu))
8242 		return;
8243 
8244 	if (test_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu)))
8245 		return;
8246 
8247 	/*
8248 	 * If we're a completely isolated CPU, we don't play.
8249 	 */
8250 	if (on_null_domain(cpu_rq(cpu)))
8251 		return;
8252 
8253 	cpumask_set_cpu(cpu, nohz.idle_cpus_mask);
8254 	atomic_inc(&nohz.nr_cpus);
8255 	set_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu));
8256 }
8257 #endif
8258 
8259 static DEFINE_SPINLOCK(balancing);
8260 
8261 /*
8262  * Scale the max load_balance interval with the number of CPUs in the system.
8263  * This trades load-balance latency on larger machines for less cross talk.
8264  */
8265 void update_max_interval(void)
8266 {
8267 	max_load_balance_interval = HZ*num_online_cpus()/10;
8268 }
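
/*
 * For example (illustrative config): with HZ = 1000 and 8 online cpus,
 * max_load_balance_interval = 1000 * 8 / 10 = 800 jiffies, i.e. load
 * balancing of a domain is never deferred by more than roughly 800ms.
 */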
8269 
8270 /*
8271  * It checks each scheduling domain to see if it is due to be balanced,
8272  * and initiates a balancing operation if so.
8273  *
8274  * Balancing parameters are set up in init_sched_domains.
8275  */
8276 static void rebalance_domains(struct rq *rq, enum cpu_idle_type idle)
8277 {
8278 	int continue_balancing = 1;
8279 	int cpu = rq->cpu;
8280 	unsigned long interval;
8281 	struct sched_domain *sd;
8282 	/* Earliest time when we have to do rebalance again */
8283 	unsigned long next_balance = jiffies + 60*HZ;
8284 	int update_next_balance = 0;
8285 	int need_serialize, need_decay = 0;
8286 	u64 max_cost = 0;
8287 
8288 	update_blocked_averages(cpu);
8289 
8290 	rcu_read_lock();
8291 	for_each_domain(cpu, sd) {
8292 		/*
8293 		 * Decay the newidle max times here because this is a regular
8294 		 * visit to all the domains. Decay ~1% per second.
8295 		 */
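		/*
		 * For scale (illustrative arithmetic): 253/256 is roughly
		 * 0.988, so with one decay per HZ the recorded cost shrinks
		 * by about 1.2% per second and halves in roughly a minute
		 * (0.988^60 ~= 0.49).
		 */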
8296 		if (time_after(jiffies, sd->next_decay_max_lb_cost)) {
8297 			sd->max_newidle_lb_cost =
8298 				(sd->max_newidle_lb_cost * 253) / 256;
8299 			sd->next_decay_max_lb_cost = jiffies + HZ;
8300 			need_decay = 1;
8301 		}
8302 		max_cost += sd->max_newidle_lb_cost;
8303 
8304 		if (!(sd->flags & SD_LOAD_BALANCE))
8305 			continue;
8306 
8307 		/*
8308 		 * Stop the load balance at this level. There is another
8309 		 * CPU in our sched group which is doing load balancing more
8310 		 * actively.
8311 		 */
8312 		if (!continue_balancing) {
8313 			if (need_decay)
8314 				continue;
8315 			break;
8316 		}
8317 
8318 		interval = get_sd_balance_interval(sd, idle != CPU_IDLE);
8319 
8320 		need_serialize = sd->flags & SD_SERIALIZE;
8321 		if (need_serialize) {
8322 			if (!spin_trylock(&balancing))
8323 				goto out;
8324 		}
8325 
8326 		if (time_after_eq(jiffies, sd->last_balance + interval)) {
8327 			if (load_balance(cpu, rq, sd, idle, &continue_balancing)) {
8328 				/*
8329 				 * The LBF_DST_PINNED logic could have changed
8330 				 * env->dst_cpu, so we can't know our idle
8331 				 * state even if we migrated tasks. Update it.
8332 				 */
8333 				idle = idle_cpu(cpu) ? CPU_IDLE : CPU_NOT_IDLE;
8334 			}
8335 			sd->last_balance = jiffies;
8336 			interval = get_sd_balance_interval(sd, idle != CPU_IDLE);
8337 		}
8338 		if (need_serialize)
8339 			spin_unlock(&balancing);
8340 out:
8341 		if (time_after(next_balance, sd->last_balance + interval)) {
8342 			next_balance = sd->last_balance + interval;
8343 			update_next_balance = 1;
8344 		}
8345 	}
8346 	if (need_decay) {
8347 		/*
8348 		 * Ensure the rq-wide value also decays but keep it at a
8349 		 * reasonable floor to avoid funnies with rq->avg_idle.
8350 		 */
8351 		rq->max_idle_balance_cost =
8352 			max((u64)sysctl_sched_migration_cost, max_cost);
8353 	}
8354 	rcu_read_unlock();
8355 
8356 	/*
8357 	 * next_balance will be updated only when there is a need.
8358 	 * When the cpu is attached to a null domain, for example, it will not be
8359 	 * updated.
8360 	 */
8361 	if (likely(update_next_balance)) {
8362 		rq->next_balance = next_balance;
8363 
8364 #ifdef CONFIG_NO_HZ_COMMON
8365 		/*
8366 		 * If this CPU has been elected to perform the nohz idle
8367 		 * balance, the other idle CPUs have already rebalanced with
8368 		 * nohz_idle_balance() and nohz.next_balance has been
8369 		 * updated accordingly. This CPU is now running the idle load
8370 		 * balance for itself and we need to update
8371 		 * nohz.next_balance accordingly.
8372 		 */
8373 		if ((idle == CPU_IDLE) && time_after(nohz.next_balance, rq->next_balance))
8374 			nohz.next_balance = rq->next_balance;
8375 #endif
8376 	}
8377 }
8378 
8379 #ifdef CONFIG_NO_HZ_COMMON
8380 /*
8381  * In CONFIG_NO_HZ_COMMON case, the idle balance kickee will do the
8382  * rebalancing for all the cpus for whom scheduler ticks are stopped.
8383  */
8384 static void nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle)
8385 {
8386 	int this_cpu = this_rq->cpu;
8387 	struct rq *rq;
8388 	int balance_cpu;
8389 	/* Earliest time when we have to do rebalance again */
8390 	unsigned long next_balance = jiffies + 60*HZ;
8391 	int update_next_balance = 0;
8392 
8393 	if (idle != CPU_IDLE ||
8394 	    !test_bit(NOHZ_BALANCE_KICK, nohz_flags(this_cpu)))
8395 		goto end;
8396 
8397 	for_each_cpu(balance_cpu, nohz.idle_cpus_mask) {
8398 		if (balance_cpu == this_cpu || !idle_cpu(balance_cpu))
8399 			continue;
8400 
8401 		/*
8402 		 * If this cpu gets work to do, stop the load balancing
8403 		 * work being done for other cpus. Next load
8404 		 * balancing owner will pick it up.
8405 		 */
8406 		if (need_resched())
8407 			break;
8408 
8409 		rq = cpu_rq(balance_cpu);
8410 
8411 		/*
8412 		 * If time for next balance is due,
8413 		 * do the balance.
8414 		 */
8415 		if (time_after_eq(jiffies, rq->next_balance)) {
8416 			raw_spin_lock_irq(&rq->lock);
8417 			update_rq_clock(rq);
8418 			cpu_load_update_idle(rq);
8419 			raw_spin_unlock_irq(&rq->lock);
8420 			rebalance_domains(rq, CPU_IDLE);
8421 		}
8422 
8423 		if (time_after(next_balance, rq->next_balance)) {
8424 			next_balance = rq->next_balance;
8425 			update_next_balance = 1;
8426 		}
8427 	}
8428 
8429 	/*
8430 	 * next_balance will be updated only when there is a need.
8431 	 * When the CPU is attached to a null domain, for example, it will not be
8432 	 * updated.
8433 	 */
8434 	if (likely(update_next_balance))
8435 		nohz.next_balance = next_balance;
8436 end:
8437 	clear_bit(NOHZ_BALANCE_KICK, nohz_flags(this_cpu));
8438 }
8439 
8440 /*
8441  * Current heuristic for kicking the idle load balancer in the presence
8442  * of an idle cpu in the system.
8443  *   - This rq has more than one task.
8444  *   - This rq has at least one CFS task and the capacity of the CPU is
8445  *     significantly reduced because of RT tasks or IRQs.
8446  *   - At the parent of the LLC scheduler domain level, this cpu's scheduler
8447  *     group has multiple busy cpus.
8448  *   - For SD_ASYM_PACKING, if the lower-numbered cpus in the scheduler
8449  *     domain span are idle.
8450  */
8451 static inline bool nohz_kick_needed(struct rq *rq)
8452 {
8453 	unsigned long now = jiffies;
8454 	struct sched_domain_shared *sds;
8455 	struct sched_domain *sd;
8456 	int nr_busy, cpu = rq->cpu;
8457 	bool kick = false;
8458 
8459 	if (unlikely(rq->idle_balance))
8460 		return false;
8461 
8462 	/*
8463 	 * We may have recently been in ticked or tickless idle mode. At the first
8464 	 * busy tick after returning from idle, we will update the busy stats.
8465 	 */
8466 	set_cpu_sd_state_busy();
8467 	nohz_balance_exit_idle(cpu);
8468 
8469 	/*
8470 	 * None are in tickless mode and hence no need for NOHZ idle load
8471 	 * balancing.
8472 	 */
8473 	if (likely(!atomic_read(&nohz.nr_cpus)))
8474 		return false;
8475 
8476 	if (time_before(now, nohz.next_balance))
8477 		return false;
8478 
8479 	if (rq->nr_running >= 2)
8480 		return true;
8481 
8482 	rcu_read_lock();
8483 	sds = rcu_dereference(per_cpu(sd_llc_shared, cpu));
8484 	if (sds) {
8485 		/*
8486 		 * XXX: write a coherent comment on why we do this.
8487 		 * See also: http://lkml.kernel.org/r/20111202010832.602203411@sbsiddha-desk.sc.intel.com
8488 		 */
8489 		nr_busy = atomic_read(&sds->nr_busy_cpus);
8490 		if (nr_busy > 1) {
8491 			kick = true;
8492 			goto unlock;
8493 		}
8494 
8495 	}
8496 
8497 	sd = rcu_dereference(rq->sd);
8498 	if (sd) {
8499 		if ((rq->cfs.h_nr_running >= 1) &&
8500 				check_cpu_capacity(rq, sd)) {
8501 			kick = true;
8502 			goto unlock;
8503 		}
8504 	}
8505 
8506 	sd = rcu_dereference(per_cpu(sd_asym, cpu));
8507 	if (sd && (cpumask_first_and(nohz.idle_cpus_mask,
8508 				  sched_domain_span(sd)) < cpu)) {
8509 		kick = true;
8510 		goto unlock;
8511 	}
8512 
8513 unlock:
8514 	rcu_read_unlock();
8515 	return kick;
8516 }
8517 #else
8518 static void nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle) { }
8519 #endif
8520 
8521 /*
8522  * run_rebalance_domains is triggered when needed from the scheduler tick.
8523  * Also triggered for nohz idle balancing (with nohz_balancing_kick set).
8524  */
8525 static __latent_entropy void run_rebalance_domains(struct softirq_action *h)
8526 {
8527 	struct rq *this_rq = this_rq();
8528 	enum cpu_idle_type idle = this_rq->idle_balance ?
8529 						CPU_IDLE : CPU_NOT_IDLE;
8530 
8531 	/*
8532 	 * If this cpu has a pending nohz_balance_kick, then do the
8533 	 * balancing on behalf of the other idle cpus whose ticks are
8534 	 * stopped. Do nohz_idle_balance *before* rebalance_domains to
8535 	 * give the idle cpus a chance to load balance. Else we may
8536 	 * load balance only within the local sched_domain hierarchy
8537 	 * and abort nohz_idle_balance altogether if we pull some load.
8538 	 */
8539 	nohz_idle_balance(this_rq, idle);
8540 	rebalance_domains(this_rq, idle);
8541 }
8542 
8543 /*
8544  * Trigger the SCHED_SOFTIRQ if it is time to do periodic load balancing.
8545  */
8546 void trigger_load_balance(struct rq *rq)
8547 {
8548 	/* Don't need to rebalance while attached to NULL domain */
8549 	if (unlikely(on_null_domain(rq)))
8550 		return;
8551 
8552 	if (time_after_eq(jiffies, rq->next_balance))
8553 		raise_softirq(SCHED_SOFTIRQ);
8554 #ifdef CONFIG_NO_HZ_COMMON
8555 	if (nohz_kick_needed(rq))
8556 		nohz_balancer_kick();
8557 #endif
8558 }
8559 
8560 static void rq_online_fair(struct rq *rq)
8561 {
8562 	update_sysctl();
8563 
8564 	update_runtime_enabled(rq);
8565 }
8566 
8567 static void rq_offline_fair(struct rq *rq)
8568 {
8569 	update_sysctl();
8570 
8571 	/* Ensure any throttled groups are reachable by pick_next_task */
8572 	unthrottle_offline_cfs_rqs(rq);
8573 }
8574 
8575 #endif /* CONFIG_SMP */
8576 
8577 /*
8578  * scheduler tick hitting a task of our scheduling class:
8579  */
8580 static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued)
8581 {
8582 	struct cfs_rq *cfs_rq;
8583 	struct sched_entity *se = &curr->se;
8584 
8585 	for_each_sched_entity(se) {
8586 		cfs_rq = cfs_rq_of(se);
8587 		entity_tick(cfs_rq, se, queued);
8588 	}
8589 
8590 	if (static_branch_unlikely(&sched_numa_balancing))
8591 		task_tick_numa(rq, curr);
8592 }
8593 
8594 /*
8595  * called on fork with the child task as argument from the parent's context
8596  *  - child not yet on the tasklist
8597  *  - preemption disabled
8598  */
8599 static void task_fork_fair(struct task_struct *p)
8600 {
8601 	struct cfs_rq *cfs_rq;
8602 	struct sched_entity *se = &p->se, *curr;
8603 	struct rq *rq = this_rq();
8604 
8605 	raw_spin_lock(&rq->lock);
8606 	update_rq_clock(rq);
8607 
8608 	cfs_rq = task_cfs_rq(current);
8609 	curr = cfs_rq->curr;
8610 	if (curr) {
8611 		update_curr(cfs_rq);
8612 		se->vruntime = curr->vruntime;
8613 	}
8614 	place_entity(cfs_rq, se, 1);
8615 
8616 	if (sysctl_sched_child_runs_first && curr && entity_before(curr, se)) {
8617 		/*
8618 		 * Upon rescheduling, sched_class::put_prev_task() will place
8619 		 * 'current' within the tree based on its new key value.
8620 		 */
8621 		swap(curr->vruntime, se->vruntime);
8622 		resched_curr(rq);
8623 	}
8624 
8625 	se->vruntime -= cfs_rq->min_vruntime;
8626 	raw_spin_unlock(&rq->lock);
8627 }
8628 
8629 /*
8630  * Priority of the task has changed. Check to see if we preempt
8631  * the current task.
8632  */
8633 static void
8634 prio_changed_fair(struct rq *rq, struct task_struct *p, int oldprio)
8635 {
8636 	if (!task_on_rq_queued(p))
8637 		return;
8638 
8639 	/*
8640 	 * Reschedule if we are currently running on this runqueue and
8641 	 * our priority decreased, or if we are not currently running on
8642 	 * this runqueue and our priority is higher than the current's
8643 	 */
8644 	if (rq->curr == p) {
8645 		if (p->prio > oldprio)
8646 			resched_curr(rq);
8647 	} else
8648 		check_preempt_curr(rq, p, 0);
8649 }
8650 
8651 static inline bool vruntime_normalized(struct task_struct *p)
8652 {
8653 	struct sched_entity *se = &p->se;
8654 
8655 	/*
8656 	 * In both the TASK_ON_RQ_QUEUED and TASK_ON_RQ_MIGRATING cases,
8657 	 * the dequeue_entity(.flags=0) will already have normalized the
8658 	 * vruntime.
8659 	 */
8660 	if (p->on_rq)
8661 		return true;
8662 
8663 	/*
8664 	 * When !on_rq, vruntime of the task has usually NOT been normalized.
8665 	 * But there are some cases where it has already been normalized:
8666 	 *
8667 	 * - A forked child which is waiting for being woken up by
8668 	 *   wake_up_new_task().
8669 	 * - A task which has been woken up by try_to_wake_up() and
8670 	 *   waiting for actually being woken up by sched_ttwu_pending().
8671 	 */
8672 	if (!se->sum_exec_runtime || p->state == TASK_WAKING)
8673 		return true;
8674 
8675 	return false;
8676 }
8677 
8678 static void detach_task_cfs_rq(struct task_struct *p)
8679 {
8680 	struct sched_entity *se = &p->se;
8681 	struct cfs_rq *cfs_rq = cfs_rq_of(se);
8682 	u64 now = cfs_rq_clock_task(cfs_rq);
8683 
8684 	if (!vruntime_normalized(p)) {
8685 		/*
8686 		 * Fix up our vruntime so that the current sleep doesn't
8687 		 * cause 'unlimited' sleep bonus.
8688 		 */
8689 		place_entity(cfs_rq, se, 0);
8690 		se->vruntime -= cfs_rq->min_vruntime;
8691 	}
8692 
8693 	/* Catch up with the cfs_rq and remove our load when we leave */
8694 	update_cfs_rq_load_avg(now, cfs_rq, false);
8695 	detach_entity_load_avg(cfs_rq, se);
8696 	update_tg_load_avg(cfs_rq, false);
8697 }
8698 
8699 static void attach_task_cfs_rq(struct task_struct *p)
8700 {
8701 	struct sched_entity *se = &p->se;
8702 	struct cfs_rq *cfs_rq = cfs_rq_of(se);
8703 	u64 now = cfs_rq_clock_task(cfs_rq);
8704 
8705 #ifdef CONFIG_FAIR_GROUP_SCHED
8706 	/*
8707 	 * Since the real-depth could have been changed (only FAIR
8708 	 * class maintain depth value), reset depth properly.
8709 	 */
8710 	se->depth = se->parent ? se->parent->depth + 1 : 0;
8711 #endif
8712 
8713 	/* Synchronize task with its cfs_rq */
8714 	update_cfs_rq_load_avg(now, cfs_rq, false);
8715 	attach_entity_load_avg(cfs_rq, se);
8716 	update_tg_load_avg(cfs_rq, false);
8717 
8718 	if (!vruntime_normalized(p))
8719 		se->vruntime += cfs_rq->min_vruntime;
8720 }
8721 
8722 static void switched_from_fair(struct rq *rq, struct task_struct *p)
8723 {
8724 	detach_task_cfs_rq(p);
8725 }
8726 
8727 static void switched_to_fair(struct rq *rq, struct task_struct *p)
8728 {
8729 	attach_task_cfs_rq(p);
8730 
8731 	if (task_on_rq_queued(p)) {
8732 		/*
8733 		 * We were most likely switched from sched_rt, so
8734 		 * kick off the scheduler if we are running, otherwise just see
8735 		 * if we can still preempt the current task.
8736 		 */
8737 		if (rq->curr == p)
8738 			resched_curr(rq);
8739 		else
8740 			check_preempt_curr(rq, p, 0);
8741 	}
8742 }
8743 
8744 /* Account for a task changing its policy or group.
8745  *
8746  * This routine is mostly called to set cfs_rq->curr field when a task
8747  * migrates between groups/classes.
8748  */
8749 static void set_curr_task_fair(struct rq *rq)
8750 {
8751 	struct sched_entity *se = &rq->curr->se;
8752 
8753 	for_each_sched_entity(se) {
8754 		struct cfs_rq *cfs_rq = cfs_rq_of(se);
8755 
8756 		set_next_entity(cfs_rq, se);
8757 		/* ensure bandwidth has been allocated on our new cfs_rq */
8758 		account_cfs_rq_runtime(cfs_rq, 0);
8759 	}
8760 }
8761 
8762 void init_cfs_rq(struct cfs_rq *cfs_rq)
8763 {
8764 	cfs_rq->tasks_timeline = RB_ROOT;
8765 	cfs_rq->min_vruntime = (u64)(-(1LL << 20));
8766 #ifndef CONFIG_64BIT
8767 	cfs_rq->min_vruntime_copy = cfs_rq->min_vruntime;
8768 #endif
8769 #ifdef CONFIG_SMP
8770 	atomic_long_set(&cfs_rq->removed_load_avg, 0);
8771 	atomic_long_set(&cfs_rq->removed_util_avg, 0);
8772 #endif
8773 }
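
/*
 * Note on the min_vruntime initial value above: (u64)(-(1LL << 20)) sits
 * roughly one millisecond's worth of nanoseconds below the u64 wrap
 * point, so vruntime overflow is exercised shortly after a cfs_rq starts
 * running and any wrap-around handling bugs show up early rather than
 * after long uptimes.
 */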
8774 
8775 #ifdef CONFIG_FAIR_GROUP_SCHED
8776 static void task_set_group_fair(struct task_struct *p)
8777 {
8778 	struct sched_entity *se = &p->se;
8779 
8780 	set_task_rq(p, task_cpu(p));
8781 	se->depth = se->parent ? se->parent->depth + 1 : 0;
8782 }
8783 
8784 static void task_move_group_fair(struct task_struct *p)
8785 {
8786 	detach_task_cfs_rq(p);
8787 	set_task_rq(p, task_cpu(p));
8788 
8789 #ifdef CONFIG_SMP
8790 	/* Tell se's cfs_rq has been changed -- migrated */
8791 	p->se.avg.last_update_time = 0;
8792 #endif
8793 	attach_task_cfs_rq(p);
8794 }
8795 
8796 static void task_change_group_fair(struct task_struct *p, int type)
8797 {
8798 	switch (type) {
8799 	case TASK_SET_GROUP:
8800 		task_set_group_fair(p);
8801 		break;
8802 
8803 	case TASK_MOVE_GROUP:
8804 		task_move_group_fair(p);
8805 		break;
8806 	}
8807 }
8808 
8809 void free_fair_sched_group(struct task_group *tg)
8810 {
8811 	int i;
8812 
8813 	destroy_cfs_bandwidth(tg_cfs_bandwidth(tg));
8814 
8815 	for_each_possible_cpu(i) {
8816 		if (tg->cfs_rq)
8817 			kfree(tg->cfs_rq[i]);
8818 		if (tg->se)
8819 			kfree(tg->se[i]);
8820 	}
8821 
8822 	kfree(tg->cfs_rq);
8823 	kfree(tg->se);
8824 }
8825 
8826 int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
8827 {
8828 	struct sched_entity *se;
8829 	struct cfs_rq *cfs_rq;
8830 	struct rq *rq;
8831 	int i;
8832 
8833 	tg->cfs_rq = kzalloc(sizeof(cfs_rq) * nr_cpu_ids, GFP_KERNEL);
8834 	if (!tg->cfs_rq)
8835 		goto err;
8836 	tg->se = kzalloc(sizeof(se) * nr_cpu_ids, GFP_KERNEL);
8837 	if (!tg->se)
8838 		goto err;
8839 
8840 	tg->shares = NICE_0_LOAD;
8841 
8842 	init_cfs_bandwidth(tg_cfs_bandwidth(tg));
8843 
8844 	for_each_possible_cpu(i) {
8845 		rq = cpu_rq(i);
8846 
8847 		cfs_rq = kzalloc_node(sizeof(struct cfs_rq),
8848 				      GFP_KERNEL, cpu_to_node(i));
8849 		if (!cfs_rq)
8850 			goto err;
8851 
8852 		se = kzalloc_node(sizeof(struct sched_entity),
8853 				  GFP_KERNEL, cpu_to_node(i));
8854 		if (!se)
8855 			goto err_free_rq;
8856 
8857 		init_cfs_rq(cfs_rq);
8858 		init_tg_cfs_entry(tg, cfs_rq, se, i, parent->se[i]);
8859 		init_entity_runnable_average(se);
8860 	}
8861 
8862 	return 1;
8863 
8864 err_free_rq:
8865 	kfree(cfs_rq);
8866 err:
8867 	return 0;
8868 }
8869 
8870 void online_fair_sched_group(struct task_group *tg)
8871 {
8872 	struct sched_entity *se;
8873 	struct rq *rq;
8874 	int i;
8875 
8876 	for_each_possible_cpu(i) {
8877 		rq = cpu_rq(i);
8878 		se = tg->se[i];
8879 
8880 		raw_spin_lock_irq(&rq->lock);
8881 		post_init_entity_util_avg(se);
8882 		sync_throttle(tg, i);
8883 		raw_spin_unlock_irq(&rq->lock);
8884 	}
8885 }
8886 
8887 void unregister_fair_sched_group(struct task_group *tg)
8888 {
8889 	unsigned long flags;
8890 	struct rq *rq;
8891 	int cpu;
8892 
8893 	for_each_possible_cpu(cpu) {
8894 		if (tg->se[cpu])
8895 			remove_entity_load_avg(tg->se[cpu]);
8896 
8897 		/*
8898 		 * Only empty task groups can be destroyed, so we can speculatively
8899 		 * check on_list without danger of it being re-added.
8900 		 */
8901 		if (!tg->cfs_rq[cpu]->on_list)
8902 			continue;
8903 
8904 		rq = cpu_rq(cpu);
8905 
8906 		raw_spin_lock_irqsave(&rq->lock, flags);
8907 		list_del_leaf_cfs_rq(tg->cfs_rq[cpu]);
8908 		raw_spin_unlock_irqrestore(&rq->lock, flags);
8909 	}
8910 }
8911 
8912 void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
8913 			struct sched_entity *se, int cpu,
8914 			struct sched_entity *parent)
8915 {
8916 	struct rq *rq = cpu_rq(cpu);
8917 
8918 	cfs_rq->tg = tg;
8919 	cfs_rq->rq = rq;
8920 	init_cfs_rq_runtime(cfs_rq);
8921 
8922 	tg->cfs_rq[cpu] = cfs_rq;
8923 	tg->se[cpu] = se;
8924 
8925 	/* se could be NULL for root_task_group */
8926 	if (!se)
8927 		return;
8928 
8929 	if (!parent) {
8930 		se->cfs_rq = &rq->cfs;
8931 		se->depth = 0;
8932 	} else {
8933 		se->cfs_rq = parent->my_q;
8934 		se->depth = parent->depth + 1;
8935 	}
8936 
8937 	se->my_q = cfs_rq;
8938 	/* guarantee group entities always have weight */
8939 	update_load_set(&se->load, NICE_0_LOAD);
8940 	se->parent = parent;
8941 }
8942 
8943 static DEFINE_MUTEX(shares_mutex);
8944 
8945 int sched_group_set_shares(struct task_group *tg, unsigned long shares)
8946 {
8947 	int i;
8948 	unsigned long flags;
8949 
8950 	/*
8951 	 * We can't change the weight of the root cgroup.
8952 	 */
8953 	if (!tg->se[0])
8954 		return -EINVAL;
8955 
8956 	shares = clamp(shares, scale_load(MIN_SHARES), scale_load(MAX_SHARES));
8957 
8958 	mutex_lock(&shares_mutex);
8959 	if (tg->shares == shares)
8960 		goto done;
8961 
8962 	tg->shares = shares;
8963 	for_each_possible_cpu(i) {
8964 		struct rq *rq = cpu_rq(i);
8965 		struct sched_entity *se;
8966 
8967 		se = tg->se[i];
8968 		/* Propagate contribution to hierarchy */
8969 		raw_spin_lock_irqsave(&rq->lock, flags);
8970 
8971 		/* Possible calls to update_curr() need rq clock */
8972 		update_rq_clock(rq);
8973 		for_each_sched_entity(se)
8974 			update_cfs_shares(group_cfs_rq(se));
8975 		raw_spin_unlock_irqrestore(&rq->lock, flags);
8976 	}
8977 
8978 done:
8979 	mutex_unlock(&shares_mutex);
8980 	return 0;
8981 }
8982 #else /* CONFIG_FAIR_GROUP_SCHED */
8983 
8984 void free_fair_sched_group(struct task_group *tg) { }
8985 
8986 int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
8987 {
8988 	return 1;
8989 }
8990 
8991 void online_fair_sched_group(struct task_group *tg) { }
8992 
8993 void unregister_fair_sched_group(struct task_group *tg) { }
8994 
8995 #endif /* CONFIG_FAIR_GROUP_SCHED */
8996 
8997 
8998 static unsigned int get_rr_interval_fair(struct rq *rq, struct task_struct *task)
8999 {
9000 	struct sched_entity *se = &task->se;
9001 	unsigned int rr_interval = 0;
9002 
9003 	/*
9004 	 * Time slice is 0 for SCHED_OTHER tasks that are on an otherwise
9005 	 * idle runqueue:
9006 	 */
9007 	if (rq->cfs.load.weight)
9008 		rr_interval = NS_TO_JIFFIES(sched_slice(cfs_rq_of(se), se));
9009 
9010 	return rr_interval;
9011 }
9012 
9013 /*
9014  * All the scheduling class methods:
9015  */
9016 const struct sched_class fair_sched_class = {
9017 	.next			= &idle_sched_class,
9018 	.enqueue_task		= enqueue_task_fair,
9019 	.dequeue_task		= dequeue_task_fair,
9020 	.yield_task		= yield_task_fair,
9021 	.yield_to_task		= yield_to_task_fair,
9022 
9023 	.check_preempt_curr	= check_preempt_wakeup,
9024 
9025 	.pick_next_task		= pick_next_task_fair,
9026 	.put_prev_task		= put_prev_task_fair,
9027 
9028 #ifdef CONFIG_SMP
9029 	.select_task_rq		= select_task_rq_fair,
9030 	.migrate_task_rq	= migrate_task_rq_fair,
9031 
9032 	.rq_online		= rq_online_fair,
9033 	.rq_offline		= rq_offline_fair,
9034 
9035 	.task_dead		= task_dead_fair,
9036 	.set_cpus_allowed	= set_cpus_allowed_common,
9037 #endif
9038 
9039 	.set_curr_task          = set_curr_task_fair,
9040 	.task_tick		= task_tick_fair,
9041 	.task_fork		= task_fork_fair,
9042 
9043 	.prio_changed		= prio_changed_fair,
9044 	.switched_from		= switched_from_fair,
9045 	.switched_to		= switched_to_fair,
9046 
9047 	.get_rr_interval	= get_rr_interval_fair,
9048 
9049 	.update_curr		= update_curr_fair,
9050 
9051 #ifdef CONFIG_FAIR_GROUP_SCHED
9052 	.task_change_group	= task_change_group_fair,
9053 #endif
9054 };
9055 
9056 #ifdef CONFIG_SCHED_DEBUG
9057 void print_cfs_stats(struct seq_file *m, int cpu)
9058 {
9059 	struct cfs_rq *cfs_rq;
9060 
9061 	rcu_read_lock();
9062 	for_each_leaf_cfs_rq(cpu_rq(cpu), cfs_rq)
9063 		print_cfs_rq(m, cpu, cfs_rq);
9064 	rcu_read_unlock();
9065 }
9066 
9067 #ifdef CONFIG_NUMA_BALANCING
9068 void show_numa_stats(struct task_struct *p, struct seq_file *m)
9069 {
9070 	int node;
9071 	unsigned long tsf = 0, tpf = 0, gsf = 0, gpf = 0;
9072 
9073 	for_each_online_node(node) {
9074 		if (p->numa_faults) {
9075 			tsf = p->numa_faults[task_faults_idx(NUMA_MEM, node, 0)];
9076 			tpf = p->numa_faults[task_faults_idx(NUMA_MEM, node, 1)];
9077 		}
9078 		if (p->numa_group) {
9079 			gsf = p->numa_group->faults[task_faults_idx(NUMA_MEM, node, 0)],
9080 			gpf = p->numa_group->faults[task_faults_idx(NUMA_MEM, node, 1)];
9081 		}
9082 		print_numa_stats(m, node, tsf, tpf, gsf, gpf);
9083 	}
9084 }
9085 #endif /* CONFIG_NUMA_BALANCING */
9086 #endif /* CONFIG_SCHED_DEBUG */
9087 
9088 __init void init_sched_fair_class(void)
9089 {
9090 #ifdef CONFIG_SMP
9091 	open_softirq(SCHED_SOFTIRQ, run_rebalance_domains);
9092 
9093 #ifdef CONFIG_NO_HZ_COMMON
9094 	nohz.next_balance = jiffies;
9095 	zalloc_cpumask_var(&nohz.idle_cpus_mask, GFP_NOWAIT);
9096 #endif
9097 #endif /* SMP */
9098 
9099 }
9100