xref: /openbmc/linux/kernel/sched/fair.c (revision ae3473231e77a3f1909d48cd144cebe5e1d049b3)
1 /*
2  * Completely Fair Scheduling (CFS) Class (SCHED_NORMAL/SCHED_BATCH)
3  *
4  *  Copyright (C) 2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
5  *
6  *  Interactivity improvements by Mike Galbraith
7  *  (C) 2007 Mike Galbraith <efault@gmx.de>
8  *
9  *  Various enhancements by Dmitry Adamushko.
10  *  (C) 2007 Dmitry Adamushko <dmitry.adamushko@gmail.com>
11  *
12  *  Group scheduling enhancements by Srivatsa Vaddagiri
13  *  Copyright IBM Corporation, 2007
14  *  Author: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>
15  *
16  *  Scaled math optimizations by Thomas Gleixner
17  *  Copyright (C) 2007, Thomas Gleixner <tglx@linutronix.de>
18  *
19  *  Adaptive scheduling granularity, math enhancements by Peter Zijlstra
20  *  Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
21  */
22 
23 #include <linux/sched.h>
24 #include <linux/latencytop.h>
25 #include <linux/cpumask.h>
26 #include <linux/cpuidle.h>
27 #include <linux/slab.h>
28 #include <linux/profile.h>
29 #include <linux/interrupt.h>
30 #include <linux/mempolicy.h>
31 #include <linux/migrate.h>
32 #include <linux/task_work.h>
33 
34 #include <trace/events/sched.h>
35 
36 #include "sched.h"
37 
38 /*
39  * Targeted preemption latency for CPU-bound tasks:
40  *
41  * NOTE: this latency value is not the same as the concept of
42  * 'timeslice length' - timeslices in CFS are of variable length
43  * and have no persistent notion like in traditional, time-slice
44  * based scheduling concepts.
45  *
46  * (to see the precise effective timeslice length of your workload,
47  *  run vmstat and monitor the context-switches (cs) field)
48  *
49  * (default: 6ms * (1 + ilog(ncpus)), units: nanoseconds)
50  */
51 unsigned int sysctl_sched_latency			= 6000000ULL;
52 unsigned int normalized_sysctl_sched_latency		= 6000000ULL;
53 
54 /*
55  * The initial- and re-scaling of tunables is configurable
56  *
57  * Options are:
58  *
59  *   SCHED_TUNABLESCALING_NONE - unscaled, always *1
60  *   SCHED_TUNABLESCALING_LOG - scaled logarithmical, *1+ilog(ncpus)
61  *   SCHED_TUNABLESCALING_LINEAR - scaled linear, *ncpus
62  *
63  * (default SCHED_TUNABLESCALING_LOG = *(1+ilog(ncpus))
64  */
65 enum sched_tunable_scaling sysctl_sched_tunable_scaling = SCHED_TUNABLESCALING_LOG;
66 
67 /*
68  * Minimal preemption granularity for CPU-bound tasks:
69  *
70  * (default: 0.75 msec * (1 + ilog(ncpus)), units: nanoseconds)
71  */
72 unsigned int sysctl_sched_min_granularity		= 750000ULL;
73 unsigned int normalized_sysctl_sched_min_granularity	= 750000ULL;
74 
75 /*
76  * This value is kept at sysctl_sched_latency/sysctl_sched_min_granularity
77  */
78 static unsigned int sched_nr_latency = 8;
79 
80 /*
81  * After fork, child runs first. If set to 0 (default) then
82  * parent will (try to) run first.
83  */
84 unsigned int sysctl_sched_child_runs_first __read_mostly;
85 
86 /*
87  * SCHED_OTHER wake-up granularity.
88  *
89  * This option delays the preemption effects of decoupled workloads
90  * and reduces their over-scheduling. Synchronous workloads will still
91  * have immediate wakeup/sleep latencies.
92  *
93  * (default: 1 msec * (1 + ilog(ncpus)), units: nanoseconds)
94  */
95 unsigned int sysctl_sched_wakeup_granularity		= 1000000UL;
96 unsigned int normalized_sysctl_sched_wakeup_granularity	= 1000000UL;
97 
98 const_debug unsigned int sysctl_sched_migration_cost	= 500000UL;
99 
100 #ifdef CONFIG_SMP
101 /*
102  * For asym packing, by default the lower numbered cpu has higher priority.
103  */
104 int __weak arch_asym_cpu_priority(int cpu)
105 {
106 	return -cpu;
107 }
108 #endif
109 
110 #ifdef CONFIG_CFS_BANDWIDTH
111 /*
112  * Amount of runtime to allocate from global (tg) to local (per-cfs_rq) pool
113  * each time a cfs_rq requests quota.
114  *
115  * Note: in the case that the slice exceeds the runtime remaining (either due
116  * to consumption or the quota being specified to be smaller than the slice)
117  * we will always only issue the remaining available time.
118  *
119  * (default: 5 msec, units: microseconds)
120  */
121 unsigned int sysctl_sched_cfs_bandwidth_slice		= 5000UL;
122 #endif
123 
124 /*
125  * The margin used when comparing utilization with CPU capacity:
126  * util * margin < capacity * 1024
127  *
128  * (default: ~20%)
129  */
130 unsigned int capacity_margin				= 1280;
131 
132 static inline void update_load_add(struct load_weight *lw, unsigned long inc)
133 {
134 	lw->weight += inc;
135 	lw->inv_weight = 0;
136 }
137 
138 static inline void update_load_sub(struct load_weight *lw, unsigned long dec)
139 {
140 	lw->weight -= dec;
141 	lw->inv_weight = 0;
142 }
143 
144 static inline void update_load_set(struct load_weight *lw, unsigned long w)
145 {
146 	lw->weight = w;
147 	lw->inv_weight = 0;
148 }
149 
150 /*
151  * Increase the granularity value when there are more CPUs,
152  * because with more CPUs the 'effective latency' as visible
153  * to users decreases. But the relationship is not linear,
154  * so pick a second-best guess by going with the log2 of the
155  * number of CPUs.
156  *
157  * This idea comes from the SD scheduler of Con Kolivas:
158  */
159 static unsigned int get_update_sysctl_factor(void)
160 {
161 	unsigned int cpus = min_t(unsigned int, num_online_cpus(), 8);
162 	unsigned int factor;
163 
164 	switch (sysctl_sched_tunable_scaling) {
165 	case SCHED_TUNABLESCALING_NONE:
166 		factor = 1;
167 		break;
168 	case SCHED_TUNABLESCALING_LINEAR:
169 		factor = cpus;
170 		break;
171 	case SCHED_TUNABLESCALING_LOG:
172 	default:
173 		factor = 1 + ilog2(cpus);
174 		break;
175 	}
176 
177 	return factor;
178 }
179 
180 static void update_sysctl(void)
181 {
182 	unsigned int factor = get_update_sysctl_factor();
183 
184 #define SET_SYSCTL(name) \
185 	(sysctl_##name = (factor) * normalized_sysctl_##name)
186 	SET_SYSCTL(sched_min_granularity);
187 	SET_SYSCTL(sched_latency);
188 	SET_SYSCTL(sched_wakeup_granularity);
189 #undef SET_SYSCTL
190 }
191 
192 void sched_init_granularity(void)
193 {
194 	update_sysctl();
195 }
196 
197 #define WMULT_CONST	(~0U)
198 #define WMULT_SHIFT	32
199 
200 static void __update_inv_weight(struct load_weight *lw)
201 {
202 	unsigned long w;
203 
204 	if (likely(lw->inv_weight))
205 		return;
206 
207 	w = scale_load_down(lw->weight);
208 
209 	if (BITS_PER_LONG > 32 && unlikely(w >= WMULT_CONST))
210 		lw->inv_weight = 1;
211 	else if (unlikely(!w))
212 		lw->inv_weight = WMULT_CONST;
213 	else
214 		lw->inv_weight = WMULT_CONST / w;
215 }
216 
217 /*
218  * delta_exec * weight / lw.weight
219  *   OR
220  * (delta_exec * (weight * lw->inv_weight)) >> WMULT_SHIFT
221  *
222  * Either weight := NICE_0_LOAD and lw \e sched_prio_to_wmult[], in which case
223  * we're guaranteed shift stays positive because inv_weight is guaranteed to
224  * fit 32 bits, and NICE_0_LOAD gives another 10 bits; therefore shift >= 22.
225  *
226  * Or, weight =< lw.weight (because lw.weight is the runqueue weight), thus
227  * weight/lw.weight <= 1, and therefore our shift will also be positive.
228  */
229 static u64 __calc_delta(u64 delta_exec, unsigned long weight, struct load_weight *lw)
230 {
231 	u64 fact = scale_load_down(weight);
232 	int shift = WMULT_SHIFT;
233 
234 	__update_inv_weight(lw);
235 
236 	if (unlikely(fact >> 32)) {
237 		while (fact >> 32) {
238 			fact >>= 1;
239 			shift--;
240 		}
241 	}
242 
243 	/* hint to use a 32x32->64 mul */
244 	fact = (u64)(u32)fact * lw->inv_weight;
245 
246 	while (fact >> 32) {
247 		fact >>= 1;
248 		shift--;
249 	}
250 
251 	return mul_u64_u32_shr(delta_exec, fact, shift);
252 }
253 
254 
255 const struct sched_class fair_sched_class;
256 
257 /**************************************************************
258  * CFS operations on generic schedulable entities:
259  */
260 
261 #ifdef CONFIG_FAIR_GROUP_SCHED
262 
263 /* cpu runqueue to which this cfs_rq is attached */
264 static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
265 {
266 	return cfs_rq->rq;
267 }
268 
269 /* An entity is a task if it doesn't "own" a runqueue */
270 #define entity_is_task(se)	(!se->my_q)
271 
272 static inline struct task_struct *task_of(struct sched_entity *se)
273 {
274 	SCHED_WARN_ON(!entity_is_task(se));
275 	return container_of(se, struct task_struct, se);
276 }
277 
278 /* Walk up scheduling entities hierarchy */
279 #define for_each_sched_entity(se) \
280 		for (; se; se = se->parent)
281 
282 static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
283 {
284 	return p->se.cfs_rq;
285 }
286 
287 /* runqueue on which this entity is (to be) queued */
288 static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
289 {
290 	return se->cfs_rq;
291 }
292 
293 /* runqueue "owned" by this group */
294 static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
295 {
296 	return grp->my_q;
297 }
298 
299 static inline void list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq)
300 {
301 	if (!cfs_rq->on_list) {
302 		struct rq *rq = rq_of(cfs_rq);
303 		int cpu = cpu_of(rq);
304 		/*
305 		 * Ensure we either appear before our parent (if already
306 		 * enqueued) or force our parent to appear after us when it is
307 		 * enqueued. The fact that we always enqueue bottom-up
308 		 * reduces this to two cases and a special case for the root
309 		 * cfs_rq. Furthermore, it also means that we will always reset
310 		 * tmp_alone_branch either when the branch is connected
311 		 * to a tree or when we reach the beg of the tree
312 		 */
313 		if (cfs_rq->tg->parent &&
314 		    cfs_rq->tg->parent->cfs_rq[cpu]->on_list) {
315 			/*
316 			 * If parent is already on the list, we add the child
317 			 * just before. Thanks to circular linked property of
318 			 * the list, this means to put the child at the tail
319 			 * of the list that starts by parent.
320 			 */
321 			list_add_tail_rcu(&cfs_rq->leaf_cfs_rq_list,
322 				&(cfs_rq->tg->parent->cfs_rq[cpu]->leaf_cfs_rq_list));
323 			/*
324 			 * The branch is now connected to its tree so we can
325 			 * reset tmp_alone_branch to the beginning of the
326 			 * list.
327 			 */
328 			rq->tmp_alone_branch = &rq->leaf_cfs_rq_list;
329 		} else if (!cfs_rq->tg->parent) {
330 			/*
331 			 * cfs rq without parent should be put
332 			 * at the tail of the list.
333 			 */
334 			list_add_tail_rcu(&cfs_rq->leaf_cfs_rq_list,
335 				&rq->leaf_cfs_rq_list);
336 			/*
337 			 * We have reach the beg of a tree so we can reset
338 			 * tmp_alone_branch to the beginning of the list.
339 			 */
340 			rq->tmp_alone_branch = &rq->leaf_cfs_rq_list;
341 		} else {
342 			/*
343 			 * The parent has not already been added so we want to
344 			 * make sure that it will be put after us.
345 			 * tmp_alone_branch points to the beg of the branch
346 			 * where we will add parent.
347 			 */
348 			list_add_rcu(&cfs_rq->leaf_cfs_rq_list,
349 				rq->tmp_alone_branch);
350 			/*
351 			 * update tmp_alone_branch to points to the new beg
352 			 * of the branch
353 			 */
354 			rq->tmp_alone_branch = &cfs_rq->leaf_cfs_rq_list;
355 		}
356 
357 		cfs_rq->on_list = 1;
358 	}
359 }
360 
361 static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq)
362 {
363 	if (cfs_rq->on_list) {
364 		list_del_rcu(&cfs_rq->leaf_cfs_rq_list);
365 		cfs_rq->on_list = 0;
366 	}
367 }
368 
369 /* Iterate thr' all leaf cfs_rq's on a runqueue */
370 #define for_each_leaf_cfs_rq(rq, cfs_rq) \
371 	list_for_each_entry_rcu(cfs_rq, &rq->leaf_cfs_rq_list, leaf_cfs_rq_list)
372 
373 /* Do the two (enqueued) entities belong to the same group ? */
374 static inline struct cfs_rq *
375 is_same_group(struct sched_entity *se, struct sched_entity *pse)
376 {
377 	if (se->cfs_rq == pse->cfs_rq)
378 		return se->cfs_rq;
379 
380 	return NULL;
381 }
382 
383 static inline struct sched_entity *parent_entity(struct sched_entity *se)
384 {
385 	return se->parent;
386 }
387 
388 static void
389 find_matching_se(struct sched_entity **se, struct sched_entity **pse)
390 {
391 	int se_depth, pse_depth;
392 
393 	/*
394 	 * preemption test can be made between sibling entities who are in the
395 	 * same cfs_rq i.e who have a common parent. Walk up the hierarchy of
396 	 * both tasks until we find their ancestors who are siblings of common
397 	 * parent.
398 	 */
399 
400 	/* First walk up until both entities are at same depth */
401 	se_depth = (*se)->depth;
402 	pse_depth = (*pse)->depth;
403 
404 	while (se_depth > pse_depth) {
405 		se_depth--;
406 		*se = parent_entity(*se);
407 	}
408 
409 	while (pse_depth > se_depth) {
410 		pse_depth--;
411 		*pse = parent_entity(*pse);
412 	}
413 
414 	while (!is_same_group(*se, *pse)) {
415 		*se = parent_entity(*se);
416 		*pse = parent_entity(*pse);
417 	}
418 }
419 
420 #else	/* !CONFIG_FAIR_GROUP_SCHED */
421 
422 static inline struct task_struct *task_of(struct sched_entity *se)
423 {
424 	return container_of(se, struct task_struct, se);
425 }
426 
427 static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
428 {
429 	return container_of(cfs_rq, struct rq, cfs);
430 }
431 
432 #define entity_is_task(se)	1
433 
434 #define for_each_sched_entity(se) \
435 		for (; se; se = NULL)
436 
437 static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
438 {
439 	return &task_rq(p)->cfs;
440 }
441 
442 static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
443 {
444 	struct task_struct *p = task_of(se);
445 	struct rq *rq = task_rq(p);
446 
447 	return &rq->cfs;
448 }
449 
450 /* runqueue "owned" by this group */
451 static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
452 {
453 	return NULL;
454 }
455 
456 static inline void list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq)
457 {
458 }
459 
460 static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq)
461 {
462 }
463 
464 #define for_each_leaf_cfs_rq(rq, cfs_rq) \
465 		for (cfs_rq = &rq->cfs; cfs_rq; cfs_rq = NULL)
466 
467 static inline struct sched_entity *parent_entity(struct sched_entity *se)
468 {
469 	return NULL;
470 }
471 
472 static inline void
473 find_matching_se(struct sched_entity **se, struct sched_entity **pse)
474 {
475 }
476 
477 #endif	/* CONFIG_FAIR_GROUP_SCHED */
478 
479 static __always_inline
480 void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec);
481 
482 /**************************************************************
483  * Scheduling class tree data structure manipulation methods:
484  */
485 
486 static inline u64 max_vruntime(u64 max_vruntime, u64 vruntime)
487 {
488 	s64 delta = (s64)(vruntime - max_vruntime);
489 	if (delta > 0)
490 		max_vruntime = vruntime;
491 
492 	return max_vruntime;
493 }
494 
495 static inline u64 min_vruntime(u64 min_vruntime, u64 vruntime)
496 {
497 	s64 delta = (s64)(vruntime - min_vruntime);
498 	if (delta < 0)
499 		min_vruntime = vruntime;
500 
501 	return min_vruntime;
502 }
503 
504 static inline int entity_before(struct sched_entity *a,
505 				struct sched_entity *b)
506 {
507 	return (s64)(a->vruntime - b->vruntime) < 0;
508 }
509 
510 static void update_min_vruntime(struct cfs_rq *cfs_rq)
511 {
512 	struct sched_entity *curr = cfs_rq->curr;
513 
514 	u64 vruntime = cfs_rq->min_vruntime;
515 
516 	if (curr) {
517 		if (curr->on_rq)
518 			vruntime = curr->vruntime;
519 		else
520 			curr = NULL;
521 	}
522 
523 	if (cfs_rq->rb_leftmost) {
524 		struct sched_entity *se = rb_entry(cfs_rq->rb_leftmost,
525 						   struct sched_entity,
526 						   run_node);
527 
528 		if (!curr)
529 			vruntime = se->vruntime;
530 		else
531 			vruntime = min_vruntime(vruntime, se->vruntime);
532 	}
533 
534 	/* ensure we never gain time by being placed backwards. */
535 	cfs_rq->min_vruntime = max_vruntime(cfs_rq->min_vruntime, vruntime);
536 #ifndef CONFIG_64BIT
537 	smp_wmb();
538 	cfs_rq->min_vruntime_copy = cfs_rq->min_vruntime;
539 #endif
540 }
541 
542 /*
543  * Enqueue an entity into the rb-tree:
544  */
545 static void __enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
546 {
547 	struct rb_node **link = &cfs_rq->tasks_timeline.rb_node;
548 	struct rb_node *parent = NULL;
549 	struct sched_entity *entry;
550 	int leftmost = 1;
551 
552 	/*
553 	 * Find the right place in the rbtree:
554 	 */
555 	while (*link) {
556 		parent = *link;
557 		entry = rb_entry(parent, struct sched_entity, run_node);
558 		/*
559 		 * We dont care about collisions. Nodes with
560 		 * the same key stay together.
561 		 */
562 		if (entity_before(se, entry)) {
563 			link = &parent->rb_left;
564 		} else {
565 			link = &parent->rb_right;
566 			leftmost = 0;
567 		}
568 	}
569 
570 	/*
571 	 * Maintain a cache of leftmost tree entries (it is frequently
572 	 * used):
573 	 */
574 	if (leftmost)
575 		cfs_rq->rb_leftmost = &se->run_node;
576 
577 	rb_link_node(&se->run_node, parent, link);
578 	rb_insert_color(&se->run_node, &cfs_rq->tasks_timeline);
579 }
580 
581 static void __dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
582 {
583 	if (cfs_rq->rb_leftmost == &se->run_node) {
584 		struct rb_node *next_node;
585 
586 		next_node = rb_next(&se->run_node);
587 		cfs_rq->rb_leftmost = next_node;
588 	}
589 
590 	rb_erase(&se->run_node, &cfs_rq->tasks_timeline);
591 }
592 
593 struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq)
594 {
595 	struct rb_node *left = cfs_rq->rb_leftmost;
596 
597 	if (!left)
598 		return NULL;
599 
600 	return rb_entry(left, struct sched_entity, run_node);
601 }
602 
603 static struct sched_entity *__pick_next_entity(struct sched_entity *se)
604 {
605 	struct rb_node *next = rb_next(&se->run_node);
606 
607 	if (!next)
608 		return NULL;
609 
610 	return rb_entry(next, struct sched_entity, run_node);
611 }
612 
613 #ifdef CONFIG_SCHED_DEBUG
614 struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq)
615 {
616 	struct rb_node *last = rb_last(&cfs_rq->tasks_timeline);
617 
618 	if (!last)
619 		return NULL;
620 
621 	return rb_entry(last, struct sched_entity, run_node);
622 }
623 
624 /**************************************************************
625  * Scheduling class statistics methods:
626  */
627 
628 int sched_proc_update_handler(struct ctl_table *table, int write,
629 		void __user *buffer, size_t *lenp,
630 		loff_t *ppos)
631 {
632 	int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
633 	unsigned int factor = get_update_sysctl_factor();
634 
635 	if (ret || !write)
636 		return ret;
637 
638 	sched_nr_latency = DIV_ROUND_UP(sysctl_sched_latency,
639 					sysctl_sched_min_granularity);
640 
641 #define WRT_SYSCTL(name) \
642 	(normalized_sysctl_##name = sysctl_##name / (factor))
643 	WRT_SYSCTL(sched_min_granularity);
644 	WRT_SYSCTL(sched_latency);
645 	WRT_SYSCTL(sched_wakeup_granularity);
646 #undef WRT_SYSCTL
647 
648 	return 0;
649 }
650 #endif
651 
652 /*
653  * delta /= w
654  */
655 static inline u64 calc_delta_fair(u64 delta, struct sched_entity *se)
656 {
657 	if (unlikely(se->load.weight != NICE_0_LOAD))
658 		delta = __calc_delta(delta, NICE_0_LOAD, &se->load);
659 
660 	return delta;
661 }
662 
663 /*
664  * The idea is to set a period in which each task runs once.
665  *
666  * When there are too many tasks (sched_nr_latency) we have to stretch
667  * this period because otherwise the slices get too small.
668  *
669  * p = (nr <= nl) ? l : l*nr/nl
670  */
671 static u64 __sched_period(unsigned long nr_running)
672 {
673 	if (unlikely(nr_running > sched_nr_latency))
674 		return nr_running * sysctl_sched_min_granularity;
675 	else
676 		return sysctl_sched_latency;
677 }
678 
679 /*
680  * We calculate the wall-time slice from the period by taking a part
681  * proportional to the weight.
682  *
683  * s = p*P[w/rw]
684  */
685 static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
686 {
687 	u64 slice = __sched_period(cfs_rq->nr_running + !se->on_rq);
688 
689 	for_each_sched_entity(se) {
690 		struct load_weight *load;
691 		struct load_weight lw;
692 
693 		cfs_rq = cfs_rq_of(se);
694 		load = &cfs_rq->load;
695 
696 		if (unlikely(!se->on_rq)) {
697 			lw = cfs_rq->load;
698 
699 			update_load_add(&lw, se->load.weight);
700 			load = &lw;
701 		}
702 		slice = __calc_delta(slice, se->load.weight, load);
703 	}
704 	return slice;
705 }
706 
707 /*
708  * We calculate the vruntime slice of a to-be-inserted task.
709  *
710  * vs = s/w
711  */
712 static u64 sched_vslice(struct cfs_rq *cfs_rq, struct sched_entity *se)
713 {
714 	return calc_delta_fair(sched_slice(cfs_rq, se), se);
715 }
716 
717 #ifdef CONFIG_SMP
718 static int select_idle_sibling(struct task_struct *p, int prev_cpu, int cpu);
719 static unsigned long task_h_load(struct task_struct *p);
720 
721 /*
722  * We choose a half-life close to 1 scheduling period.
723  * Note: The tables runnable_avg_yN_inv and runnable_avg_yN_sum are
724  * dependent on this value.
725  */
726 #define LOAD_AVG_PERIOD 32
727 #define LOAD_AVG_MAX 47742 /* maximum possible load avg */
728 #define LOAD_AVG_MAX_N 345 /* number of full periods to produce LOAD_AVG_MAX */
729 
730 /* Give new sched_entity start runnable values to heavy its load in infant time */
731 void init_entity_runnable_average(struct sched_entity *se)
732 {
733 	struct sched_avg *sa = &se->avg;
734 
735 	sa->last_update_time = 0;
736 	/*
737 	 * sched_avg's period_contrib should be strictly less then 1024, so
738 	 * we give it 1023 to make sure it is almost a period (1024us), and
739 	 * will definitely be update (after enqueue).
740 	 */
741 	sa->period_contrib = 1023;
742 	/*
743 	 * Tasks are intialized with full load to be seen as heavy tasks until
744 	 * they get a chance to stabilize to their real load level.
745 	 * Group entities are intialized with zero load to reflect the fact that
746 	 * nothing has been attached to the task group yet.
747 	 */
748 	if (entity_is_task(se))
749 		sa->load_avg = scale_load_down(se->load.weight);
750 	sa->load_sum = sa->load_avg * LOAD_AVG_MAX;
751 	/*
752 	 * At this point, util_avg won't be used in select_task_rq_fair anyway
753 	 */
754 	sa->util_avg = 0;
755 	sa->util_sum = 0;
756 	/* when this task enqueue'ed, it will contribute to its cfs_rq's load_avg */
757 }
758 
759 static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq);
760 static void attach_entity_cfs_rq(struct sched_entity *se);
761 
762 /*
763  * With new tasks being created, their initial util_avgs are extrapolated
764  * based on the cfs_rq's current util_avg:
765  *
766  *   util_avg = cfs_rq->util_avg / (cfs_rq->load_avg + 1) * se.load.weight
767  *
768  * However, in many cases, the above util_avg does not give a desired
769  * value. Moreover, the sum of the util_avgs may be divergent, such
770  * as when the series is a harmonic series.
771  *
772  * To solve this problem, we also cap the util_avg of successive tasks to
773  * only 1/2 of the left utilization budget:
774  *
775  *   util_avg_cap = (1024 - cfs_rq->avg.util_avg) / 2^n
776  *
777  * where n denotes the nth task.
778  *
779  * For example, a simplest series from the beginning would be like:
780  *
781  *  task  util_avg: 512, 256, 128,  64,  32,   16,    8, ...
782  * cfs_rq util_avg: 512, 768, 896, 960, 992, 1008, 1016, ...
783  *
784  * Finally, that extrapolated util_avg is clamped to the cap (util_avg_cap)
785  * if util_avg > util_avg_cap.
786  */
787 void post_init_entity_util_avg(struct sched_entity *se)
788 {
789 	struct cfs_rq *cfs_rq = cfs_rq_of(se);
790 	struct sched_avg *sa = &se->avg;
791 	long cap = (long)(SCHED_CAPACITY_SCALE - cfs_rq->avg.util_avg) / 2;
792 
793 	if (cap > 0) {
794 		if (cfs_rq->avg.util_avg != 0) {
795 			sa->util_avg  = cfs_rq->avg.util_avg * se->load.weight;
796 			sa->util_avg /= (cfs_rq->avg.load_avg + 1);
797 
798 			if (sa->util_avg > cap)
799 				sa->util_avg = cap;
800 		} else {
801 			sa->util_avg = cap;
802 		}
803 		sa->util_sum = sa->util_avg * LOAD_AVG_MAX;
804 	}
805 
806 	if (entity_is_task(se)) {
807 		struct task_struct *p = task_of(se);
808 		if (p->sched_class != &fair_sched_class) {
809 			/*
810 			 * For !fair tasks do:
811 			 *
812 			update_cfs_rq_load_avg(now, cfs_rq, false);
813 			attach_entity_load_avg(cfs_rq, se);
814 			switched_from_fair(rq, p);
815 			 *
816 			 * such that the next switched_to_fair() has the
817 			 * expected state.
818 			 */
819 			se->avg.last_update_time = cfs_rq_clock_task(cfs_rq);
820 			return;
821 		}
822 	}
823 
824 	attach_entity_cfs_rq(se);
825 }
826 
827 #else /* !CONFIG_SMP */
828 void init_entity_runnable_average(struct sched_entity *se)
829 {
830 }
831 void post_init_entity_util_avg(struct sched_entity *se)
832 {
833 }
834 static void update_tg_load_avg(struct cfs_rq *cfs_rq, int force)
835 {
836 }
837 #endif /* CONFIG_SMP */
838 
839 /*
840  * Update the current task's runtime statistics.
841  */
842 static void update_curr(struct cfs_rq *cfs_rq)
843 {
844 	struct sched_entity *curr = cfs_rq->curr;
845 	u64 now = rq_clock_task(rq_of(cfs_rq));
846 	u64 delta_exec;
847 
848 	if (unlikely(!curr))
849 		return;
850 
851 	delta_exec = now - curr->exec_start;
852 	if (unlikely((s64)delta_exec <= 0))
853 		return;
854 
855 	curr->exec_start = now;
856 
857 	schedstat_set(curr->statistics.exec_max,
858 		      max(delta_exec, curr->statistics.exec_max));
859 
860 	curr->sum_exec_runtime += delta_exec;
861 	schedstat_add(cfs_rq->exec_clock, delta_exec);
862 
863 	curr->vruntime += calc_delta_fair(delta_exec, curr);
864 	update_min_vruntime(cfs_rq);
865 
866 	if (entity_is_task(curr)) {
867 		struct task_struct *curtask = task_of(curr);
868 
869 		trace_sched_stat_runtime(curtask, delta_exec, curr->vruntime);
870 		cpuacct_charge(curtask, delta_exec);
871 		account_group_exec_runtime(curtask, delta_exec);
872 	}
873 
874 	account_cfs_rq_runtime(cfs_rq, delta_exec);
875 }
876 
877 static void update_curr_fair(struct rq *rq)
878 {
879 	update_curr(cfs_rq_of(&rq->curr->se));
880 }
881 
882 static inline void
883 update_stats_wait_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
884 {
885 	u64 wait_start, prev_wait_start;
886 
887 	if (!schedstat_enabled())
888 		return;
889 
890 	wait_start = rq_clock(rq_of(cfs_rq));
891 	prev_wait_start = schedstat_val(se->statistics.wait_start);
892 
893 	if (entity_is_task(se) && task_on_rq_migrating(task_of(se)) &&
894 	    likely(wait_start > prev_wait_start))
895 		wait_start -= prev_wait_start;
896 
897 	schedstat_set(se->statistics.wait_start, wait_start);
898 }
899 
900 static inline void
901 update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se)
902 {
903 	struct task_struct *p;
904 	u64 delta;
905 
906 	if (!schedstat_enabled())
907 		return;
908 
909 	delta = rq_clock(rq_of(cfs_rq)) - schedstat_val(se->statistics.wait_start);
910 
911 	if (entity_is_task(se)) {
912 		p = task_of(se);
913 		if (task_on_rq_migrating(p)) {
914 			/*
915 			 * Preserve migrating task's wait time so wait_start
916 			 * time stamp can be adjusted to accumulate wait time
917 			 * prior to migration.
918 			 */
919 			schedstat_set(se->statistics.wait_start, delta);
920 			return;
921 		}
922 		trace_sched_stat_wait(p, delta);
923 	}
924 
925 	schedstat_set(se->statistics.wait_max,
926 		      max(schedstat_val(se->statistics.wait_max), delta));
927 	schedstat_inc(se->statistics.wait_count);
928 	schedstat_add(se->statistics.wait_sum, delta);
929 	schedstat_set(se->statistics.wait_start, 0);
930 }
931 
932 static inline void
933 update_stats_enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
934 {
935 	struct task_struct *tsk = NULL;
936 	u64 sleep_start, block_start;
937 
938 	if (!schedstat_enabled())
939 		return;
940 
941 	sleep_start = schedstat_val(se->statistics.sleep_start);
942 	block_start = schedstat_val(se->statistics.block_start);
943 
944 	if (entity_is_task(se))
945 		tsk = task_of(se);
946 
947 	if (sleep_start) {
948 		u64 delta = rq_clock(rq_of(cfs_rq)) - sleep_start;
949 
950 		if ((s64)delta < 0)
951 			delta = 0;
952 
953 		if (unlikely(delta > schedstat_val(se->statistics.sleep_max)))
954 			schedstat_set(se->statistics.sleep_max, delta);
955 
956 		schedstat_set(se->statistics.sleep_start, 0);
957 		schedstat_add(se->statistics.sum_sleep_runtime, delta);
958 
959 		if (tsk) {
960 			account_scheduler_latency(tsk, delta >> 10, 1);
961 			trace_sched_stat_sleep(tsk, delta);
962 		}
963 	}
964 	if (block_start) {
965 		u64 delta = rq_clock(rq_of(cfs_rq)) - block_start;
966 
967 		if ((s64)delta < 0)
968 			delta = 0;
969 
970 		if (unlikely(delta > schedstat_val(se->statistics.block_max)))
971 			schedstat_set(se->statistics.block_max, delta);
972 
973 		schedstat_set(se->statistics.block_start, 0);
974 		schedstat_add(se->statistics.sum_sleep_runtime, delta);
975 
976 		if (tsk) {
977 			if (tsk->in_iowait) {
978 				schedstat_add(se->statistics.iowait_sum, delta);
979 				schedstat_inc(se->statistics.iowait_count);
980 				trace_sched_stat_iowait(tsk, delta);
981 			}
982 
983 			trace_sched_stat_blocked(tsk, delta);
984 
985 			/*
986 			 * Blocking time is in units of nanosecs, so shift by
987 			 * 20 to get a milliseconds-range estimation of the
988 			 * amount of time that the task spent sleeping:
989 			 */
990 			if (unlikely(prof_on == SLEEP_PROFILING)) {
991 				profile_hits(SLEEP_PROFILING,
992 						(void *)get_wchan(tsk),
993 						delta >> 20);
994 			}
995 			account_scheduler_latency(tsk, delta >> 10, 0);
996 		}
997 	}
998 }
999 
1000 /*
1001  * Task is being enqueued - update stats:
1002  */
1003 static inline void
1004 update_stats_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
1005 {
1006 	if (!schedstat_enabled())
1007 		return;
1008 
1009 	/*
1010 	 * Are we enqueueing a waiting task? (for current tasks
1011 	 * a dequeue/enqueue event is a NOP)
1012 	 */
1013 	if (se != cfs_rq->curr)
1014 		update_stats_wait_start(cfs_rq, se);
1015 
1016 	if (flags & ENQUEUE_WAKEUP)
1017 		update_stats_enqueue_sleeper(cfs_rq, se);
1018 }
1019 
1020 static inline void
1021 update_stats_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
1022 {
1023 
1024 	if (!schedstat_enabled())
1025 		return;
1026 
1027 	/*
1028 	 * Mark the end of the wait period if dequeueing a
1029 	 * waiting task:
1030 	 */
1031 	if (se != cfs_rq->curr)
1032 		update_stats_wait_end(cfs_rq, se);
1033 
1034 	if ((flags & DEQUEUE_SLEEP) && entity_is_task(se)) {
1035 		struct task_struct *tsk = task_of(se);
1036 
1037 		if (tsk->state & TASK_INTERRUPTIBLE)
1038 			schedstat_set(se->statistics.sleep_start,
1039 				      rq_clock(rq_of(cfs_rq)));
1040 		if (tsk->state & TASK_UNINTERRUPTIBLE)
1041 			schedstat_set(se->statistics.block_start,
1042 				      rq_clock(rq_of(cfs_rq)));
1043 	}
1044 }
1045 
1046 /*
1047  * We are picking a new current task - update its stats:
1048  */
1049 static inline void
1050 update_stats_curr_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
1051 {
1052 	/*
1053 	 * We are starting a new run period:
1054 	 */
1055 	se->exec_start = rq_clock_task(rq_of(cfs_rq));
1056 }
1057 
1058 /**************************************************
1059  * Scheduling class queueing methods:
1060  */
1061 
1062 #ifdef CONFIG_NUMA_BALANCING
1063 /*
1064  * Approximate time to scan a full NUMA task in ms. The task scan period is
1065  * calculated based on the tasks virtual memory size and
1066  * numa_balancing_scan_size.
1067  */
1068 unsigned int sysctl_numa_balancing_scan_period_min = 1000;
1069 unsigned int sysctl_numa_balancing_scan_period_max = 60000;
1070 
1071 /* Portion of address space to scan in MB */
1072 unsigned int sysctl_numa_balancing_scan_size = 256;
1073 
1074 /* Scan @scan_size MB every @scan_period after an initial @scan_delay in ms */
1075 unsigned int sysctl_numa_balancing_scan_delay = 1000;
1076 
1077 static unsigned int task_nr_scan_windows(struct task_struct *p)
1078 {
1079 	unsigned long rss = 0;
1080 	unsigned long nr_scan_pages;
1081 
1082 	/*
1083 	 * Calculations based on RSS as non-present and empty pages are skipped
1084 	 * by the PTE scanner and NUMA hinting faults should be trapped based
1085 	 * on resident pages
1086 	 */
1087 	nr_scan_pages = sysctl_numa_balancing_scan_size << (20 - PAGE_SHIFT);
1088 	rss = get_mm_rss(p->mm);
1089 	if (!rss)
1090 		rss = nr_scan_pages;
1091 
1092 	rss = round_up(rss, nr_scan_pages);
1093 	return rss / nr_scan_pages;
1094 }
1095 
1096 /* For sanitys sake, never scan more PTEs than MAX_SCAN_WINDOW MB/sec. */
1097 #define MAX_SCAN_WINDOW 2560
1098 
1099 static unsigned int task_scan_min(struct task_struct *p)
1100 {
1101 	unsigned int scan_size = READ_ONCE(sysctl_numa_balancing_scan_size);
1102 	unsigned int scan, floor;
1103 	unsigned int windows = 1;
1104 
1105 	if (scan_size < MAX_SCAN_WINDOW)
1106 		windows = MAX_SCAN_WINDOW / scan_size;
1107 	floor = 1000 / windows;
1108 
1109 	scan = sysctl_numa_balancing_scan_period_min / task_nr_scan_windows(p);
1110 	return max_t(unsigned int, floor, scan);
1111 }
1112 
1113 static unsigned int task_scan_max(struct task_struct *p)
1114 {
1115 	unsigned int smin = task_scan_min(p);
1116 	unsigned int smax;
1117 
1118 	/* Watch for min being lower than max due to floor calculations */
1119 	smax = sysctl_numa_balancing_scan_period_max / task_nr_scan_windows(p);
1120 	return max(smin, smax);
1121 }
1122 
1123 static void account_numa_enqueue(struct rq *rq, struct task_struct *p)
1124 {
1125 	rq->nr_numa_running += (p->numa_preferred_nid != -1);
1126 	rq->nr_preferred_running += (p->numa_preferred_nid == task_node(p));
1127 }
1128 
1129 static void account_numa_dequeue(struct rq *rq, struct task_struct *p)
1130 {
1131 	rq->nr_numa_running -= (p->numa_preferred_nid != -1);
1132 	rq->nr_preferred_running -= (p->numa_preferred_nid == task_node(p));
1133 }
1134 
1135 struct numa_group {
1136 	atomic_t refcount;
1137 
1138 	spinlock_t lock; /* nr_tasks, tasks */
1139 	int nr_tasks;
1140 	pid_t gid;
1141 	int active_nodes;
1142 
1143 	struct rcu_head rcu;
1144 	unsigned long total_faults;
1145 	unsigned long max_faults_cpu;
1146 	/*
1147 	 * Faults_cpu is used to decide whether memory should move
1148 	 * towards the CPU. As a consequence, these stats are weighted
1149 	 * more by CPU use than by memory faults.
1150 	 */
1151 	unsigned long *faults_cpu;
1152 	unsigned long faults[0];
1153 };
1154 
1155 /* Shared or private faults. */
1156 #define NR_NUMA_HINT_FAULT_TYPES 2
1157 
1158 /* Memory and CPU locality */
1159 #define NR_NUMA_HINT_FAULT_STATS (NR_NUMA_HINT_FAULT_TYPES * 2)
1160 
1161 /* Averaged statistics, and temporary buffers. */
1162 #define NR_NUMA_HINT_FAULT_BUCKETS (NR_NUMA_HINT_FAULT_STATS * 2)
1163 
1164 pid_t task_numa_group_id(struct task_struct *p)
1165 {
1166 	return p->numa_group ? p->numa_group->gid : 0;
1167 }
1168 
1169 /*
1170  * The averaged statistics, shared & private, memory & cpu,
1171  * occupy the first half of the array. The second half of the
1172  * array is for current counters, which are averaged into the
1173  * first set by task_numa_placement.
1174  */
1175 static inline int task_faults_idx(enum numa_faults_stats s, int nid, int priv)
1176 {
1177 	return NR_NUMA_HINT_FAULT_TYPES * (s * nr_node_ids + nid) + priv;
1178 }
1179 
1180 static inline unsigned long task_faults(struct task_struct *p, int nid)
1181 {
1182 	if (!p->numa_faults)
1183 		return 0;
1184 
1185 	return p->numa_faults[task_faults_idx(NUMA_MEM, nid, 0)] +
1186 		p->numa_faults[task_faults_idx(NUMA_MEM, nid, 1)];
1187 }
1188 
1189 static inline unsigned long group_faults(struct task_struct *p, int nid)
1190 {
1191 	if (!p->numa_group)
1192 		return 0;
1193 
1194 	return p->numa_group->faults[task_faults_idx(NUMA_MEM, nid, 0)] +
1195 		p->numa_group->faults[task_faults_idx(NUMA_MEM, nid, 1)];
1196 }
1197 
1198 static inline unsigned long group_faults_cpu(struct numa_group *group, int nid)
1199 {
1200 	return group->faults_cpu[task_faults_idx(NUMA_MEM, nid, 0)] +
1201 		group->faults_cpu[task_faults_idx(NUMA_MEM, nid, 1)];
1202 }
1203 
1204 /*
1205  * A node triggering more than 1/3 as many NUMA faults as the maximum is
1206  * considered part of a numa group's pseudo-interleaving set. Migrations
1207  * between these nodes are slowed down, to allow things to settle down.
1208  */
1209 #define ACTIVE_NODE_FRACTION 3
1210 
1211 static bool numa_is_active_node(int nid, struct numa_group *ng)
1212 {
1213 	return group_faults_cpu(ng, nid) * ACTIVE_NODE_FRACTION > ng->max_faults_cpu;
1214 }
1215 
1216 /* Handle placement on systems where not all nodes are directly connected. */
1217 static unsigned long score_nearby_nodes(struct task_struct *p, int nid,
1218 					int maxdist, bool task)
1219 {
1220 	unsigned long score = 0;
1221 	int node;
1222 
1223 	/*
1224 	 * All nodes are directly connected, and the same distance
1225 	 * from each other. No need for fancy placement algorithms.
1226 	 */
1227 	if (sched_numa_topology_type == NUMA_DIRECT)
1228 		return 0;
1229 
1230 	/*
1231 	 * This code is called for each node, introducing N^2 complexity,
1232 	 * which should be ok given the number of nodes rarely exceeds 8.
1233 	 */
1234 	for_each_online_node(node) {
1235 		unsigned long faults;
1236 		int dist = node_distance(nid, node);
1237 
1238 		/*
1239 		 * The furthest away nodes in the system are not interesting
1240 		 * for placement; nid was already counted.
1241 		 */
1242 		if (dist == sched_max_numa_distance || node == nid)
1243 			continue;
1244 
1245 		/*
1246 		 * On systems with a backplane NUMA topology, compare groups
1247 		 * of nodes, and move tasks towards the group with the most
1248 		 * memory accesses. When comparing two nodes at distance
1249 		 * "hoplimit", only nodes closer by than "hoplimit" are part
1250 		 * of each group. Skip other nodes.
1251 		 */
1252 		if (sched_numa_topology_type == NUMA_BACKPLANE &&
1253 					dist > maxdist)
1254 			continue;
1255 
1256 		/* Add up the faults from nearby nodes. */
1257 		if (task)
1258 			faults = task_faults(p, node);
1259 		else
1260 			faults = group_faults(p, node);
1261 
1262 		/*
1263 		 * On systems with a glueless mesh NUMA topology, there are
1264 		 * no fixed "groups of nodes". Instead, nodes that are not
1265 		 * directly connected bounce traffic through intermediate
1266 		 * nodes; a numa_group can occupy any set of nodes.
1267 		 * The further away a node is, the less the faults count.
1268 		 * This seems to result in good task placement.
1269 		 */
1270 		if (sched_numa_topology_type == NUMA_GLUELESS_MESH) {
1271 			faults *= (sched_max_numa_distance - dist);
1272 			faults /= (sched_max_numa_distance - LOCAL_DISTANCE);
1273 		}
1274 
1275 		score += faults;
1276 	}
1277 
1278 	return score;
1279 }
1280 
1281 /*
1282  * These return the fraction of accesses done by a particular task, or
1283  * task group, on a particular numa node.  The group weight is given a
1284  * larger multiplier, in order to group tasks together that are almost
1285  * evenly spread out between numa nodes.
1286  */
1287 static inline unsigned long task_weight(struct task_struct *p, int nid,
1288 					int dist)
1289 {
1290 	unsigned long faults, total_faults;
1291 
1292 	if (!p->numa_faults)
1293 		return 0;
1294 
1295 	total_faults = p->total_numa_faults;
1296 
1297 	if (!total_faults)
1298 		return 0;
1299 
1300 	faults = task_faults(p, nid);
1301 	faults += score_nearby_nodes(p, nid, dist, true);
1302 
1303 	return 1000 * faults / total_faults;
1304 }
1305 
1306 static inline unsigned long group_weight(struct task_struct *p, int nid,
1307 					 int dist)
1308 {
1309 	unsigned long faults, total_faults;
1310 
1311 	if (!p->numa_group)
1312 		return 0;
1313 
1314 	total_faults = p->numa_group->total_faults;
1315 
1316 	if (!total_faults)
1317 		return 0;
1318 
1319 	faults = group_faults(p, nid);
1320 	faults += score_nearby_nodes(p, nid, dist, false);
1321 
1322 	return 1000 * faults / total_faults;
1323 }
1324 
1325 bool should_numa_migrate_memory(struct task_struct *p, struct page * page,
1326 				int src_nid, int dst_cpu)
1327 {
1328 	struct numa_group *ng = p->numa_group;
1329 	int dst_nid = cpu_to_node(dst_cpu);
1330 	int last_cpupid, this_cpupid;
1331 
1332 	this_cpupid = cpu_pid_to_cpupid(dst_cpu, current->pid);
1333 
1334 	/*
1335 	 * Multi-stage node selection is used in conjunction with a periodic
1336 	 * migration fault to build a temporal task<->page relation. By using
1337 	 * a two-stage filter we remove short/unlikely relations.
1338 	 *
1339 	 * Using P(p) ~ n_p / n_t as per frequentist probability, we can equate
1340 	 * a task's usage of a particular page (n_p) per total usage of this
1341 	 * page (n_t) (in a given time-span) to a probability.
1342 	 *
1343 	 * Our periodic faults will sample this probability and getting the
1344 	 * same result twice in a row, given these samples are fully
1345 	 * independent, is then given by P(n)^2, provided our sample period
1346 	 * is sufficiently short compared to the usage pattern.
1347 	 *
1348 	 * This quadric squishes small probabilities, making it less likely we
1349 	 * act on an unlikely task<->page relation.
1350 	 */
1351 	last_cpupid = page_cpupid_xchg_last(page, this_cpupid);
1352 	if (!cpupid_pid_unset(last_cpupid) &&
1353 				cpupid_to_nid(last_cpupid) != dst_nid)
1354 		return false;
1355 
1356 	/* Always allow migrate on private faults */
1357 	if (cpupid_match_pid(p, last_cpupid))
1358 		return true;
1359 
1360 	/* A shared fault, but p->numa_group has not been set up yet. */
1361 	if (!ng)
1362 		return true;
1363 
1364 	/*
1365 	 * Destination node is much more heavily used than the source
1366 	 * node? Allow migration.
1367 	 */
1368 	if (group_faults_cpu(ng, dst_nid) > group_faults_cpu(ng, src_nid) *
1369 					ACTIVE_NODE_FRACTION)
1370 		return true;
1371 
1372 	/*
1373 	 * Distribute memory according to CPU & memory use on each node,
1374 	 * with 3/4 hysteresis to avoid unnecessary memory migrations:
1375 	 *
1376 	 * faults_cpu(dst)   3   faults_cpu(src)
1377 	 * --------------- * - > ---------------
1378 	 * faults_mem(dst)   4   faults_mem(src)
1379 	 */
1380 	return group_faults_cpu(ng, dst_nid) * group_faults(p, src_nid) * 3 >
1381 	       group_faults_cpu(ng, src_nid) * group_faults(p, dst_nid) * 4;
1382 }
1383 
1384 static unsigned long weighted_cpuload(const int cpu);
1385 static unsigned long source_load(int cpu, int type);
1386 static unsigned long target_load(int cpu, int type);
1387 static unsigned long capacity_of(int cpu);
1388 static long effective_load(struct task_group *tg, int cpu, long wl, long wg);
1389 
1390 /* Cached statistics for all CPUs within a node */
1391 struct numa_stats {
1392 	unsigned long nr_running;
1393 	unsigned long load;
1394 
1395 	/* Total compute capacity of CPUs on a node */
1396 	unsigned long compute_capacity;
1397 
1398 	/* Approximate capacity in terms of runnable tasks on a node */
1399 	unsigned long task_capacity;
1400 	int has_free_capacity;
1401 };
1402 
1403 /*
1404  * XXX borrowed from update_sg_lb_stats
1405  */
1406 static void update_numa_stats(struct numa_stats *ns, int nid)
1407 {
1408 	int smt, cpu, cpus = 0;
1409 	unsigned long capacity;
1410 
1411 	memset(ns, 0, sizeof(*ns));
1412 	for_each_cpu(cpu, cpumask_of_node(nid)) {
1413 		struct rq *rq = cpu_rq(cpu);
1414 
1415 		ns->nr_running += rq->nr_running;
1416 		ns->load += weighted_cpuload(cpu);
1417 		ns->compute_capacity += capacity_of(cpu);
1418 
1419 		cpus++;
1420 	}
1421 
1422 	/*
1423 	 * If we raced with hotplug and there are no CPUs left in our mask
1424 	 * the @ns structure is NULL'ed and task_numa_compare() will
1425 	 * not find this node attractive.
1426 	 *
1427 	 * We'll either bail at !has_free_capacity, or we'll detect a huge
1428 	 * imbalance and bail there.
1429 	 */
1430 	if (!cpus)
1431 		return;
1432 
1433 	/* smt := ceil(cpus / capacity), assumes: 1 < smt_power < 2 */
1434 	smt = DIV_ROUND_UP(SCHED_CAPACITY_SCALE * cpus, ns->compute_capacity);
1435 	capacity = cpus / smt; /* cores */
1436 
1437 	ns->task_capacity = min_t(unsigned, capacity,
1438 		DIV_ROUND_CLOSEST(ns->compute_capacity, SCHED_CAPACITY_SCALE));
1439 	ns->has_free_capacity = (ns->nr_running < ns->task_capacity);
1440 }
1441 
1442 struct task_numa_env {
1443 	struct task_struct *p;
1444 
1445 	int src_cpu, src_nid;
1446 	int dst_cpu, dst_nid;
1447 
1448 	struct numa_stats src_stats, dst_stats;
1449 
1450 	int imbalance_pct;
1451 	int dist;
1452 
1453 	struct task_struct *best_task;
1454 	long best_imp;
1455 	int best_cpu;
1456 };
1457 
1458 static void task_numa_assign(struct task_numa_env *env,
1459 			     struct task_struct *p, long imp)
1460 {
1461 	if (env->best_task)
1462 		put_task_struct(env->best_task);
1463 	if (p)
1464 		get_task_struct(p);
1465 
1466 	env->best_task = p;
1467 	env->best_imp = imp;
1468 	env->best_cpu = env->dst_cpu;
1469 }
1470 
1471 static bool load_too_imbalanced(long src_load, long dst_load,
1472 				struct task_numa_env *env)
1473 {
1474 	long imb, old_imb;
1475 	long orig_src_load, orig_dst_load;
1476 	long src_capacity, dst_capacity;
1477 
1478 	/*
1479 	 * The load is corrected for the CPU capacity available on each node.
1480 	 *
1481 	 * src_load        dst_load
1482 	 * ------------ vs ---------
1483 	 * src_capacity    dst_capacity
1484 	 */
1485 	src_capacity = env->src_stats.compute_capacity;
1486 	dst_capacity = env->dst_stats.compute_capacity;
1487 
1488 	/* We care about the slope of the imbalance, not the direction. */
1489 	if (dst_load < src_load)
1490 		swap(dst_load, src_load);
1491 
1492 	/* Is the difference below the threshold? */
1493 	imb = dst_load * src_capacity * 100 -
1494 	      src_load * dst_capacity * env->imbalance_pct;
1495 	if (imb <= 0)
1496 		return false;
1497 
1498 	/*
1499 	 * The imbalance is above the allowed threshold.
1500 	 * Compare it with the old imbalance.
1501 	 */
1502 	orig_src_load = env->src_stats.load;
1503 	orig_dst_load = env->dst_stats.load;
1504 
1505 	if (orig_dst_load < orig_src_load)
1506 		swap(orig_dst_load, orig_src_load);
1507 
1508 	old_imb = orig_dst_load * src_capacity * 100 -
1509 		  orig_src_load * dst_capacity * env->imbalance_pct;
1510 
1511 	/* Would this change make things worse? */
1512 	return (imb > old_imb);
1513 }
1514 
1515 /*
1516  * This checks if the overall compute and NUMA accesses of the system would
1517  * be improved if the source tasks was migrated to the target dst_cpu taking
1518  * into account that it might be best if task running on the dst_cpu should
1519  * be exchanged with the source task
1520  */
1521 static void task_numa_compare(struct task_numa_env *env,
1522 			      long taskimp, long groupimp)
1523 {
1524 	struct rq *src_rq = cpu_rq(env->src_cpu);
1525 	struct rq *dst_rq = cpu_rq(env->dst_cpu);
1526 	struct task_struct *cur;
1527 	long src_load, dst_load;
1528 	long load;
1529 	long imp = env->p->numa_group ? groupimp : taskimp;
1530 	long moveimp = imp;
1531 	int dist = env->dist;
1532 
1533 	rcu_read_lock();
1534 	cur = task_rcu_dereference(&dst_rq->curr);
1535 	if (cur && ((cur->flags & PF_EXITING) || is_idle_task(cur)))
1536 		cur = NULL;
1537 
1538 	/*
1539 	 * Because we have preemption enabled we can get migrated around and
1540 	 * end try selecting ourselves (current == env->p) as a swap candidate.
1541 	 */
1542 	if (cur == env->p)
1543 		goto unlock;
1544 
1545 	/*
1546 	 * "imp" is the fault differential for the source task between the
1547 	 * source and destination node. Calculate the total differential for
1548 	 * the source task and potential destination task. The more negative
1549 	 * the value is, the more rmeote accesses that would be expected to
1550 	 * be incurred if the tasks were swapped.
1551 	 */
1552 	if (cur) {
1553 		/* Skip this swap candidate if cannot move to the source cpu */
1554 		if (!cpumask_test_cpu(env->src_cpu, tsk_cpus_allowed(cur)))
1555 			goto unlock;
1556 
1557 		/*
1558 		 * If dst and source tasks are in the same NUMA group, or not
1559 		 * in any group then look only at task weights.
1560 		 */
1561 		if (cur->numa_group == env->p->numa_group) {
1562 			imp = taskimp + task_weight(cur, env->src_nid, dist) -
1563 			      task_weight(cur, env->dst_nid, dist);
1564 			/*
1565 			 * Add some hysteresis to prevent swapping the
1566 			 * tasks within a group over tiny differences.
1567 			 */
1568 			if (cur->numa_group)
1569 				imp -= imp/16;
1570 		} else {
1571 			/*
1572 			 * Compare the group weights. If a task is all by
1573 			 * itself (not part of a group), use the task weight
1574 			 * instead.
1575 			 */
1576 			if (cur->numa_group)
1577 				imp += group_weight(cur, env->src_nid, dist) -
1578 				       group_weight(cur, env->dst_nid, dist);
1579 			else
1580 				imp += task_weight(cur, env->src_nid, dist) -
1581 				       task_weight(cur, env->dst_nid, dist);
1582 		}
1583 	}
1584 
1585 	if (imp <= env->best_imp && moveimp <= env->best_imp)
1586 		goto unlock;
1587 
1588 	if (!cur) {
1589 		/* Is there capacity at our destination? */
1590 		if (env->src_stats.nr_running <= env->src_stats.task_capacity &&
1591 		    !env->dst_stats.has_free_capacity)
1592 			goto unlock;
1593 
1594 		goto balance;
1595 	}
1596 
1597 	/* Balance doesn't matter much if we're running a task per cpu */
1598 	if (imp > env->best_imp && src_rq->nr_running == 1 &&
1599 			dst_rq->nr_running == 1)
1600 		goto assign;
1601 
1602 	/*
1603 	 * In the overloaded case, try and keep the load balanced.
1604 	 */
1605 balance:
1606 	load = task_h_load(env->p);
1607 	dst_load = env->dst_stats.load + load;
1608 	src_load = env->src_stats.load - load;
1609 
1610 	if (moveimp > imp && moveimp > env->best_imp) {
1611 		/*
1612 		 * If the improvement from just moving env->p direction is
1613 		 * better than swapping tasks around, check if a move is
1614 		 * possible. Store a slightly smaller score than moveimp,
1615 		 * so an actually idle CPU will win.
1616 		 */
1617 		if (!load_too_imbalanced(src_load, dst_load, env)) {
1618 			imp = moveimp - 1;
1619 			cur = NULL;
1620 			goto assign;
1621 		}
1622 	}
1623 
1624 	if (imp <= env->best_imp)
1625 		goto unlock;
1626 
1627 	if (cur) {
1628 		load = task_h_load(cur);
1629 		dst_load -= load;
1630 		src_load += load;
1631 	}
1632 
1633 	if (load_too_imbalanced(src_load, dst_load, env))
1634 		goto unlock;
1635 
1636 	/*
1637 	 * One idle CPU per node is evaluated for a task numa move.
1638 	 * Call select_idle_sibling to maybe find a better one.
1639 	 */
1640 	if (!cur) {
1641 		/*
1642 		 * select_idle_siblings() uses an per-cpu cpumask that
1643 		 * can be used from IRQ context.
1644 		 */
1645 		local_irq_disable();
1646 		env->dst_cpu = select_idle_sibling(env->p, env->src_cpu,
1647 						   env->dst_cpu);
1648 		local_irq_enable();
1649 	}
1650 
1651 assign:
1652 	task_numa_assign(env, cur, imp);
1653 unlock:
1654 	rcu_read_unlock();
1655 }
1656 
1657 static void task_numa_find_cpu(struct task_numa_env *env,
1658 				long taskimp, long groupimp)
1659 {
1660 	int cpu;
1661 
1662 	for_each_cpu(cpu, cpumask_of_node(env->dst_nid)) {
1663 		/* Skip this CPU if the source task cannot migrate */
1664 		if (!cpumask_test_cpu(cpu, tsk_cpus_allowed(env->p)))
1665 			continue;
1666 
1667 		env->dst_cpu = cpu;
1668 		task_numa_compare(env, taskimp, groupimp);
1669 	}
1670 }
1671 
1672 /* Only move tasks to a NUMA node less busy than the current node. */
1673 static bool numa_has_capacity(struct task_numa_env *env)
1674 {
1675 	struct numa_stats *src = &env->src_stats;
1676 	struct numa_stats *dst = &env->dst_stats;
1677 
1678 	if (src->has_free_capacity && !dst->has_free_capacity)
1679 		return false;
1680 
1681 	/*
1682 	 * Only consider a task move if the source has a higher load
1683 	 * than the destination, corrected for CPU capacity on each node.
1684 	 *
1685 	 *      src->load                dst->load
1686 	 * --------------------- vs ---------------------
1687 	 * src->compute_capacity    dst->compute_capacity
1688 	 */
1689 	if (src->load * dst->compute_capacity * env->imbalance_pct >
1690 
1691 	    dst->load * src->compute_capacity * 100)
1692 		return true;
1693 
1694 	return false;
1695 }
1696 
1697 static int task_numa_migrate(struct task_struct *p)
1698 {
1699 	struct task_numa_env env = {
1700 		.p = p,
1701 
1702 		.src_cpu = task_cpu(p),
1703 		.src_nid = task_node(p),
1704 
1705 		.imbalance_pct = 112,
1706 
1707 		.best_task = NULL,
1708 		.best_imp = 0,
1709 		.best_cpu = -1,
1710 	};
1711 	struct sched_domain *sd;
1712 	unsigned long taskweight, groupweight;
1713 	int nid, ret, dist;
1714 	long taskimp, groupimp;
1715 
1716 	/*
1717 	 * Pick the lowest SD_NUMA domain, as that would have the smallest
1718 	 * imbalance and would be the first to start moving tasks about.
1719 	 *
1720 	 * And we want to avoid any moving of tasks about, as that would create
1721 	 * random movement of tasks -- counter the numa conditions we're trying
1722 	 * to satisfy here.
1723 	 */
1724 	rcu_read_lock();
1725 	sd = rcu_dereference(per_cpu(sd_numa, env.src_cpu));
1726 	if (sd)
1727 		env.imbalance_pct = 100 + (sd->imbalance_pct - 100) / 2;
1728 	rcu_read_unlock();
1729 
1730 	/*
1731 	 * Cpusets can break the scheduler domain tree into smaller
1732 	 * balance domains, some of which do not cross NUMA boundaries.
1733 	 * Tasks that are "trapped" in such domains cannot be migrated
1734 	 * elsewhere, so there is no point in (re)trying.
1735 	 */
1736 	if (unlikely(!sd)) {
1737 		p->numa_preferred_nid = task_node(p);
1738 		return -EINVAL;
1739 	}
1740 
1741 	env.dst_nid = p->numa_preferred_nid;
1742 	dist = env.dist = node_distance(env.src_nid, env.dst_nid);
1743 	taskweight = task_weight(p, env.src_nid, dist);
1744 	groupweight = group_weight(p, env.src_nid, dist);
1745 	update_numa_stats(&env.src_stats, env.src_nid);
1746 	taskimp = task_weight(p, env.dst_nid, dist) - taskweight;
1747 	groupimp = group_weight(p, env.dst_nid, dist) - groupweight;
1748 	update_numa_stats(&env.dst_stats, env.dst_nid);
1749 
1750 	/* Try to find a spot on the preferred nid. */
1751 	if (numa_has_capacity(&env))
1752 		task_numa_find_cpu(&env, taskimp, groupimp);
1753 
1754 	/*
1755 	 * Look at other nodes in these cases:
1756 	 * - there is no space available on the preferred_nid
1757 	 * - the task is part of a numa_group that is interleaved across
1758 	 *   multiple NUMA nodes; in order to better consolidate the group,
1759 	 *   we need to check other locations.
1760 	 */
1761 	if (env.best_cpu == -1 || (p->numa_group && p->numa_group->active_nodes > 1)) {
1762 		for_each_online_node(nid) {
1763 			if (nid == env.src_nid || nid == p->numa_preferred_nid)
1764 				continue;
1765 
1766 			dist = node_distance(env.src_nid, env.dst_nid);
1767 			if (sched_numa_topology_type == NUMA_BACKPLANE &&
1768 						dist != env.dist) {
1769 				taskweight = task_weight(p, env.src_nid, dist);
1770 				groupweight = group_weight(p, env.src_nid, dist);
1771 			}
1772 
1773 			/* Only consider nodes where both task and groups benefit */
1774 			taskimp = task_weight(p, nid, dist) - taskweight;
1775 			groupimp = group_weight(p, nid, dist) - groupweight;
1776 			if (taskimp < 0 && groupimp < 0)
1777 				continue;
1778 
1779 			env.dist = dist;
1780 			env.dst_nid = nid;
1781 			update_numa_stats(&env.dst_stats, env.dst_nid);
1782 			if (numa_has_capacity(&env))
1783 				task_numa_find_cpu(&env, taskimp, groupimp);
1784 		}
1785 	}
1786 
1787 	/*
1788 	 * If the task is part of a workload that spans multiple NUMA nodes,
1789 	 * and is migrating into one of the workload's active nodes, remember
1790 	 * this node as the task's preferred numa node, so the workload can
1791 	 * settle down.
1792 	 * A task that migrated to a second choice node will be better off
1793 	 * trying for a better one later. Do not set the preferred node here.
1794 	 */
1795 	if (p->numa_group) {
1796 		struct numa_group *ng = p->numa_group;
1797 
1798 		if (env.best_cpu == -1)
1799 			nid = env.src_nid;
1800 		else
1801 			nid = env.dst_nid;
1802 
1803 		if (ng->active_nodes > 1 && numa_is_active_node(env.dst_nid, ng))
1804 			sched_setnuma(p, env.dst_nid);
1805 	}
1806 
1807 	/* No better CPU than the current one was found. */
1808 	if (env.best_cpu == -1)
1809 		return -EAGAIN;
1810 
1811 	/*
1812 	 * Reset the scan period if the task is being rescheduled on an
1813 	 * alternative node to recheck if the tasks is now properly placed.
1814 	 */
1815 	p->numa_scan_period = task_scan_min(p);
1816 
1817 	if (env.best_task == NULL) {
1818 		ret = migrate_task_to(p, env.best_cpu);
1819 		if (ret != 0)
1820 			trace_sched_stick_numa(p, env.src_cpu, env.best_cpu);
1821 		return ret;
1822 	}
1823 
1824 	ret = migrate_swap(p, env.best_task);
1825 	if (ret != 0)
1826 		trace_sched_stick_numa(p, env.src_cpu, task_cpu(env.best_task));
1827 	put_task_struct(env.best_task);
1828 	return ret;
1829 }
1830 
1831 /* Attempt to migrate a task to a CPU on the preferred node. */
1832 static void numa_migrate_preferred(struct task_struct *p)
1833 {
1834 	unsigned long interval = HZ;
1835 
1836 	/* This task has no NUMA fault statistics yet */
1837 	if (unlikely(p->numa_preferred_nid == -1 || !p->numa_faults))
1838 		return;
1839 
1840 	/* Periodically retry migrating the task to the preferred node */
1841 	interval = min(interval, msecs_to_jiffies(p->numa_scan_period) / 16);
1842 	p->numa_migrate_retry = jiffies + interval;
1843 
1844 	/* Success if task is already running on preferred CPU */
1845 	if (task_node(p) == p->numa_preferred_nid)
1846 		return;
1847 
1848 	/* Otherwise, try migrate to a CPU on the preferred node */
1849 	task_numa_migrate(p);
1850 }
1851 
1852 /*
1853  * Find out how many nodes on the workload is actively running on. Do this by
1854  * tracking the nodes from which NUMA hinting faults are triggered. This can
1855  * be different from the set of nodes where the workload's memory is currently
1856  * located.
1857  */
1858 static void numa_group_count_active_nodes(struct numa_group *numa_group)
1859 {
1860 	unsigned long faults, max_faults = 0;
1861 	int nid, active_nodes = 0;
1862 
1863 	for_each_online_node(nid) {
1864 		faults = group_faults_cpu(numa_group, nid);
1865 		if (faults > max_faults)
1866 			max_faults = faults;
1867 	}
1868 
1869 	for_each_online_node(nid) {
1870 		faults = group_faults_cpu(numa_group, nid);
1871 		if (faults * ACTIVE_NODE_FRACTION > max_faults)
1872 			active_nodes++;
1873 	}
1874 
1875 	numa_group->max_faults_cpu = max_faults;
1876 	numa_group->active_nodes = active_nodes;
1877 }
1878 
1879 /*
1880  * When adapting the scan rate, the period is divided into NUMA_PERIOD_SLOTS
1881  * increments. The more local the fault statistics are, the higher the scan
1882  * period will be for the next scan window. If local/(local+remote) ratio is
1883  * below NUMA_PERIOD_THRESHOLD (where range of ratio is 1..NUMA_PERIOD_SLOTS)
1884  * the scan period will decrease. Aim for 70% local accesses.
1885  */
1886 #define NUMA_PERIOD_SLOTS 10
1887 #define NUMA_PERIOD_THRESHOLD 7
1888 
1889 /*
1890  * Increase the scan period (slow down scanning) if the majority of
1891  * our memory is already on our local node, or if the majority of
1892  * the page accesses are shared with other processes.
1893  * Otherwise, decrease the scan period.
1894  */
1895 static void update_task_scan_period(struct task_struct *p,
1896 			unsigned long shared, unsigned long private)
1897 {
1898 	unsigned int period_slot;
1899 	int ratio;
1900 	int diff;
1901 
1902 	unsigned long remote = p->numa_faults_locality[0];
1903 	unsigned long local = p->numa_faults_locality[1];
1904 
1905 	/*
1906 	 * If there were no record hinting faults then either the task is
1907 	 * completely idle or all activity is areas that are not of interest
1908 	 * to automatic numa balancing. Related to that, if there were failed
1909 	 * migration then it implies we are migrating too quickly or the local
1910 	 * node is overloaded. In either case, scan slower
1911 	 */
1912 	if (local + shared == 0 || p->numa_faults_locality[2]) {
1913 		p->numa_scan_period = min(p->numa_scan_period_max,
1914 			p->numa_scan_period << 1);
1915 
1916 		p->mm->numa_next_scan = jiffies +
1917 			msecs_to_jiffies(p->numa_scan_period);
1918 
1919 		return;
1920 	}
1921 
1922 	/*
1923 	 * Prepare to scale scan period relative to the current period.
1924 	 *	 == NUMA_PERIOD_THRESHOLD scan period stays the same
1925 	 *       <  NUMA_PERIOD_THRESHOLD scan period decreases (scan faster)
1926 	 *	 >= NUMA_PERIOD_THRESHOLD scan period increases (scan slower)
1927 	 */
1928 	period_slot = DIV_ROUND_UP(p->numa_scan_period, NUMA_PERIOD_SLOTS);
1929 	ratio = (local * NUMA_PERIOD_SLOTS) / (local + remote);
1930 	if (ratio >= NUMA_PERIOD_THRESHOLD) {
1931 		int slot = ratio - NUMA_PERIOD_THRESHOLD;
1932 		if (!slot)
1933 			slot = 1;
1934 		diff = slot * period_slot;
1935 	} else {
1936 		diff = -(NUMA_PERIOD_THRESHOLD - ratio) * period_slot;
1937 
1938 		/*
1939 		 * Scale scan rate increases based on sharing. There is an
1940 		 * inverse relationship between the degree of sharing and
1941 		 * the adjustment made to the scanning period. Broadly
1942 		 * speaking the intent is that there is little point
1943 		 * scanning faster if shared accesses dominate as it may
1944 		 * simply bounce migrations uselessly
1945 		 */
1946 		ratio = DIV_ROUND_UP(private * NUMA_PERIOD_SLOTS, (private + shared + 1));
1947 		diff = (diff * ratio) / NUMA_PERIOD_SLOTS;
1948 	}
1949 
1950 	p->numa_scan_period = clamp(p->numa_scan_period + diff,
1951 			task_scan_min(p), task_scan_max(p));
1952 	memset(p->numa_faults_locality, 0, sizeof(p->numa_faults_locality));
1953 }
1954 
1955 /*
1956  * Get the fraction of time the task has been running since the last
1957  * NUMA placement cycle. The scheduler keeps similar statistics, but
1958  * decays those on a 32ms period, which is orders of magnitude off
1959  * from the dozens-of-seconds NUMA balancing period. Use the scheduler
1960  * stats only if the task is so new there are no NUMA statistics yet.
1961  */
1962 static u64 numa_get_avg_runtime(struct task_struct *p, u64 *period)
1963 {
1964 	u64 runtime, delta, now;
1965 	/* Use the start of this time slice to avoid calculations. */
1966 	now = p->se.exec_start;
1967 	runtime = p->se.sum_exec_runtime;
1968 
1969 	if (p->last_task_numa_placement) {
1970 		delta = runtime - p->last_sum_exec_runtime;
1971 		*period = now - p->last_task_numa_placement;
1972 	} else {
1973 		delta = p->se.avg.load_sum / p->se.load.weight;
1974 		*period = LOAD_AVG_MAX;
1975 	}
1976 
1977 	p->last_sum_exec_runtime = runtime;
1978 	p->last_task_numa_placement = now;
1979 
1980 	return delta;
1981 }
1982 
1983 /*
1984  * Determine the preferred nid for a task in a numa_group. This needs to
1985  * be done in a way that produces consistent results with group_weight,
1986  * otherwise workloads might not converge.
1987  */
1988 static int preferred_group_nid(struct task_struct *p, int nid)
1989 {
1990 	nodemask_t nodes;
1991 	int dist;
1992 
1993 	/* Direct connections between all NUMA nodes. */
1994 	if (sched_numa_topology_type == NUMA_DIRECT)
1995 		return nid;
1996 
1997 	/*
1998 	 * On a system with glueless mesh NUMA topology, group_weight
1999 	 * scores nodes according to the number of NUMA hinting faults on
2000 	 * both the node itself, and on nearby nodes.
2001 	 */
2002 	if (sched_numa_topology_type == NUMA_GLUELESS_MESH) {
2003 		unsigned long score, max_score = 0;
2004 		int node, max_node = nid;
2005 
2006 		dist = sched_max_numa_distance;
2007 
2008 		for_each_online_node(node) {
2009 			score = group_weight(p, node, dist);
2010 			if (score > max_score) {
2011 				max_score = score;
2012 				max_node = node;
2013 			}
2014 		}
2015 		return max_node;
2016 	}
2017 
2018 	/*
2019 	 * Finding the preferred nid in a system with NUMA backplane
2020 	 * interconnect topology is more involved. The goal is to locate
2021 	 * tasks from numa_groups near each other in the system, and
2022 	 * untangle workloads from different sides of the system. This requires
2023 	 * searching down the hierarchy of node groups, recursively searching
2024 	 * inside the highest scoring group of nodes. The nodemask tricks
2025 	 * keep the complexity of the search down.
2026 	 */
2027 	nodes = node_online_map;
2028 	for (dist = sched_max_numa_distance; dist > LOCAL_DISTANCE; dist--) {
2029 		unsigned long max_faults = 0;
2030 		nodemask_t max_group = NODE_MASK_NONE;
2031 		int a, b;
2032 
2033 		/* Are there nodes at this distance from each other? */
2034 		if (!find_numa_distance(dist))
2035 			continue;
2036 
2037 		for_each_node_mask(a, nodes) {
2038 			unsigned long faults = 0;
2039 			nodemask_t this_group;
2040 			nodes_clear(this_group);
2041 
2042 			/* Sum group's NUMA faults; includes a==b case. */
2043 			for_each_node_mask(b, nodes) {
2044 				if (node_distance(a, b) < dist) {
2045 					faults += group_faults(p, b);
2046 					node_set(b, this_group);
2047 					node_clear(b, nodes);
2048 				}
2049 			}
2050 
2051 			/* Remember the top group. */
2052 			if (faults > max_faults) {
2053 				max_faults = faults;
2054 				max_group = this_group;
2055 				/*
2056 				 * subtle: at the smallest distance there is
2057 				 * just one node left in each "group", the
2058 				 * winner is the preferred nid.
2059 				 */
2060 				nid = a;
2061 			}
2062 		}
2063 		/* Next round, evaluate the nodes within max_group. */
2064 		if (!max_faults)
2065 			break;
2066 		nodes = max_group;
2067 	}
2068 	return nid;
2069 }
2070 
2071 static void task_numa_placement(struct task_struct *p)
2072 {
2073 	int seq, nid, max_nid = -1, max_group_nid = -1;
2074 	unsigned long max_faults = 0, max_group_faults = 0;
2075 	unsigned long fault_types[2] = { 0, 0 };
2076 	unsigned long total_faults;
2077 	u64 runtime, period;
2078 	spinlock_t *group_lock = NULL;
2079 
2080 	/*
2081 	 * The p->mm->numa_scan_seq field gets updated without
2082 	 * exclusive access. Use READ_ONCE() here to ensure
2083 	 * that the field is read in a single access:
2084 	 */
2085 	seq = READ_ONCE(p->mm->numa_scan_seq);
2086 	if (p->numa_scan_seq == seq)
2087 		return;
2088 	p->numa_scan_seq = seq;
2089 	p->numa_scan_period_max = task_scan_max(p);
2090 
2091 	total_faults = p->numa_faults_locality[0] +
2092 		       p->numa_faults_locality[1];
2093 	runtime = numa_get_avg_runtime(p, &period);
2094 
2095 	/* If the task is part of a group prevent parallel updates to group stats */
2096 	if (p->numa_group) {
2097 		group_lock = &p->numa_group->lock;
2098 		spin_lock_irq(group_lock);
2099 	}
2100 
2101 	/* Find the node with the highest number of faults */
2102 	for_each_online_node(nid) {
2103 		/* Keep track of the offsets in numa_faults array */
2104 		int mem_idx, membuf_idx, cpu_idx, cpubuf_idx;
2105 		unsigned long faults = 0, group_faults = 0;
2106 		int priv;
2107 
2108 		for (priv = 0; priv < NR_NUMA_HINT_FAULT_TYPES; priv++) {
2109 			long diff, f_diff, f_weight;
2110 
2111 			mem_idx = task_faults_idx(NUMA_MEM, nid, priv);
2112 			membuf_idx = task_faults_idx(NUMA_MEMBUF, nid, priv);
2113 			cpu_idx = task_faults_idx(NUMA_CPU, nid, priv);
2114 			cpubuf_idx = task_faults_idx(NUMA_CPUBUF, nid, priv);
2115 
2116 			/* Decay existing window, copy faults since last scan */
2117 			diff = p->numa_faults[membuf_idx] - p->numa_faults[mem_idx] / 2;
2118 			fault_types[priv] += p->numa_faults[membuf_idx];
2119 			p->numa_faults[membuf_idx] = 0;
2120 
2121 			/*
2122 			 * Normalize the faults_from, so all tasks in a group
2123 			 * count according to CPU use, instead of by the raw
2124 			 * number of faults. Tasks with little runtime have
2125 			 * little over-all impact on throughput, and thus their
2126 			 * faults are less important.
2127 			 */
2128 			f_weight = div64_u64(runtime << 16, period + 1);
2129 			f_weight = (f_weight * p->numa_faults[cpubuf_idx]) /
2130 				   (total_faults + 1);
2131 			f_diff = f_weight - p->numa_faults[cpu_idx] / 2;
2132 			p->numa_faults[cpubuf_idx] = 0;
2133 
2134 			p->numa_faults[mem_idx] += diff;
2135 			p->numa_faults[cpu_idx] += f_diff;
2136 			faults += p->numa_faults[mem_idx];
2137 			p->total_numa_faults += diff;
2138 			if (p->numa_group) {
2139 				/*
2140 				 * safe because we can only change our own group
2141 				 *
2142 				 * mem_idx represents the offset for a given
2143 				 * nid and priv in a specific region because it
2144 				 * is at the beginning of the numa_faults array.
2145 				 */
2146 				p->numa_group->faults[mem_idx] += diff;
2147 				p->numa_group->faults_cpu[mem_idx] += f_diff;
2148 				p->numa_group->total_faults += diff;
2149 				group_faults += p->numa_group->faults[mem_idx];
2150 			}
2151 		}
2152 
2153 		if (faults > max_faults) {
2154 			max_faults = faults;
2155 			max_nid = nid;
2156 		}
2157 
2158 		if (group_faults > max_group_faults) {
2159 			max_group_faults = group_faults;
2160 			max_group_nid = nid;
2161 		}
2162 	}
2163 
2164 	update_task_scan_period(p, fault_types[0], fault_types[1]);
2165 
2166 	if (p->numa_group) {
2167 		numa_group_count_active_nodes(p->numa_group);
2168 		spin_unlock_irq(group_lock);
2169 		max_nid = preferred_group_nid(p, max_group_nid);
2170 	}
2171 
2172 	if (max_faults) {
2173 		/* Set the new preferred node */
2174 		if (max_nid != p->numa_preferred_nid)
2175 			sched_setnuma(p, max_nid);
2176 
2177 		if (task_node(p) != p->numa_preferred_nid)
2178 			numa_migrate_preferred(p);
2179 	}
2180 }
2181 
2182 static inline int get_numa_group(struct numa_group *grp)
2183 {
2184 	return atomic_inc_not_zero(&grp->refcount);
2185 }
2186 
2187 static inline void put_numa_group(struct numa_group *grp)
2188 {
2189 	if (atomic_dec_and_test(&grp->refcount))
2190 		kfree_rcu(grp, rcu);
2191 }
2192 
2193 static void task_numa_group(struct task_struct *p, int cpupid, int flags,
2194 			int *priv)
2195 {
2196 	struct numa_group *grp, *my_grp;
2197 	struct task_struct *tsk;
2198 	bool join = false;
2199 	int cpu = cpupid_to_cpu(cpupid);
2200 	int i;
2201 
2202 	if (unlikely(!p->numa_group)) {
2203 		unsigned int size = sizeof(struct numa_group) +
2204 				    4*nr_node_ids*sizeof(unsigned long);
2205 
2206 		grp = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);
2207 		if (!grp)
2208 			return;
2209 
2210 		atomic_set(&grp->refcount, 1);
2211 		grp->active_nodes = 1;
2212 		grp->max_faults_cpu = 0;
2213 		spin_lock_init(&grp->lock);
2214 		grp->gid = p->pid;
2215 		/* Second half of the array tracks nids where faults happen */
2216 		grp->faults_cpu = grp->faults + NR_NUMA_HINT_FAULT_TYPES *
2217 						nr_node_ids;
2218 
2219 		for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++)
2220 			grp->faults[i] = p->numa_faults[i];
2221 
2222 		grp->total_faults = p->total_numa_faults;
2223 
2224 		grp->nr_tasks++;
2225 		rcu_assign_pointer(p->numa_group, grp);
2226 	}
2227 
2228 	rcu_read_lock();
2229 	tsk = READ_ONCE(cpu_rq(cpu)->curr);
2230 
2231 	if (!cpupid_match_pid(tsk, cpupid))
2232 		goto no_join;
2233 
2234 	grp = rcu_dereference(tsk->numa_group);
2235 	if (!grp)
2236 		goto no_join;
2237 
2238 	my_grp = p->numa_group;
2239 	if (grp == my_grp)
2240 		goto no_join;
2241 
2242 	/*
2243 	 * Only join the other group if its bigger; if we're the bigger group,
2244 	 * the other task will join us.
2245 	 */
2246 	if (my_grp->nr_tasks > grp->nr_tasks)
2247 		goto no_join;
2248 
2249 	/*
2250 	 * Tie-break on the grp address.
2251 	 */
2252 	if (my_grp->nr_tasks == grp->nr_tasks && my_grp > grp)
2253 		goto no_join;
2254 
2255 	/* Always join threads in the same process. */
2256 	if (tsk->mm == current->mm)
2257 		join = true;
2258 
2259 	/* Simple filter to avoid false positives due to PID collisions */
2260 	if (flags & TNF_SHARED)
2261 		join = true;
2262 
2263 	/* Update priv based on whether false sharing was detected */
2264 	*priv = !join;
2265 
2266 	if (join && !get_numa_group(grp))
2267 		goto no_join;
2268 
2269 	rcu_read_unlock();
2270 
2271 	if (!join)
2272 		return;
2273 
2274 	BUG_ON(irqs_disabled());
2275 	double_lock_irq(&my_grp->lock, &grp->lock);
2276 
2277 	for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++) {
2278 		my_grp->faults[i] -= p->numa_faults[i];
2279 		grp->faults[i] += p->numa_faults[i];
2280 	}
2281 	my_grp->total_faults -= p->total_numa_faults;
2282 	grp->total_faults += p->total_numa_faults;
2283 
2284 	my_grp->nr_tasks--;
2285 	grp->nr_tasks++;
2286 
2287 	spin_unlock(&my_grp->lock);
2288 	spin_unlock_irq(&grp->lock);
2289 
2290 	rcu_assign_pointer(p->numa_group, grp);
2291 
2292 	put_numa_group(my_grp);
2293 	return;
2294 
2295 no_join:
2296 	rcu_read_unlock();
2297 	return;
2298 }
2299 
2300 void task_numa_free(struct task_struct *p)
2301 {
2302 	struct numa_group *grp = p->numa_group;
2303 	void *numa_faults = p->numa_faults;
2304 	unsigned long flags;
2305 	int i;
2306 
2307 	if (grp) {
2308 		spin_lock_irqsave(&grp->lock, flags);
2309 		for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++)
2310 			grp->faults[i] -= p->numa_faults[i];
2311 		grp->total_faults -= p->total_numa_faults;
2312 
2313 		grp->nr_tasks--;
2314 		spin_unlock_irqrestore(&grp->lock, flags);
2315 		RCU_INIT_POINTER(p->numa_group, NULL);
2316 		put_numa_group(grp);
2317 	}
2318 
2319 	p->numa_faults = NULL;
2320 	kfree(numa_faults);
2321 }
2322 
2323 /*
2324  * Got a PROT_NONE fault for a page on @node.
2325  */
2326 void task_numa_fault(int last_cpupid, int mem_node, int pages, int flags)
2327 {
2328 	struct task_struct *p = current;
2329 	bool migrated = flags & TNF_MIGRATED;
2330 	int cpu_node = task_node(current);
2331 	int local = !!(flags & TNF_FAULT_LOCAL);
2332 	struct numa_group *ng;
2333 	int priv;
2334 
2335 	if (!static_branch_likely(&sched_numa_balancing))
2336 		return;
2337 
2338 	/* for example, ksmd faulting in a user's mm */
2339 	if (!p->mm)
2340 		return;
2341 
2342 	/* Allocate buffer to track faults on a per-node basis */
2343 	if (unlikely(!p->numa_faults)) {
2344 		int size = sizeof(*p->numa_faults) *
2345 			   NR_NUMA_HINT_FAULT_BUCKETS * nr_node_ids;
2346 
2347 		p->numa_faults = kzalloc(size, GFP_KERNEL|__GFP_NOWARN);
2348 		if (!p->numa_faults)
2349 			return;
2350 
2351 		p->total_numa_faults = 0;
2352 		memset(p->numa_faults_locality, 0, sizeof(p->numa_faults_locality));
2353 	}
2354 
2355 	/*
2356 	 * First accesses are treated as private, otherwise consider accesses
2357 	 * to be private if the accessing pid has not changed
2358 	 */
2359 	if (unlikely(last_cpupid == (-1 & LAST_CPUPID_MASK))) {
2360 		priv = 1;
2361 	} else {
2362 		priv = cpupid_match_pid(p, last_cpupid);
2363 		if (!priv && !(flags & TNF_NO_GROUP))
2364 			task_numa_group(p, last_cpupid, flags, &priv);
2365 	}
2366 
2367 	/*
2368 	 * If a workload spans multiple NUMA nodes, a shared fault that
2369 	 * occurs wholly within the set of nodes that the workload is
2370 	 * actively using should be counted as local. This allows the
2371 	 * scan rate to slow down when a workload has settled down.
2372 	 */
2373 	ng = p->numa_group;
2374 	if (!priv && !local && ng && ng->active_nodes > 1 &&
2375 				numa_is_active_node(cpu_node, ng) &&
2376 				numa_is_active_node(mem_node, ng))
2377 		local = 1;
2378 
2379 	task_numa_placement(p);
2380 
2381 	/*
2382 	 * Retry task to preferred node migration periodically, in case it
2383 	 * case it previously failed, or the scheduler moved us.
2384 	 */
2385 	if (time_after(jiffies, p->numa_migrate_retry))
2386 		numa_migrate_preferred(p);
2387 
2388 	if (migrated)
2389 		p->numa_pages_migrated += pages;
2390 	if (flags & TNF_MIGRATE_FAIL)
2391 		p->numa_faults_locality[2] += pages;
2392 
2393 	p->numa_faults[task_faults_idx(NUMA_MEMBUF, mem_node, priv)] += pages;
2394 	p->numa_faults[task_faults_idx(NUMA_CPUBUF, cpu_node, priv)] += pages;
2395 	p->numa_faults_locality[local] += pages;
2396 }
2397 
2398 static void reset_ptenuma_scan(struct task_struct *p)
2399 {
2400 	/*
2401 	 * We only did a read acquisition of the mmap sem, so
2402 	 * p->mm->numa_scan_seq is written to without exclusive access
2403 	 * and the update is not guaranteed to be atomic. That's not
2404 	 * much of an issue though, since this is just used for
2405 	 * statistical sampling. Use READ_ONCE/WRITE_ONCE, which are not
2406 	 * expensive, to avoid any form of compiler optimizations:
2407 	 */
2408 	WRITE_ONCE(p->mm->numa_scan_seq, READ_ONCE(p->mm->numa_scan_seq) + 1);
2409 	p->mm->numa_scan_offset = 0;
2410 }
2411 
2412 /*
2413  * The expensive part of numa migration is done from task_work context.
2414  * Triggered from task_tick_numa().
2415  */
2416 void task_numa_work(struct callback_head *work)
2417 {
2418 	unsigned long migrate, next_scan, now = jiffies;
2419 	struct task_struct *p = current;
2420 	struct mm_struct *mm = p->mm;
2421 	u64 runtime = p->se.sum_exec_runtime;
2422 	struct vm_area_struct *vma;
2423 	unsigned long start, end;
2424 	unsigned long nr_pte_updates = 0;
2425 	long pages, virtpages;
2426 
2427 	SCHED_WARN_ON(p != container_of(work, struct task_struct, numa_work));
2428 
2429 	work->next = work; /* protect against double add */
2430 	/*
2431 	 * Who cares about NUMA placement when they're dying.
2432 	 *
2433 	 * NOTE: make sure not to dereference p->mm before this check,
2434 	 * exit_task_work() happens _after_ exit_mm() so we could be called
2435 	 * without p->mm even though we still had it when we enqueued this
2436 	 * work.
2437 	 */
2438 	if (p->flags & PF_EXITING)
2439 		return;
2440 
2441 	if (!mm->numa_next_scan) {
2442 		mm->numa_next_scan = now +
2443 			msecs_to_jiffies(sysctl_numa_balancing_scan_delay);
2444 	}
2445 
2446 	/*
2447 	 * Enforce maximal scan/migration frequency..
2448 	 */
2449 	migrate = mm->numa_next_scan;
2450 	if (time_before(now, migrate))
2451 		return;
2452 
2453 	if (p->numa_scan_period == 0) {
2454 		p->numa_scan_period_max = task_scan_max(p);
2455 		p->numa_scan_period = task_scan_min(p);
2456 	}
2457 
2458 	next_scan = now + msecs_to_jiffies(p->numa_scan_period);
2459 	if (cmpxchg(&mm->numa_next_scan, migrate, next_scan) != migrate)
2460 		return;
2461 
2462 	/*
2463 	 * Delay this task enough that another task of this mm will likely win
2464 	 * the next time around.
2465 	 */
2466 	p->node_stamp += 2 * TICK_NSEC;
2467 
2468 	start = mm->numa_scan_offset;
2469 	pages = sysctl_numa_balancing_scan_size;
2470 	pages <<= 20 - PAGE_SHIFT; /* MB in pages */
2471 	virtpages = pages * 8;	   /* Scan up to this much virtual space */
2472 	if (!pages)
2473 		return;
2474 
2475 
2476 	down_read(&mm->mmap_sem);
2477 	vma = find_vma(mm, start);
2478 	if (!vma) {
2479 		reset_ptenuma_scan(p);
2480 		start = 0;
2481 		vma = mm->mmap;
2482 	}
2483 	for (; vma; vma = vma->vm_next) {
2484 		if (!vma_migratable(vma) || !vma_policy_mof(vma) ||
2485 			is_vm_hugetlb_page(vma) || (vma->vm_flags & VM_MIXEDMAP)) {
2486 			continue;
2487 		}
2488 
2489 		/*
2490 		 * Shared library pages mapped by multiple processes are not
2491 		 * migrated as it is expected they are cache replicated. Avoid
2492 		 * hinting faults in read-only file-backed mappings or the vdso
2493 		 * as migrating the pages will be of marginal benefit.
2494 		 */
2495 		if (!vma->vm_mm ||
2496 		    (vma->vm_file && (vma->vm_flags & (VM_READ|VM_WRITE)) == (VM_READ)))
2497 			continue;
2498 
2499 		/*
2500 		 * Skip inaccessible VMAs to avoid any confusion between
2501 		 * PROT_NONE and NUMA hinting ptes
2502 		 */
2503 		if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))
2504 			continue;
2505 
2506 		do {
2507 			start = max(start, vma->vm_start);
2508 			end = ALIGN(start + (pages << PAGE_SHIFT), HPAGE_SIZE);
2509 			end = min(end, vma->vm_end);
2510 			nr_pte_updates = change_prot_numa(vma, start, end);
2511 
2512 			/*
2513 			 * Try to scan sysctl_numa_balancing_size worth of
2514 			 * hpages that have at least one present PTE that
2515 			 * is not already pte-numa. If the VMA contains
2516 			 * areas that are unused or already full of prot_numa
2517 			 * PTEs, scan up to virtpages, to skip through those
2518 			 * areas faster.
2519 			 */
2520 			if (nr_pte_updates)
2521 				pages -= (end - start) >> PAGE_SHIFT;
2522 			virtpages -= (end - start) >> PAGE_SHIFT;
2523 
2524 			start = end;
2525 			if (pages <= 0 || virtpages <= 0)
2526 				goto out;
2527 
2528 			cond_resched();
2529 		} while (end != vma->vm_end);
2530 	}
2531 
2532 out:
2533 	/*
2534 	 * It is possible to reach the end of the VMA list but the last few
2535 	 * VMAs are not guaranteed to the vma_migratable. If they are not, we
2536 	 * would find the !migratable VMA on the next scan but not reset the
2537 	 * scanner to the start so check it now.
2538 	 */
2539 	if (vma)
2540 		mm->numa_scan_offset = start;
2541 	else
2542 		reset_ptenuma_scan(p);
2543 	up_read(&mm->mmap_sem);
2544 
2545 	/*
2546 	 * Make sure tasks use at least 32x as much time to run other code
2547 	 * than they used here, to limit NUMA PTE scanning overhead to 3% max.
2548 	 * Usually update_task_scan_period slows down scanning enough; on an
2549 	 * overloaded system we need to limit overhead on a per task basis.
2550 	 */
2551 	if (unlikely(p->se.sum_exec_runtime != runtime)) {
2552 		u64 diff = p->se.sum_exec_runtime - runtime;
2553 		p->node_stamp += 32 * diff;
2554 	}
2555 }
2556 
2557 /*
2558  * Drive the periodic memory faults..
2559  */
2560 void task_tick_numa(struct rq *rq, struct task_struct *curr)
2561 {
2562 	struct callback_head *work = &curr->numa_work;
2563 	u64 period, now;
2564 
2565 	/*
2566 	 * We don't care about NUMA placement if we don't have memory.
2567 	 */
2568 	if (!curr->mm || (curr->flags & PF_EXITING) || work->next != work)
2569 		return;
2570 
2571 	/*
2572 	 * Using runtime rather than walltime has the dual advantage that
2573 	 * we (mostly) drive the selection from busy threads and that the
2574 	 * task needs to have done some actual work before we bother with
2575 	 * NUMA placement.
2576 	 */
2577 	now = curr->se.sum_exec_runtime;
2578 	period = (u64)curr->numa_scan_period * NSEC_PER_MSEC;
2579 
2580 	if (now > curr->node_stamp + period) {
2581 		if (!curr->node_stamp)
2582 			curr->numa_scan_period = task_scan_min(curr);
2583 		curr->node_stamp += period;
2584 
2585 		if (!time_before(jiffies, curr->mm->numa_next_scan)) {
2586 			init_task_work(work, task_numa_work); /* TODO: move this into sched_fork() */
2587 			task_work_add(curr, work, true);
2588 		}
2589 	}
2590 }
2591 #else
2592 static void task_tick_numa(struct rq *rq, struct task_struct *curr)
2593 {
2594 }
2595 
2596 static inline void account_numa_enqueue(struct rq *rq, struct task_struct *p)
2597 {
2598 }
2599 
2600 static inline void account_numa_dequeue(struct rq *rq, struct task_struct *p)
2601 {
2602 }
2603 #endif /* CONFIG_NUMA_BALANCING */
2604 
2605 static void
2606 account_entity_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
2607 {
2608 	update_load_add(&cfs_rq->load, se->load.weight);
2609 	if (!parent_entity(se))
2610 		update_load_add(&rq_of(cfs_rq)->load, se->load.weight);
2611 #ifdef CONFIG_SMP
2612 	if (entity_is_task(se)) {
2613 		struct rq *rq = rq_of(cfs_rq);
2614 
2615 		account_numa_enqueue(rq, task_of(se));
2616 		list_add(&se->group_node, &rq->cfs_tasks);
2617 	}
2618 #endif
2619 	cfs_rq->nr_running++;
2620 }
2621 
2622 static void
2623 account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
2624 {
2625 	update_load_sub(&cfs_rq->load, se->load.weight);
2626 	if (!parent_entity(se))
2627 		update_load_sub(&rq_of(cfs_rq)->load, se->load.weight);
2628 #ifdef CONFIG_SMP
2629 	if (entity_is_task(se)) {
2630 		account_numa_dequeue(rq_of(cfs_rq), task_of(se));
2631 		list_del_init(&se->group_node);
2632 	}
2633 #endif
2634 	cfs_rq->nr_running--;
2635 }
2636 
2637 #ifdef CONFIG_FAIR_GROUP_SCHED
2638 # ifdef CONFIG_SMP
2639 static long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg)
2640 {
2641 	long tg_weight, load, shares;
2642 
2643 	/*
2644 	 * This really should be: cfs_rq->avg.load_avg, but instead we use
2645 	 * cfs_rq->load.weight, which is its upper bound. This helps ramp up
2646 	 * the shares for small weight interactive tasks.
2647 	 */
2648 	load = scale_load_down(cfs_rq->load.weight);
2649 
2650 	tg_weight = atomic_long_read(&tg->load_avg);
2651 
2652 	/* Ensure tg_weight >= load */
2653 	tg_weight -= cfs_rq->tg_load_avg_contrib;
2654 	tg_weight += load;
2655 
2656 	shares = (tg->shares * load);
2657 	if (tg_weight)
2658 		shares /= tg_weight;
2659 
2660 	if (shares < MIN_SHARES)
2661 		shares = MIN_SHARES;
2662 	if (shares > tg->shares)
2663 		shares = tg->shares;
2664 
2665 	return shares;
2666 }
2667 # else /* CONFIG_SMP */
2668 static inline long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg)
2669 {
2670 	return tg->shares;
2671 }
2672 # endif /* CONFIG_SMP */
2673 
2674 static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
2675 			    unsigned long weight)
2676 {
2677 	if (se->on_rq) {
2678 		/* commit outstanding execution time */
2679 		if (cfs_rq->curr == se)
2680 			update_curr(cfs_rq);
2681 		account_entity_dequeue(cfs_rq, se);
2682 	}
2683 
2684 	update_load_set(&se->load, weight);
2685 
2686 	if (se->on_rq)
2687 		account_entity_enqueue(cfs_rq, se);
2688 }
2689 
2690 static inline int throttled_hierarchy(struct cfs_rq *cfs_rq);
2691 
2692 static void update_cfs_shares(struct cfs_rq *cfs_rq)
2693 {
2694 	struct task_group *tg;
2695 	struct sched_entity *se;
2696 	long shares;
2697 
2698 	tg = cfs_rq->tg;
2699 	se = tg->se[cpu_of(rq_of(cfs_rq))];
2700 	if (!se || throttled_hierarchy(cfs_rq))
2701 		return;
2702 #ifndef CONFIG_SMP
2703 	if (likely(se->load.weight == tg->shares))
2704 		return;
2705 #endif
2706 	shares = calc_cfs_shares(cfs_rq, tg);
2707 
2708 	reweight_entity(cfs_rq_of(se), se, shares);
2709 }
2710 #else /* CONFIG_FAIR_GROUP_SCHED */
2711 static inline void update_cfs_shares(struct cfs_rq *cfs_rq)
2712 {
2713 }
2714 #endif /* CONFIG_FAIR_GROUP_SCHED */
2715 
2716 #ifdef CONFIG_SMP
2717 /* Precomputed fixed inverse multiplies for multiplication by y^n */
2718 static const u32 runnable_avg_yN_inv[] = {
2719 	0xffffffff, 0xfa83b2da, 0xf5257d14, 0xefe4b99a, 0xeac0c6e6, 0xe5b906e6,
2720 	0xe0ccdeeb, 0xdbfbb796, 0xd744fcc9, 0xd2a81d91, 0xce248c14, 0xc9b9bd85,
2721 	0xc5672a10, 0xc12c4cc9, 0xbd08a39e, 0xb8fbaf46, 0xb504f333, 0xb123f581,
2722 	0xad583ee9, 0xa9a15ab4, 0xa5fed6a9, 0xa2704302, 0x9ef5325f, 0x9b8d39b9,
2723 	0x9837f050, 0x94f4efa8, 0x91c3d373, 0x8ea4398a, 0x8b95c1e3, 0x88980e80,
2724 	0x85aac367, 0x82cd8698,
2725 };
2726 
2727 /*
2728  * Precomputed \Sum y^k { 1<=k<=n }.  These are floor(true_value) to prevent
2729  * over-estimates when re-combining.
2730  */
2731 static const u32 runnable_avg_yN_sum[] = {
2732 	    0, 1002, 1982, 2941, 3880, 4798, 5697, 6576, 7437, 8279, 9103,
2733 	 9909,10698,11470,12226,12966,13690,14398,15091,15769,16433,17082,
2734 	17718,18340,18949,19545,20128,20698,21256,21802,22336,22859,23371,
2735 };
2736 
2737 /*
2738  * Precomputed \Sum y^k { 1<=k<=n, where n%32=0). Values are rolled down to
2739  * lower integers. See Documentation/scheduler/sched-avg.txt how these
2740  * were generated:
2741  */
2742 static const u32 __accumulated_sum_N32[] = {
2743 	    0, 23371, 35056, 40899, 43820, 45281,
2744 	46011, 46376, 46559, 46650, 46696, 46719,
2745 };
2746 
2747 /*
2748  * Approximate:
2749  *   val * y^n,    where y^32 ~= 0.5 (~1 scheduling period)
2750  */
2751 static __always_inline u64 decay_load(u64 val, u64 n)
2752 {
2753 	unsigned int local_n;
2754 
2755 	if (!n)
2756 		return val;
2757 	else if (unlikely(n > LOAD_AVG_PERIOD * 63))
2758 		return 0;
2759 
2760 	/* after bounds checking we can collapse to 32-bit */
2761 	local_n = n;
2762 
2763 	/*
2764 	 * As y^PERIOD = 1/2, we can combine
2765 	 *    y^n = 1/2^(n/PERIOD) * y^(n%PERIOD)
2766 	 * With a look-up table which covers y^n (n<PERIOD)
2767 	 *
2768 	 * To achieve constant time decay_load.
2769 	 */
2770 	if (unlikely(local_n >= LOAD_AVG_PERIOD)) {
2771 		val >>= local_n / LOAD_AVG_PERIOD;
2772 		local_n %= LOAD_AVG_PERIOD;
2773 	}
2774 
2775 	val = mul_u64_u32_shr(val, runnable_avg_yN_inv[local_n], 32);
2776 	return val;
2777 }
2778 
2779 /*
2780  * For updates fully spanning n periods, the contribution to runnable
2781  * average will be: \Sum 1024*y^n
2782  *
2783  * We can compute this reasonably efficiently by combining:
2784  *   y^PERIOD = 1/2 with precomputed \Sum 1024*y^n {for  n <PERIOD}
2785  */
2786 static u32 __compute_runnable_contrib(u64 n)
2787 {
2788 	u32 contrib = 0;
2789 
2790 	if (likely(n <= LOAD_AVG_PERIOD))
2791 		return runnable_avg_yN_sum[n];
2792 	else if (unlikely(n >= LOAD_AVG_MAX_N))
2793 		return LOAD_AVG_MAX;
2794 
2795 	/* Since n < LOAD_AVG_MAX_N, n/LOAD_AVG_PERIOD < 11 */
2796 	contrib = __accumulated_sum_N32[n/LOAD_AVG_PERIOD];
2797 	n %= LOAD_AVG_PERIOD;
2798 	contrib = decay_load(contrib, n);
2799 	return contrib + runnable_avg_yN_sum[n];
2800 }
2801 
2802 #define cap_scale(v, s) ((v)*(s) >> SCHED_CAPACITY_SHIFT)
2803 
2804 /*
2805  * We can represent the historical contribution to runnable average as the
2806  * coefficients of a geometric series.  To do this we sub-divide our runnable
2807  * history into segments of approximately 1ms (1024us); label the segment that
2808  * occurred N-ms ago p_N, with p_0 corresponding to the current period, e.g.
2809  *
2810  * [<- 1024us ->|<- 1024us ->|<- 1024us ->| ...
2811  *      p0            p1           p2
2812  *     (now)       (~1ms ago)  (~2ms ago)
2813  *
2814  * Let u_i denote the fraction of p_i that the entity was runnable.
2815  *
2816  * We then designate the fractions u_i as our co-efficients, yielding the
2817  * following representation of historical load:
2818  *   u_0 + u_1*y + u_2*y^2 + u_3*y^3 + ...
2819  *
2820  * We choose y based on the with of a reasonably scheduling period, fixing:
2821  *   y^32 = 0.5
2822  *
2823  * This means that the contribution to load ~32ms ago (u_32) will be weighted
2824  * approximately half as much as the contribution to load within the last ms
2825  * (u_0).
2826  *
2827  * When a period "rolls over" and we have new u_0`, multiplying the previous
2828  * sum again by y is sufficient to update:
2829  *   load_avg = u_0` + y*(u_0 + u_1*y + u_2*y^2 + ... )
2830  *            = u_0 + u_1*y + u_2*y^2 + ... [re-labeling u_i --> u_{i+1}]
2831  */
2832 static __always_inline int
2833 __update_load_avg(u64 now, int cpu, struct sched_avg *sa,
2834 		  unsigned long weight, int running, struct cfs_rq *cfs_rq)
2835 {
2836 	u64 delta, scaled_delta, periods;
2837 	u32 contrib;
2838 	unsigned int delta_w, scaled_delta_w, decayed = 0;
2839 	unsigned long scale_freq, scale_cpu;
2840 
2841 	delta = now - sa->last_update_time;
2842 	/*
2843 	 * This should only happen when time goes backwards, which it
2844 	 * unfortunately does during sched clock init when we swap over to TSC.
2845 	 */
2846 	if ((s64)delta < 0) {
2847 		sa->last_update_time = now;
2848 		return 0;
2849 	}
2850 
2851 	/*
2852 	 * Use 1024ns as the unit of measurement since it's a reasonable
2853 	 * approximation of 1us and fast to compute.
2854 	 */
2855 	delta >>= 10;
2856 	if (!delta)
2857 		return 0;
2858 	sa->last_update_time = now;
2859 
2860 	scale_freq = arch_scale_freq_capacity(NULL, cpu);
2861 	scale_cpu = arch_scale_cpu_capacity(NULL, cpu);
2862 
2863 	/* delta_w is the amount already accumulated against our next period */
2864 	delta_w = sa->period_contrib;
2865 	if (delta + delta_w >= 1024) {
2866 		decayed = 1;
2867 
2868 		/* how much left for next period will start over, we don't know yet */
2869 		sa->period_contrib = 0;
2870 
2871 		/*
2872 		 * Now that we know we're crossing a period boundary, figure
2873 		 * out how much from delta we need to complete the current
2874 		 * period and accrue it.
2875 		 */
2876 		delta_w = 1024 - delta_w;
2877 		scaled_delta_w = cap_scale(delta_w, scale_freq);
2878 		if (weight) {
2879 			sa->load_sum += weight * scaled_delta_w;
2880 			if (cfs_rq) {
2881 				cfs_rq->runnable_load_sum +=
2882 						weight * scaled_delta_w;
2883 			}
2884 		}
2885 		if (running)
2886 			sa->util_sum += scaled_delta_w * scale_cpu;
2887 
2888 		delta -= delta_w;
2889 
2890 		/* Figure out how many additional periods this update spans */
2891 		periods = delta / 1024;
2892 		delta %= 1024;
2893 
2894 		sa->load_sum = decay_load(sa->load_sum, periods + 1);
2895 		if (cfs_rq) {
2896 			cfs_rq->runnable_load_sum =
2897 				decay_load(cfs_rq->runnable_load_sum, periods + 1);
2898 		}
2899 		sa->util_sum = decay_load((u64)(sa->util_sum), periods + 1);
2900 
2901 		/* Efficiently calculate \sum (1..n_period) 1024*y^i */
2902 		contrib = __compute_runnable_contrib(periods);
2903 		contrib = cap_scale(contrib, scale_freq);
2904 		if (weight) {
2905 			sa->load_sum += weight * contrib;
2906 			if (cfs_rq)
2907 				cfs_rq->runnable_load_sum += weight * contrib;
2908 		}
2909 		if (running)
2910 			sa->util_sum += contrib * scale_cpu;
2911 	}
2912 
2913 	/* Remainder of delta accrued against u_0` */
2914 	scaled_delta = cap_scale(delta, scale_freq);
2915 	if (weight) {
2916 		sa->load_sum += weight * scaled_delta;
2917 		if (cfs_rq)
2918 			cfs_rq->runnable_load_sum += weight * scaled_delta;
2919 	}
2920 	if (running)
2921 		sa->util_sum += scaled_delta * scale_cpu;
2922 
2923 	sa->period_contrib += delta;
2924 
2925 	if (decayed) {
2926 		sa->load_avg = div_u64(sa->load_sum, LOAD_AVG_MAX);
2927 		if (cfs_rq) {
2928 			cfs_rq->runnable_load_avg =
2929 				div_u64(cfs_rq->runnable_load_sum, LOAD_AVG_MAX);
2930 		}
2931 		sa->util_avg = sa->util_sum / LOAD_AVG_MAX;
2932 	}
2933 
2934 	return decayed;
2935 }
2936 
2937 /*
2938  * Signed add and clamp on underflow.
2939  *
2940  * Explicitly do a load-store to ensure the intermediate value never hits
2941  * memory. This allows lockless observations without ever seeing the negative
2942  * values.
2943  */
2944 #define add_positive(_ptr, _val) do {                           \
2945 	typeof(_ptr) ptr = (_ptr);                              \
2946 	typeof(_val) val = (_val);                              \
2947 	typeof(*ptr) res, var = READ_ONCE(*ptr);                \
2948 								\
2949 	res = var + val;                                        \
2950 								\
2951 	if (val < 0 && res > var)                               \
2952 		res = 0;                                        \
2953 								\
2954 	WRITE_ONCE(*ptr, res);                                  \
2955 } while (0)
2956 
2957 #ifdef CONFIG_FAIR_GROUP_SCHED
2958 /**
2959  * update_tg_load_avg - update the tg's load avg
2960  * @cfs_rq: the cfs_rq whose avg changed
2961  * @force: update regardless of how small the difference
2962  *
2963  * This function 'ensures': tg->load_avg := \Sum tg->cfs_rq[]->avg.load.
2964  * However, because tg->load_avg is a global value there are performance
2965  * considerations.
2966  *
2967  * In order to avoid having to look at the other cfs_rq's, we use a
2968  * differential update where we store the last value we propagated. This in
2969  * turn allows skipping updates if the differential is 'small'.
2970  *
2971  * Updating tg's load_avg is necessary before update_cfs_share() (which is
2972  * done) and effective_load() (which is not done because it is too costly).
2973  */
2974 static inline void update_tg_load_avg(struct cfs_rq *cfs_rq, int force)
2975 {
2976 	long delta = cfs_rq->avg.load_avg - cfs_rq->tg_load_avg_contrib;
2977 
2978 	/*
2979 	 * No need to update load_avg for root_task_group as it is not used.
2980 	 */
2981 	if (cfs_rq->tg == &root_task_group)
2982 		return;
2983 
2984 	if (force || abs(delta) > cfs_rq->tg_load_avg_contrib / 64) {
2985 		atomic_long_add(delta, &cfs_rq->tg->load_avg);
2986 		cfs_rq->tg_load_avg_contrib = cfs_rq->avg.load_avg;
2987 	}
2988 }
2989 
2990 /*
2991  * Called within set_task_rq() right before setting a task's cpu. The
2992  * caller only guarantees p->pi_lock is held; no other assumptions,
2993  * including the state of rq->lock, should be made.
2994  */
2995 void set_task_rq_fair(struct sched_entity *se,
2996 		      struct cfs_rq *prev, struct cfs_rq *next)
2997 {
2998 	if (!sched_feat(ATTACH_AGE_LOAD))
2999 		return;
3000 
3001 	/*
3002 	 * We are supposed to update the task to "current" time, then its up to
3003 	 * date and ready to go to new CPU/cfs_rq. But we have difficulty in
3004 	 * getting what current time is, so simply throw away the out-of-date
3005 	 * time. This will result in the wakee task is less decayed, but giving
3006 	 * the wakee more load sounds not bad.
3007 	 */
3008 	if (se->avg.last_update_time && prev) {
3009 		u64 p_last_update_time;
3010 		u64 n_last_update_time;
3011 
3012 #ifndef CONFIG_64BIT
3013 		u64 p_last_update_time_copy;
3014 		u64 n_last_update_time_copy;
3015 
3016 		do {
3017 			p_last_update_time_copy = prev->load_last_update_time_copy;
3018 			n_last_update_time_copy = next->load_last_update_time_copy;
3019 
3020 			smp_rmb();
3021 
3022 			p_last_update_time = prev->avg.last_update_time;
3023 			n_last_update_time = next->avg.last_update_time;
3024 
3025 		} while (p_last_update_time != p_last_update_time_copy ||
3026 			 n_last_update_time != n_last_update_time_copy);
3027 #else
3028 		p_last_update_time = prev->avg.last_update_time;
3029 		n_last_update_time = next->avg.last_update_time;
3030 #endif
3031 		__update_load_avg(p_last_update_time, cpu_of(rq_of(prev)),
3032 				  &se->avg, 0, 0, NULL);
3033 		se->avg.last_update_time = n_last_update_time;
3034 	}
3035 }
3036 
3037 /* Take into account change of utilization of a child task group */
3038 static inline void
3039 update_tg_cfs_util(struct cfs_rq *cfs_rq, struct sched_entity *se)
3040 {
3041 	struct cfs_rq *gcfs_rq = group_cfs_rq(se);
3042 	long delta = gcfs_rq->avg.util_avg - se->avg.util_avg;
3043 
3044 	/* Nothing to update */
3045 	if (!delta)
3046 		return;
3047 
3048 	/* Set new sched_entity's utilization */
3049 	se->avg.util_avg = gcfs_rq->avg.util_avg;
3050 	se->avg.util_sum = se->avg.util_avg * LOAD_AVG_MAX;
3051 
3052 	/* Update parent cfs_rq utilization */
3053 	add_positive(&cfs_rq->avg.util_avg, delta);
3054 	cfs_rq->avg.util_sum = cfs_rq->avg.util_avg * LOAD_AVG_MAX;
3055 }
3056 
3057 /* Take into account change of load of a child task group */
3058 static inline void
3059 update_tg_cfs_load(struct cfs_rq *cfs_rq, struct sched_entity *se)
3060 {
3061 	struct cfs_rq *gcfs_rq = group_cfs_rq(se);
3062 	long delta, load = gcfs_rq->avg.load_avg;
3063 
3064 	/*
3065 	 * If the load of group cfs_rq is null, the load of the
3066 	 * sched_entity will also be null so we can skip the formula
3067 	 */
3068 	if (load) {
3069 		long tg_load;
3070 
3071 		/* Get tg's load and ensure tg_load > 0 */
3072 		tg_load = atomic_long_read(&gcfs_rq->tg->load_avg) + 1;
3073 
3074 		/* Ensure tg_load >= load and updated with current load*/
3075 		tg_load -= gcfs_rq->tg_load_avg_contrib;
3076 		tg_load += load;
3077 
3078 		/*
3079 		 * We need to compute a correction term in the case that the
3080 		 * task group is consuming more CPU than a task of equal
3081 		 * weight. A task with a weight equals to tg->shares will have
3082 		 * a load less or equal to scale_load_down(tg->shares).
3083 		 * Similarly, the sched_entities that represent the task group
3084 		 * at parent level, can't have a load higher than
3085 		 * scale_load_down(tg->shares). And the Sum of sched_entities'
3086 		 * load must be <= scale_load_down(tg->shares).
3087 		 */
3088 		if (tg_load > scale_load_down(gcfs_rq->tg->shares)) {
3089 			/* scale gcfs_rq's load into tg's shares*/
3090 			load *= scale_load_down(gcfs_rq->tg->shares);
3091 			load /= tg_load;
3092 		}
3093 	}
3094 
3095 	delta = load - se->avg.load_avg;
3096 
3097 	/* Nothing to update */
3098 	if (!delta)
3099 		return;
3100 
3101 	/* Set new sched_entity's load */
3102 	se->avg.load_avg = load;
3103 	se->avg.load_sum = se->avg.load_avg * LOAD_AVG_MAX;
3104 
3105 	/* Update parent cfs_rq load */
3106 	add_positive(&cfs_rq->avg.load_avg, delta);
3107 	cfs_rq->avg.load_sum = cfs_rq->avg.load_avg * LOAD_AVG_MAX;
3108 
3109 	/*
3110 	 * If the sched_entity is already enqueued, we also have to update the
3111 	 * runnable load avg.
3112 	 */
3113 	if (se->on_rq) {
3114 		/* Update parent cfs_rq runnable_load_avg */
3115 		add_positive(&cfs_rq->runnable_load_avg, delta);
3116 		cfs_rq->runnable_load_sum = cfs_rq->runnable_load_avg * LOAD_AVG_MAX;
3117 	}
3118 }
3119 
3120 static inline void set_tg_cfs_propagate(struct cfs_rq *cfs_rq)
3121 {
3122 	cfs_rq->propagate_avg = 1;
3123 }
3124 
3125 static inline int test_and_clear_tg_cfs_propagate(struct sched_entity *se)
3126 {
3127 	struct cfs_rq *cfs_rq = group_cfs_rq(se);
3128 
3129 	if (!cfs_rq->propagate_avg)
3130 		return 0;
3131 
3132 	cfs_rq->propagate_avg = 0;
3133 	return 1;
3134 }
3135 
3136 /* Update task and its cfs_rq load average */
3137 static inline int propagate_entity_load_avg(struct sched_entity *se)
3138 {
3139 	struct cfs_rq *cfs_rq;
3140 
3141 	if (entity_is_task(se))
3142 		return 0;
3143 
3144 	if (!test_and_clear_tg_cfs_propagate(se))
3145 		return 0;
3146 
3147 	cfs_rq = cfs_rq_of(se);
3148 
3149 	set_tg_cfs_propagate(cfs_rq);
3150 
3151 	update_tg_cfs_util(cfs_rq, se);
3152 	update_tg_cfs_load(cfs_rq, se);
3153 
3154 	return 1;
3155 }
3156 
3157 #else /* CONFIG_FAIR_GROUP_SCHED */
3158 
3159 static inline void update_tg_load_avg(struct cfs_rq *cfs_rq, int force) {}
3160 
3161 static inline int propagate_entity_load_avg(struct sched_entity *se)
3162 {
3163 	return 0;
3164 }
3165 
3166 static inline void set_tg_cfs_propagate(struct cfs_rq *cfs_rq) {}
3167 
3168 #endif /* CONFIG_FAIR_GROUP_SCHED */
3169 
3170 static inline void cfs_rq_util_change(struct cfs_rq *cfs_rq)
3171 {
3172 	if (&this_rq()->cfs == cfs_rq) {
3173 		/*
3174 		 * There are a few boundary cases this might miss but it should
3175 		 * get called often enough that that should (hopefully) not be
3176 		 * a real problem -- added to that it only calls on the local
3177 		 * CPU, so if we enqueue remotely we'll miss an update, but
3178 		 * the next tick/schedule should update.
3179 		 *
3180 		 * It will not get called when we go idle, because the idle
3181 		 * thread is a different class (!fair), nor will the utilization
3182 		 * number include things like RT tasks.
3183 		 *
3184 		 * As is, the util number is not freq-invariant (we'd have to
3185 		 * implement arch_scale_freq_capacity() for that).
3186 		 *
3187 		 * See cpu_util().
3188 		 */
3189 		cpufreq_update_util(rq_of(cfs_rq), 0);
3190 	}
3191 }
3192 
3193 /*
3194  * Unsigned subtract and clamp on underflow.
3195  *
3196  * Explicitly do a load-store to ensure the intermediate value never hits
3197  * memory. This allows lockless observations without ever seeing the negative
3198  * values.
3199  */
3200 #define sub_positive(_ptr, _val) do {				\
3201 	typeof(_ptr) ptr = (_ptr);				\
3202 	typeof(*ptr) val = (_val);				\
3203 	typeof(*ptr) res, var = READ_ONCE(*ptr);		\
3204 	res = var - val;					\
3205 	if (res > var)						\
3206 		res = 0;					\
3207 	WRITE_ONCE(*ptr, res);					\
3208 } while (0)
3209 
3210 /**
3211  * update_cfs_rq_load_avg - update the cfs_rq's load/util averages
3212  * @now: current time, as per cfs_rq_clock_task()
3213  * @cfs_rq: cfs_rq to update
3214  * @update_freq: should we call cfs_rq_util_change() or will the call do so
3215  *
3216  * The cfs_rq avg is the direct sum of all its entities (blocked and runnable)
3217  * avg. The immediate corollary is that all (fair) tasks must be attached, see
3218  * post_init_entity_util_avg().
3219  *
3220  * cfs_rq->avg is used for task_h_load() and update_cfs_share() for example.
3221  *
3222  * Returns true if the load decayed or we removed load.
3223  *
3224  * Since both these conditions indicate a changed cfs_rq->avg.load we should
3225  * call update_tg_load_avg() when this function returns true.
3226  */
3227 static inline int
3228 update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq, bool update_freq)
3229 {
3230 	struct sched_avg *sa = &cfs_rq->avg;
3231 	int decayed, removed_load = 0, removed_util = 0;
3232 
3233 	if (atomic_long_read(&cfs_rq->removed_load_avg)) {
3234 		s64 r = atomic_long_xchg(&cfs_rq->removed_load_avg, 0);
3235 		sub_positive(&sa->load_avg, r);
3236 		sub_positive(&sa->load_sum, r * LOAD_AVG_MAX);
3237 		removed_load = 1;
3238 		set_tg_cfs_propagate(cfs_rq);
3239 	}
3240 
3241 	if (atomic_long_read(&cfs_rq->removed_util_avg)) {
3242 		long r = atomic_long_xchg(&cfs_rq->removed_util_avg, 0);
3243 		sub_positive(&sa->util_avg, r);
3244 		sub_positive(&sa->util_sum, r * LOAD_AVG_MAX);
3245 		removed_util = 1;
3246 		set_tg_cfs_propagate(cfs_rq);
3247 	}
3248 
3249 	decayed = __update_load_avg(now, cpu_of(rq_of(cfs_rq)), sa,
3250 		scale_load_down(cfs_rq->load.weight), cfs_rq->curr != NULL, cfs_rq);
3251 
3252 #ifndef CONFIG_64BIT
3253 	smp_wmb();
3254 	cfs_rq->load_last_update_time_copy = sa->last_update_time;
3255 #endif
3256 
3257 	if (update_freq && (decayed || removed_util))
3258 		cfs_rq_util_change(cfs_rq);
3259 
3260 	return decayed || removed_load;
3261 }
3262 
3263 /*
3264  * Optional action to be done while updating the load average
3265  */
3266 #define UPDATE_TG	0x1
3267 #define SKIP_AGE_LOAD	0x2
3268 
3269 /* Update task and its cfs_rq load average */
3270 static inline void update_load_avg(struct sched_entity *se, int flags)
3271 {
3272 	struct cfs_rq *cfs_rq = cfs_rq_of(se);
3273 	u64 now = cfs_rq_clock_task(cfs_rq);
3274 	struct rq *rq = rq_of(cfs_rq);
3275 	int cpu = cpu_of(rq);
3276 	int decayed;
3277 
3278 	/*
3279 	 * Track task load average for carrying it to new CPU after migrated, and
3280 	 * track group sched_entity load average for task_h_load calc in migration
3281 	 */
3282 	if (se->avg.last_update_time && !(flags & SKIP_AGE_LOAD)) {
3283 		__update_load_avg(now, cpu, &se->avg,
3284 			  se->on_rq * scale_load_down(se->load.weight),
3285 			  cfs_rq->curr == se, NULL);
3286 	}
3287 
3288 	decayed  = update_cfs_rq_load_avg(now, cfs_rq, true);
3289 	decayed |= propagate_entity_load_avg(se);
3290 
3291 	if (decayed && (flags & UPDATE_TG))
3292 		update_tg_load_avg(cfs_rq, 0);
3293 }
3294 
3295 /**
3296  * attach_entity_load_avg - attach this entity to its cfs_rq load avg
3297  * @cfs_rq: cfs_rq to attach to
3298  * @se: sched_entity to attach
3299  *
3300  * Must call update_cfs_rq_load_avg() before this, since we rely on
3301  * cfs_rq->avg.last_update_time being current.
3302  */
3303 static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
3304 {
3305 	se->avg.last_update_time = cfs_rq->avg.last_update_time;
3306 	cfs_rq->avg.load_avg += se->avg.load_avg;
3307 	cfs_rq->avg.load_sum += se->avg.load_sum;
3308 	cfs_rq->avg.util_avg += se->avg.util_avg;
3309 	cfs_rq->avg.util_sum += se->avg.util_sum;
3310 	set_tg_cfs_propagate(cfs_rq);
3311 
3312 	cfs_rq_util_change(cfs_rq);
3313 }
3314 
3315 /**
3316  * detach_entity_load_avg - detach this entity from its cfs_rq load avg
3317  * @cfs_rq: cfs_rq to detach from
3318  * @se: sched_entity to detach
3319  *
3320  * Must call update_cfs_rq_load_avg() before this, since we rely on
3321  * cfs_rq->avg.last_update_time being current.
3322  */
3323 static void detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
3324 {
3325 
3326 	sub_positive(&cfs_rq->avg.load_avg, se->avg.load_avg);
3327 	sub_positive(&cfs_rq->avg.load_sum, se->avg.load_sum);
3328 	sub_positive(&cfs_rq->avg.util_avg, se->avg.util_avg);
3329 	sub_positive(&cfs_rq->avg.util_sum, se->avg.util_sum);
3330 	set_tg_cfs_propagate(cfs_rq);
3331 
3332 	cfs_rq_util_change(cfs_rq);
3333 }
3334 
3335 /* Add the load generated by se into cfs_rq's load average */
3336 static inline void
3337 enqueue_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
3338 {
3339 	struct sched_avg *sa = &se->avg;
3340 
3341 	cfs_rq->runnable_load_avg += sa->load_avg;
3342 	cfs_rq->runnable_load_sum += sa->load_sum;
3343 
3344 	if (!sa->last_update_time) {
3345 		attach_entity_load_avg(cfs_rq, se);
3346 		update_tg_load_avg(cfs_rq, 0);
3347 	}
3348 }
3349 
3350 /* Remove the runnable load generated by se from cfs_rq's runnable load average */
3351 static inline void
3352 dequeue_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
3353 {
3354 	cfs_rq->runnable_load_avg =
3355 		max_t(long, cfs_rq->runnable_load_avg - se->avg.load_avg, 0);
3356 	cfs_rq->runnable_load_sum =
3357 		max_t(s64,  cfs_rq->runnable_load_sum - se->avg.load_sum, 0);
3358 }
3359 
3360 #ifndef CONFIG_64BIT
3361 static inline u64 cfs_rq_last_update_time(struct cfs_rq *cfs_rq)
3362 {
3363 	u64 last_update_time_copy;
3364 	u64 last_update_time;
3365 
3366 	do {
3367 		last_update_time_copy = cfs_rq->load_last_update_time_copy;
3368 		smp_rmb();
3369 		last_update_time = cfs_rq->avg.last_update_time;
3370 	} while (last_update_time != last_update_time_copy);
3371 
3372 	return last_update_time;
3373 }
3374 #else
3375 static inline u64 cfs_rq_last_update_time(struct cfs_rq *cfs_rq)
3376 {
3377 	return cfs_rq->avg.last_update_time;
3378 }
3379 #endif
3380 
3381 /*
3382  * Synchronize entity load avg of dequeued entity without locking
3383  * the previous rq.
3384  */
3385 void sync_entity_load_avg(struct sched_entity *se)
3386 {
3387 	struct cfs_rq *cfs_rq = cfs_rq_of(se);
3388 	u64 last_update_time;
3389 
3390 	last_update_time = cfs_rq_last_update_time(cfs_rq);
3391 	__update_load_avg(last_update_time, cpu_of(rq_of(cfs_rq)), &se->avg, 0, 0, NULL);
3392 }
3393 
3394 /*
3395  * Task first catches up with cfs_rq, and then subtract
3396  * itself from the cfs_rq (task must be off the queue now).
3397  */
3398 void remove_entity_load_avg(struct sched_entity *se)
3399 {
3400 	struct cfs_rq *cfs_rq = cfs_rq_of(se);
3401 
3402 	/*
3403 	 * tasks cannot exit without having gone through wake_up_new_task() ->
3404 	 * post_init_entity_util_avg() which will have added things to the
3405 	 * cfs_rq, so we can remove unconditionally.
3406 	 *
3407 	 * Similarly for groups, they will have passed through
3408 	 * post_init_entity_util_avg() before unregister_sched_fair_group()
3409 	 * calls this.
3410 	 */
3411 
3412 	sync_entity_load_avg(se);
3413 	atomic_long_add(se->avg.load_avg, &cfs_rq->removed_load_avg);
3414 	atomic_long_add(se->avg.util_avg, &cfs_rq->removed_util_avg);
3415 }
3416 
3417 static inline unsigned long cfs_rq_runnable_load_avg(struct cfs_rq *cfs_rq)
3418 {
3419 	return cfs_rq->runnable_load_avg;
3420 }
3421 
3422 static inline unsigned long cfs_rq_load_avg(struct cfs_rq *cfs_rq)
3423 {
3424 	return cfs_rq->avg.load_avg;
3425 }
3426 
3427 static int idle_balance(struct rq *this_rq);
3428 
3429 #else /* CONFIG_SMP */
3430 
3431 static inline int
3432 update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq, bool update_freq)
3433 {
3434 	return 0;
3435 }
3436 
3437 #define UPDATE_TG	0x0
3438 #define SKIP_AGE_LOAD	0x0
3439 
3440 static inline void update_load_avg(struct sched_entity *se, int not_used1)
3441 {
3442 	cpufreq_update_util(rq_of(cfs_rq_of(se)), 0);
3443 }
3444 
3445 static inline void
3446 enqueue_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {}
3447 static inline void
3448 dequeue_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {}
3449 static inline void remove_entity_load_avg(struct sched_entity *se) {}
3450 
3451 static inline void
3452 attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {}
3453 static inline void
3454 detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {}
3455 
3456 static inline int idle_balance(struct rq *rq)
3457 {
3458 	return 0;
3459 }
3460 
3461 #endif /* CONFIG_SMP */
3462 
3463 static void check_spread(struct cfs_rq *cfs_rq, struct sched_entity *se)
3464 {
3465 #ifdef CONFIG_SCHED_DEBUG
3466 	s64 d = se->vruntime - cfs_rq->min_vruntime;
3467 
3468 	if (d < 0)
3469 		d = -d;
3470 
3471 	if (d > 3*sysctl_sched_latency)
3472 		schedstat_inc(cfs_rq->nr_spread_over);
3473 #endif
3474 }
3475 
3476 static void
3477 place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
3478 {
3479 	u64 vruntime = cfs_rq->min_vruntime;
3480 
3481 	/*
3482 	 * The 'current' period is already promised to the current tasks,
3483 	 * however the extra weight of the new task will slow them down a
3484 	 * little, place the new task so that it fits in the slot that
3485 	 * stays open at the end.
3486 	 */
3487 	if (initial && sched_feat(START_DEBIT))
3488 		vruntime += sched_vslice(cfs_rq, se);
3489 
3490 	/* sleeps up to a single latency don't count. */
3491 	if (!initial) {
3492 		unsigned long thresh = sysctl_sched_latency;
3493 
3494 		/*
3495 		 * Halve their sleep time's effect, to allow
3496 		 * for a gentler effect of sleepers:
3497 		 */
3498 		if (sched_feat(GENTLE_FAIR_SLEEPERS))
3499 			thresh >>= 1;
3500 
3501 		vruntime -= thresh;
3502 	}
3503 
3504 	/* ensure we never gain time by being placed backwards. */
3505 	se->vruntime = max_vruntime(se->vruntime, vruntime);
3506 }
3507 
3508 static void check_enqueue_throttle(struct cfs_rq *cfs_rq);
3509 
3510 static inline void check_schedstat_required(void)
3511 {
3512 #ifdef CONFIG_SCHEDSTATS
3513 	if (schedstat_enabled())
3514 		return;
3515 
3516 	/* Force schedstat enabled if a dependent tracepoint is active */
3517 	if (trace_sched_stat_wait_enabled()    ||
3518 			trace_sched_stat_sleep_enabled()   ||
3519 			trace_sched_stat_iowait_enabled()  ||
3520 			trace_sched_stat_blocked_enabled() ||
3521 			trace_sched_stat_runtime_enabled())  {
3522 		printk_deferred_once("Scheduler tracepoints stat_sleep, stat_iowait, "
3523 			     "stat_blocked and stat_runtime require the "
3524 			     "kernel parameter schedstats=enabled or "
3525 			     "kernel.sched_schedstats=1\n");
3526 	}
3527 #endif
3528 }
3529 
3530 
3531 /*
3532  * MIGRATION
3533  *
3534  *	dequeue
3535  *	  update_curr()
3536  *	    update_min_vruntime()
3537  *	  vruntime -= min_vruntime
3538  *
3539  *	enqueue
3540  *	  update_curr()
3541  *	    update_min_vruntime()
3542  *	  vruntime += min_vruntime
3543  *
3544  * this way the vruntime transition between RQs is done when both
3545  * min_vruntime are up-to-date.
3546  *
3547  * WAKEUP (remote)
3548  *
3549  *	->migrate_task_rq_fair() (p->state == TASK_WAKING)
3550  *	  vruntime -= min_vruntime
3551  *
3552  *	enqueue
3553  *	  update_curr()
3554  *	    update_min_vruntime()
3555  *	  vruntime += min_vruntime
3556  *
3557  * this way we don't have the most up-to-date min_vruntime on the originating
3558  * CPU and an up-to-date min_vruntime on the destination CPU.
3559  */
3560 
3561 static void
3562 enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
3563 {
3564 	bool renorm = !(flags & ENQUEUE_WAKEUP) || (flags & ENQUEUE_MIGRATED);
3565 	bool curr = cfs_rq->curr == se;
3566 
3567 	/*
3568 	 * If we're the current task, we must renormalise before calling
3569 	 * update_curr().
3570 	 */
3571 	if (renorm && curr)
3572 		se->vruntime += cfs_rq->min_vruntime;
3573 
3574 	update_curr(cfs_rq);
3575 
3576 	/*
3577 	 * Otherwise, renormalise after, such that we're placed at the current
3578 	 * moment in time, instead of some random moment in the past. Being
3579 	 * placed in the past could significantly boost this task to the
3580 	 * fairness detriment of existing tasks.
3581 	 */
3582 	if (renorm && !curr)
3583 		se->vruntime += cfs_rq->min_vruntime;
3584 
3585 	update_load_avg(se, UPDATE_TG);
3586 	enqueue_entity_load_avg(cfs_rq, se);
3587 	account_entity_enqueue(cfs_rq, se);
3588 	update_cfs_shares(cfs_rq);
3589 
3590 	if (flags & ENQUEUE_WAKEUP)
3591 		place_entity(cfs_rq, se, 0);
3592 
3593 	check_schedstat_required();
3594 	update_stats_enqueue(cfs_rq, se, flags);
3595 	check_spread(cfs_rq, se);
3596 	if (!curr)
3597 		__enqueue_entity(cfs_rq, se);
3598 	se->on_rq = 1;
3599 
3600 	if (cfs_rq->nr_running == 1) {
3601 		list_add_leaf_cfs_rq(cfs_rq);
3602 		check_enqueue_throttle(cfs_rq);
3603 	}
3604 }
3605 
3606 static void __clear_buddies_last(struct sched_entity *se)
3607 {
3608 	for_each_sched_entity(se) {
3609 		struct cfs_rq *cfs_rq = cfs_rq_of(se);
3610 		if (cfs_rq->last != se)
3611 			break;
3612 
3613 		cfs_rq->last = NULL;
3614 	}
3615 }
3616 
3617 static void __clear_buddies_next(struct sched_entity *se)
3618 {
3619 	for_each_sched_entity(se) {
3620 		struct cfs_rq *cfs_rq = cfs_rq_of(se);
3621 		if (cfs_rq->next != se)
3622 			break;
3623 
3624 		cfs_rq->next = NULL;
3625 	}
3626 }
3627 
3628 static void __clear_buddies_skip(struct sched_entity *se)
3629 {
3630 	for_each_sched_entity(se) {
3631 		struct cfs_rq *cfs_rq = cfs_rq_of(se);
3632 		if (cfs_rq->skip != se)
3633 			break;
3634 
3635 		cfs_rq->skip = NULL;
3636 	}
3637 }
3638 
3639 static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se)
3640 {
3641 	if (cfs_rq->last == se)
3642 		__clear_buddies_last(se);
3643 
3644 	if (cfs_rq->next == se)
3645 		__clear_buddies_next(se);
3646 
3647 	if (cfs_rq->skip == se)
3648 		__clear_buddies_skip(se);
3649 }
3650 
3651 static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq);
3652 
3653 static void
3654 dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
3655 {
3656 	/*
3657 	 * Update run-time statistics of the 'current'.
3658 	 */
3659 	update_curr(cfs_rq);
3660 	update_load_avg(se, UPDATE_TG);
3661 	dequeue_entity_load_avg(cfs_rq, se);
3662 
3663 	update_stats_dequeue(cfs_rq, se, flags);
3664 
3665 	clear_buddies(cfs_rq, se);
3666 
3667 	if (se != cfs_rq->curr)
3668 		__dequeue_entity(cfs_rq, se);
3669 	se->on_rq = 0;
3670 	account_entity_dequeue(cfs_rq, se);
3671 
3672 	/*
3673 	 * Normalize after update_curr(); which will also have moved
3674 	 * min_vruntime if @se is the one holding it back. But before doing
3675 	 * update_min_vruntime() again, which will discount @se's position and
3676 	 * can move min_vruntime forward still more.
3677 	 */
3678 	if (!(flags & DEQUEUE_SLEEP))
3679 		se->vruntime -= cfs_rq->min_vruntime;
3680 
3681 	/* return excess runtime on last dequeue */
3682 	return_cfs_rq_runtime(cfs_rq);
3683 
3684 	update_cfs_shares(cfs_rq);
3685 
3686 	/*
3687 	 * Now advance min_vruntime if @se was the entity holding it back,
3688 	 * except when: DEQUEUE_SAVE && !DEQUEUE_MOVE, in this case we'll be
3689 	 * put back on, and if we advance min_vruntime, we'll be placed back
3690 	 * further than we started -- ie. we'll be penalized.
3691 	 */
3692 	if ((flags & (DEQUEUE_SAVE | DEQUEUE_MOVE)) == DEQUEUE_SAVE)
3693 		update_min_vruntime(cfs_rq);
3694 }
3695 
3696 /*
3697  * Preempt the current task with a newly woken task if needed:
3698  */
3699 static void
3700 check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
3701 {
3702 	unsigned long ideal_runtime, delta_exec;
3703 	struct sched_entity *se;
3704 	s64 delta;
3705 
3706 	ideal_runtime = sched_slice(cfs_rq, curr);
3707 	delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
3708 	if (delta_exec > ideal_runtime) {
3709 		resched_curr(rq_of(cfs_rq));
3710 		/*
3711 		 * The current task ran long enough, ensure it doesn't get
3712 		 * re-elected due to buddy favours.
3713 		 */
3714 		clear_buddies(cfs_rq, curr);
3715 		return;
3716 	}
3717 
3718 	/*
3719 	 * Ensure that a task that missed wakeup preemption by a
3720 	 * narrow margin doesn't have to wait for a full slice.
3721 	 * This also mitigates buddy induced latencies under load.
3722 	 */
3723 	if (delta_exec < sysctl_sched_min_granularity)
3724 		return;
3725 
3726 	se = __pick_first_entity(cfs_rq);
3727 	delta = curr->vruntime - se->vruntime;
3728 
3729 	if (delta < 0)
3730 		return;
3731 
3732 	if (delta > ideal_runtime)
3733 		resched_curr(rq_of(cfs_rq));
3734 }
3735 
3736 static void
3737 set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
3738 {
3739 	/* 'current' is not kept within the tree. */
3740 	if (se->on_rq) {
3741 		/*
3742 		 * Any task has to be enqueued before it get to execute on
3743 		 * a CPU. So account for the time it spent waiting on the
3744 		 * runqueue.
3745 		 */
3746 		update_stats_wait_end(cfs_rq, se);
3747 		__dequeue_entity(cfs_rq, se);
3748 		update_load_avg(se, UPDATE_TG);
3749 	}
3750 
3751 	update_stats_curr_start(cfs_rq, se);
3752 	cfs_rq->curr = se;
3753 
3754 	/*
3755 	 * Track our maximum slice length, if the CPU's load is at
3756 	 * least twice that of our own weight (i.e. dont track it
3757 	 * when there are only lesser-weight tasks around):
3758 	 */
3759 	if (schedstat_enabled() && rq_of(cfs_rq)->load.weight >= 2*se->load.weight) {
3760 		schedstat_set(se->statistics.slice_max,
3761 			max((u64)schedstat_val(se->statistics.slice_max),
3762 			    se->sum_exec_runtime - se->prev_sum_exec_runtime));
3763 	}
3764 
3765 	se->prev_sum_exec_runtime = se->sum_exec_runtime;
3766 }
3767 
3768 static int
3769 wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se);
3770 
3771 /*
3772  * Pick the next process, keeping these things in mind, in this order:
3773  * 1) keep things fair between processes/task groups
3774  * 2) pick the "next" process, since someone really wants that to run
3775  * 3) pick the "last" process, for cache locality
3776  * 4) do not run the "skip" process, if something else is available
3777  */
3778 static struct sched_entity *
3779 pick_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *curr)
3780 {
3781 	struct sched_entity *left = __pick_first_entity(cfs_rq);
3782 	struct sched_entity *se;
3783 
3784 	/*
3785 	 * If curr is set we have to see if its left of the leftmost entity
3786 	 * still in the tree, provided there was anything in the tree at all.
3787 	 */
3788 	if (!left || (curr && entity_before(curr, left)))
3789 		left = curr;
3790 
3791 	se = left; /* ideally we run the leftmost entity */
3792 
3793 	/*
3794 	 * Avoid running the skip buddy, if running something else can
3795 	 * be done without getting too unfair.
3796 	 */
3797 	if (cfs_rq->skip == se) {
3798 		struct sched_entity *second;
3799 
3800 		if (se == curr) {
3801 			second = __pick_first_entity(cfs_rq);
3802 		} else {
3803 			second = __pick_next_entity(se);
3804 			if (!second || (curr && entity_before(curr, second)))
3805 				second = curr;
3806 		}
3807 
3808 		if (second && wakeup_preempt_entity(second, left) < 1)
3809 			se = second;
3810 	}
3811 
3812 	/*
3813 	 * Prefer last buddy, try to return the CPU to a preempted task.
3814 	 */
3815 	if (cfs_rq->last && wakeup_preempt_entity(cfs_rq->last, left) < 1)
3816 		se = cfs_rq->last;
3817 
3818 	/*
3819 	 * Someone really wants this to run. If it's not unfair, run it.
3820 	 */
3821 	if (cfs_rq->next && wakeup_preempt_entity(cfs_rq->next, left) < 1)
3822 		se = cfs_rq->next;
3823 
3824 	clear_buddies(cfs_rq, se);
3825 
3826 	return se;
3827 }
3828 
3829 static bool check_cfs_rq_runtime(struct cfs_rq *cfs_rq);
3830 
3831 static void put_prev_entity(struct cfs_rq *cfs_rq, struct sched_entity *prev)
3832 {
3833 	/*
3834 	 * If still on the runqueue then deactivate_task()
3835 	 * was not called and update_curr() has to be done:
3836 	 */
3837 	if (prev->on_rq)
3838 		update_curr(cfs_rq);
3839 
3840 	/* throttle cfs_rqs exceeding runtime */
3841 	check_cfs_rq_runtime(cfs_rq);
3842 
3843 	check_spread(cfs_rq, prev);
3844 
3845 	if (prev->on_rq) {
3846 		update_stats_wait_start(cfs_rq, prev);
3847 		/* Put 'current' back into the tree. */
3848 		__enqueue_entity(cfs_rq, prev);
3849 		/* in !on_rq case, update occurred at dequeue */
3850 		update_load_avg(prev, 0);
3851 	}
3852 	cfs_rq->curr = NULL;
3853 }
3854 
3855 static void
3856 entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued)
3857 {
3858 	/*
3859 	 * Update run-time statistics of the 'current'.
3860 	 */
3861 	update_curr(cfs_rq);
3862 
3863 	/*
3864 	 * Ensure that runnable average is periodically updated.
3865 	 */
3866 	update_load_avg(curr, UPDATE_TG);
3867 	update_cfs_shares(cfs_rq);
3868 
3869 #ifdef CONFIG_SCHED_HRTICK
3870 	/*
3871 	 * queued ticks are scheduled to match the slice, so don't bother
3872 	 * validating it and just reschedule.
3873 	 */
3874 	if (queued) {
3875 		resched_curr(rq_of(cfs_rq));
3876 		return;
3877 	}
3878 	/*
3879 	 * don't let the period tick interfere with the hrtick preemption
3880 	 */
3881 	if (!sched_feat(DOUBLE_TICK) &&
3882 			hrtimer_active(&rq_of(cfs_rq)->hrtick_timer))
3883 		return;
3884 #endif
3885 
3886 	if (cfs_rq->nr_running > 1)
3887 		check_preempt_tick(cfs_rq, curr);
3888 }
3889 
3890 
3891 /**************************************************
3892  * CFS bandwidth control machinery
3893  */
3894 
3895 #ifdef CONFIG_CFS_BANDWIDTH
3896 
3897 #ifdef HAVE_JUMP_LABEL
3898 static struct static_key __cfs_bandwidth_used;
3899 
3900 static inline bool cfs_bandwidth_used(void)
3901 {
3902 	return static_key_false(&__cfs_bandwidth_used);
3903 }
3904 
3905 void cfs_bandwidth_usage_inc(void)
3906 {
3907 	static_key_slow_inc(&__cfs_bandwidth_used);
3908 }
3909 
3910 void cfs_bandwidth_usage_dec(void)
3911 {
3912 	static_key_slow_dec(&__cfs_bandwidth_used);
3913 }
3914 #else /* HAVE_JUMP_LABEL */
3915 static bool cfs_bandwidth_used(void)
3916 {
3917 	return true;
3918 }
3919 
3920 void cfs_bandwidth_usage_inc(void) {}
3921 void cfs_bandwidth_usage_dec(void) {}
3922 #endif /* HAVE_JUMP_LABEL */
3923 
3924 /*
3925  * default period for cfs group bandwidth.
3926  * default: 0.1s, units: nanoseconds
3927  */
3928 static inline u64 default_cfs_period(void)
3929 {
3930 	return 100000000ULL;
3931 }
3932 
3933 static inline u64 sched_cfs_bandwidth_slice(void)
3934 {
3935 	return (u64)sysctl_sched_cfs_bandwidth_slice * NSEC_PER_USEC;
3936 }
3937 
3938 /*
3939  * Replenish runtime according to assigned quota and update expiration time.
3940  * We use sched_clock_cpu directly instead of rq->clock to avoid adding
3941  * additional synchronization around rq->lock.
3942  *
3943  * requires cfs_b->lock
3944  */
3945 void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b)
3946 {
3947 	u64 now;
3948 
3949 	if (cfs_b->quota == RUNTIME_INF)
3950 		return;
3951 
3952 	now = sched_clock_cpu(smp_processor_id());
3953 	cfs_b->runtime = cfs_b->quota;
3954 	cfs_b->runtime_expires = now + ktime_to_ns(cfs_b->period);
3955 }
3956 
3957 static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg)
3958 {
3959 	return &tg->cfs_bandwidth;
3960 }
3961 
3962 /* rq->task_clock normalized against any time this cfs_rq has spent throttled */
3963 static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq)
3964 {
3965 	if (unlikely(cfs_rq->throttle_count))
3966 		return cfs_rq->throttled_clock_task - cfs_rq->throttled_clock_task_time;
3967 
3968 	return rq_clock_task(rq_of(cfs_rq)) - cfs_rq->throttled_clock_task_time;
3969 }
3970 
3971 /* returns 0 on failure to allocate runtime */
3972 static int assign_cfs_rq_runtime(struct cfs_rq *cfs_rq)
3973 {
3974 	struct task_group *tg = cfs_rq->tg;
3975 	struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(tg);
3976 	u64 amount = 0, min_amount, expires;
3977 
3978 	/* note: this is a positive sum as runtime_remaining <= 0 */
3979 	min_amount = sched_cfs_bandwidth_slice() - cfs_rq->runtime_remaining;
3980 
3981 	raw_spin_lock(&cfs_b->lock);
3982 	if (cfs_b->quota == RUNTIME_INF)
3983 		amount = min_amount;
3984 	else {
3985 		start_cfs_bandwidth(cfs_b);
3986 
3987 		if (cfs_b->runtime > 0) {
3988 			amount = min(cfs_b->runtime, min_amount);
3989 			cfs_b->runtime -= amount;
3990 			cfs_b->idle = 0;
3991 		}
3992 	}
3993 	expires = cfs_b->runtime_expires;
3994 	raw_spin_unlock(&cfs_b->lock);
3995 
3996 	cfs_rq->runtime_remaining += amount;
3997 	/*
3998 	 * we may have advanced our local expiration to account for allowed
3999 	 * spread between our sched_clock and the one on which runtime was
4000 	 * issued.
4001 	 */
4002 	if ((s64)(expires - cfs_rq->runtime_expires) > 0)
4003 		cfs_rq->runtime_expires = expires;
4004 
4005 	return cfs_rq->runtime_remaining > 0;
4006 }
4007 
4008 /*
4009  * Note: This depends on the synchronization provided by sched_clock and the
4010  * fact that rq->clock snapshots this value.
4011  */
4012 static void expire_cfs_rq_runtime(struct cfs_rq *cfs_rq)
4013 {
4014 	struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
4015 
4016 	/* if the deadline is ahead of our clock, nothing to do */
4017 	if (likely((s64)(rq_clock(rq_of(cfs_rq)) - cfs_rq->runtime_expires) < 0))
4018 		return;
4019 
4020 	if (cfs_rq->runtime_remaining < 0)
4021 		return;
4022 
4023 	/*
4024 	 * If the local deadline has passed we have to consider the
4025 	 * possibility that our sched_clock is 'fast' and the global deadline
4026 	 * has not truly expired.
4027 	 *
4028 	 * Fortunately we can check determine whether this the case by checking
4029 	 * whether the global deadline has advanced. It is valid to compare
4030 	 * cfs_b->runtime_expires without any locks since we only care about
4031 	 * exact equality, so a partial write will still work.
4032 	 */
4033 
4034 	if (cfs_rq->runtime_expires != cfs_b->runtime_expires) {
4035 		/* extend local deadline, drift is bounded above by 2 ticks */
4036 		cfs_rq->runtime_expires += TICK_NSEC;
4037 	} else {
4038 		/* global deadline is ahead, expiration has passed */
4039 		cfs_rq->runtime_remaining = 0;
4040 	}
4041 }
4042 
4043 static void __account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec)
4044 {
4045 	/* dock delta_exec before expiring quota (as it could span periods) */
4046 	cfs_rq->runtime_remaining -= delta_exec;
4047 	expire_cfs_rq_runtime(cfs_rq);
4048 
4049 	if (likely(cfs_rq->runtime_remaining > 0))
4050 		return;
4051 
4052 	/*
4053 	 * if we're unable to extend our runtime we resched so that the active
4054 	 * hierarchy can be throttled
4055 	 */
4056 	if (!assign_cfs_rq_runtime(cfs_rq) && likely(cfs_rq->curr))
4057 		resched_curr(rq_of(cfs_rq));
4058 }
4059 
4060 static __always_inline
4061 void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec)
4062 {
4063 	if (!cfs_bandwidth_used() || !cfs_rq->runtime_enabled)
4064 		return;
4065 
4066 	__account_cfs_rq_runtime(cfs_rq, delta_exec);
4067 }
4068 
4069 static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq)
4070 {
4071 	return cfs_bandwidth_used() && cfs_rq->throttled;
4072 }
4073 
4074 /* check whether cfs_rq, or any parent, is throttled */
4075 static inline int throttled_hierarchy(struct cfs_rq *cfs_rq)
4076 {
4077 	return cfs_bandwidth_used() && cfs_rq->throttle_count;
4078 }
4079 
4080 /*
4081  * Ensure that neither of the group entities corresponding to src_cpu or
4082  * dest_cpu are members of a throttled hierarchy when performing group
4083  * load-balance operations.
4084  */
4085 static inline int throttled_lb_pair(struct task_group *tg,
4086 				    int src_cpu, int dest_cpu)
4087 {
4088 	struct cfs_rq *src_cfs_rq, *dest_cfs_rq;
4089 
4090 	src_cfs_rq = tg->cfs_rq[src_cpu];
4091 	dest_cfs_rq = tg->cfs_rq[dest_cpu];
4092 
4093 	return throttled_hierarchy(src_cfs_rq) ||
4094 	       throttled_hierarchy(dest_cfs_rq);
4095 }
4096 
4097 /* updated child weight may affect parent so we have to do this bottom up */
4098 static int tg_unthrottle_up(struct task_group *tg, void *data)
4099 {
4100 	struct rq *rq = data;
4101 	struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];
4102 
4103 	cfs_rq->throttle_count--;
4104 	if (!cfs_rq->throttle_count) {
4105 		/* adjust cfs_rq_clock_task() */
4106 		cfs_rq->throttled_clock_task_time += rq_clock_task(rq) -
4107 					     cfs_rq->throttled_clock_task;
4108 	}
4109 
4110 	return 0;
4111 }
4112 
4113 static int tg_throttle_down(struct task_group *tg, void *data)
4114 {
4115 	struct rq *rq = data;
4116 	struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];
4117 
4118 	/* group is entering throttled state, stop time */
4119 	if (!cfs_rq->throttle_count)
4120 		cfs_rq->throttled_clock_task = rq_clock_task(rq);
4121 	cfs_rq->throttle_count++;
4122 
4123 	return 0;
4124 }
4125 
4126 static void throttle_cfs_rq(struct cfs_rq *cfs_rq)
4127 {
4128 	struct rq *rq = rq_of(cfs_rq);
4129 	struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
4130 	struct sched_entity *se;
4131 	long task_delta, dequeue = 1;
4132 	bool empty;
4133 
4134 	se = cfs_rq->tg->se[cpu_of(rq_of(cfs_rq))];
4135 
4136 	/* freeze hierarchy runnable averages while throttled */
4137 	rcu_read_lock();
4138 	walk_tg_tree_from(cfs_rq->tg, tg_throttle_down, tg_nop, (void *)rq);
4139 	rcu_read_unlock();
4140 
4141 	task_delta = cfs_rq->h_nr_running;
4142 	for_each_sched_entity(se) {
4143 		struct cfs_rq *qcfs_rq = cfs_rq_of(se);
4144 		/* throttled entity or throttle-on-deactivate */
4145 		if (!se->on_rq)
4146 			break;
4147 
4148 		if (dequeue)
4149 			dequeue_entity(qcfs_rq, se, DEQUEUE_SLEEP);
4150 		qcfs_rq->h_nr_running -= task_delta;
4151 
4152 		if (qcfs_rq->load.weight)
4153 			dequeue = 0;
4154 	}
4155 
4156 	if (!se)
4157 		sub_nr_running(rq, task_delta);
4158 
4159 	cfs_rq->throttled = 1;
4160 	cfs_rq->throttled_clock = rq_clock(rq);
4161 	raw_spin_lock(&cfs_b->lock);
4162 	empty = list_empty(&cfs_b->throttled_cfs_rq);
4163 
4164 	/*
4165 	 * Add to the _head_ of the list, so that an already-started
4166 	 * distribute_cfs_runtime will not see us
4167 	 */
4168 	list_add_rcu(&cfs_rq->throttled_list, &cfs_b->throttled_cfs_rq);
4169 
4170 	/*
4171 	 * If we're the first throttled task, make sure the bandwidth
4172 	 * timer is running.
4173 	 */
4174 	if (empty)
4175 		start_cfs_bandwidth(cfs_b);
4176 
4177 	raw_spin_unlock(&cfs_b->lock);
4178 }
4179 
4180 void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
4181 {
4182 	struct rq *rq = rq_of(cfs_rq);
4183 	struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
4184 	struct sched_entity *se;
4185 	int enqueue = 1;
4186 	long task_delta;
4187 
4188 	se = cfs_rq->tg->se[cpu_of(rq)];
4189 
4190 	cfs_rq->throttled = 0;
4191 
4192 	update_rq_clock(rq);
4193 
4194 	raw_spin_lock(&cfs_b->lock);
4195 	cfs_b->throttled_time += rq_clock(rq) - cfs_rq->throttled_clock;
4196 	list_del_rcu(&cfs_rq->throttled_list);
4197 	raw_spin_unlock(&cfs_b->lock);
4198 
4199 	/* update hierarchical throttle state */
4200 	walk_tg_tree_from(cfs_rq->tg, tg_nop, tg_unthrottle_up, (void *)rq);
4201 
4202 	if (!cfs_rq->load.weight)
4203 		return;
4204 
4205 	task_delta = cfs_rq->h_nr_running;
4206 	for_each_sched_entity(se) {
4207 		if (se->on_rq)
4208 			enqueue = 0;
4209 
4210 		cfs_rq = cfs_rq_of(se);
4211 		if (enqueue)
4212 			enqueue_entity(cfs_rq, se, ENQUEUE_WAKEUP);
4213 		cfs_rq->h_nr_running += task_delta;
4214 
4215 		if (cfs_rq_throttled(cfs_rq))
4216 			break;
4217 	}
4218 
4219 	if (!se)
4220 		add_nr_running(rq, task_delta);
4221 
4222 	/* determine whether we need to wake up potentially idle cpu */
4223 	if (rq->curr == rq->idle && rq->cfs.nr_running)
4224 		resched_curr(rq);
4225 }
4226 
4227 static u64 distribute_cfs_runtime(struct cfs_bandwidth *cfs_b,
4228 		u64 remaining, u64 expires)
4229 {
4230 	struct cfs_rq *cfs_rq;
4231 	u64 runtime;
4232 	u64 starting_runtime = remaining;
4233 
4234 	rcu_read_lock();
4235 	list_for_each_entry_rcu(cfs_rq, &cfs_b->throttled_cfs_rq,
4236 				throttled_list) {
4237 		struct rq *rq = rq_of(cfs_rq);
4238 
4239 		raw_spin_lock(&rq->lock);
4240 		if (!cfs_rq_throttled(cfs_rq))
4241 			goto next;
4242 
4243 		runtime = -cfs_rq->runtime_remaining + 1;
4244 		if (runtime > remaining)
4245 			runtime = remaining;
4246 		remaining -= runtime;
4247 
4248 		cfs_rq->runtime_remaining += runtime;
4249 		cfs_rq->runtime_expires = expires;
4250 
4251 		/* we check whether we're throttled above */
4252 		if (cfs_rq->runtime_remaining > 0)
4253 			unthrottle_cfs_rq(cfs_rq);
4254 
4255 next:
4256 		raw_spin_unlock(&rq->lock);
4257 
4258 		if (!remaining)
4259 			break;
4260 	}
4261 	rcu_read_unlock();
4262 
4263 	return starting_runtime - remaining;
4264 }
4265 
4266 /*
4267  * Responsible for refilling a task_group's bandwidth and unthrottling its
4268  * cfs_rqs as appropriate. If there has been no activity within the last
4269  * period the timer is deactivated until scheduling resumes; cfs_b->idle is
4270  * used to track this state.
4271  */
4272 static int do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b, int overrun)
4273 {
4274 	u64 runtime, runtime_expires;
4275 	int throttled;
4276 
4277 	/* no need to continue the timer with no bandwidth constraint */
4278 	if (cfs_b->quota == RUNTIME_INF)
4279 		goto out_deactivate;
4280 
4281 	throttled = !list_empty(&cfs_b->throttled_cfs_rq);
4282 	cfs_b->nr_periods += overrun;
4283 
4284 	/*
4285 	 * idle depends on !throttled (for the case of a large deficit), and if
4286 	 * we're going inactive then everything else can be deferred
4287 	 */
4288 	if (cfs_b->idle && !throttled)
4289 		goto out_deactivate;
4290 
4291 	__refill_cfs_bandwidth_runtime(cfs_b);
4292 
4293 	if (!throttled) {
4294 		/* mark as potentially idle for the upcoming period */
4295 		cfs_b->idle = 1;
4296 		return 0;
4297 	}
4298 
4299 	/* account preceding periods in which throttling occurred */
4300 	cfs_b->nr_throttled += overrun;
4301 
4302 	runtime_expires = cfs_b->runtime_expires;
4303 
4304 	/*
4305 	 * This check is repeated as we are holding onto the new bandwidth while
4306 	 * we unthrottle. This can potentially race with an unthrottled group
4307 	 * trying to acquire new bandwidth from the global pool. This can result
4308 	 * in us over-using our runtime if it is all used during this loop, but
4309 	 * only by limited amounts in that extreme case.
4310 	 */
4311 	while (throttled && cfs_b->runtime > 0) {
4312 		runtime = cfs_b->runtime;
4313 		raw_spin_unlock(&cfs_b->lock);
4314 		/* we can't nest cfs_b->lock while distributing bandwidth */
4315 		runtime = distribute_cfs_runtime(cfs_b, runtime,
4316 						 runtime_expires);
4317 		raw_spin_lock(&cfs_b->lock);
4318 
4319 		throttled = !list_empty(&cfs_b->throttled_cfs_rq);
4320 
4321 		cfs_b->runtime -= min(runtime, cfs_b->runtime);
4322 	}
4323 
4324 	/*
4325 	 * While we are ensured activity in the period following an
4326 	 * unthrottle, this also covers the case in which the new bandwidth is
4327 	 * insufficient to cover the existing bandwidth deficit.  (Forcing the
4328 	 * timer to remain active while there are any throttled entities.)
4329 	 */
4330 	cfs_b->idle = 0;
4331 
4332 	return 0;
4333 
4334 out_deactivate:
4335 	return 1;
4336 }
4337 
4338 /* a cfs_rq won't donate quota below this amount */
4339 static const u64 min_cfs_rq_runtime = 1 * NSEC_PER_MSEC;
4340 /* minimum remaining period time to redistribute slack quota */
4341 static const u64 min_bandwidth_expiration = 2 * NSEC_PER_MSEC;
4342 /* how long we wait to gather additional slack before distributing */
4343 static const u64 cfs_bandwidth_slack_period = 5 * NSEC_PER_MSEC;
4344 
4345 /*
4346  * Are we near the end of the current quota period?
4347  *
4348  * Requires cfs_b->lock for hrtimer_expires_remaining to be safe against the
4349  * hrtimer base being cleared by hrtimer_start. In the case of
4350  * migrate_hrtimers, base is never cleared, so we are fine.
4351  */
4352 static int runtime_refresh_within(struct cfs_bandwidth *cfs_b, u64 min_expire)
4353 {
4354 	struct hrtimer *refresh_timer = &cfs_b->period_timer;
4355 	u64 remaining;
4356 
4357 	/* if the call-back is running a quota refresh is already occurring */
4358 	if (hrtimer_callback_running(refresh_timer))
4359 		return 1;
4360 
4361 	/* is a quota refresh about to occur? */
4362 	remaining = ktime_to_ns(hrtimer_expires_remaining(refresh_timer));
4363 	if (remaining < min_expire)
4364 		return 1;
4365 
4366 	return 0;
4367 }
4368 
4369 static void start_cfs_slack_bandwidth(struct cfs_bandwidth *cfs_b)
4370 {
4371 	u64 min_left = cfs_bandwidth_slack_period + min_bandwidth_expiration;
4372 
4373 	/* if there's a quota refresh soon don't bother with slack */
4374 	if (runtime_refresh_within(cfs_b, min_left))
4375 		return;
4376 
4377 	hrtimer_start(&cfs_b->slack_timer,
4378 			ns_to_ktime(cfs_bandwidth_slack_period),
4379 			HRTIMER_MODE_REL);
4380 }
4381 
4382 /* we know any runtime found here is valid as update_curr() precedes return */
4383 static void __return_cfs_rq_runtime(struct cfs_rq *cfs_rq)
4384 {
4385 	struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
4386 	s64 slack_runtime = cfs_rq->runtime_remaining - min_cfs_rq_runtime;
4387 
4388 	if (slack_runtime <= 0)
4389 		return;
4390 
4391 	raw_spin_lock(&cfs_b->lock);
4392 	if (cfs_b->quota != RUNTIME_INF &&
4393 	    cfs_rq->runtime_expires == cfs_b->runtime_expires) {
4394 		cfs_b->runtime += slack_runtime;
4395 
4396 		/* we are under rq->lock, defer unthrottling using a timer */
4397 		if (cfs_b->runtime > sched_cfs_bandwidth_slice() &&
4398 		    !list_empty(&cfs_b->throttled_cfs_rq))
4399 			start_cfs_slack_bandwidth(cfs_b);
4400 	}
4401 	raw_spin_unlock(&cfs_b->lock);
4402 
4403 	/* even if it's not valid for return we don't want to try again */
4404 	cfs_rq->runtime_remaining -= slack_runtime;
4405 }
4406 
4407 static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq)
4408 {
4409 	if (!cfs_bandwidth_used())
4410 		return;
4411 
4412 	if (!cfs_rq->runtime_enabled || cfs_rq->nr_running)
4413 		return;
4414 
4415 	__return_cfs_rq_runtime(cfs_rq);
4416 }
4417 
4418 /*
4419  * This is done with a timer (instead of inline with bandwidth return) since
4420  * it's necessary to juggle rq->locks to unthrottle their respective cfs_rqs.
4421  */
4422 static void do_sched_cfs_slack_timer(struct cfs_bandwidth *cfs_b)
4423 {
4424 	u64 runtime = 0, slice = sched_cfs_bandwidth_slice();
4425 	u64 expires;
4426 
4427 	/* confirm we're still not at a refresh boundary */
4428 	raw_spin_lock(&cfs_b->lock);
4429 	if (runtime_refresh_within(cfs_b, min_bandwidth_expiration)) {
4430 		raw_spin_unlock(&cfs_b->lock);
4431 		return;
4432 	}
4433 
4434 	if (cfs_b->quota != RUNTIME_INF && cfs_b->runtime > slice)
4435 		runtime = cfs_b->runtime;
4436 
4437 	expires = cfs_b->runtime_expires;
4438 	raw_spin_unlock(&cfs_b->lock);
4439 
4440 	if (!runtime)
4441 		return;
4442 
4443 	runtime = distribute_cfs_runtime(cfs_b, runtime, expires);
4444 
4445 	raw_spin_lock(&cfs_b->lock);
4446 	if (expires == cfs_b->runtime_expires)
4447 		cfs_b->runtime -= min(runtime, cfs_b->runtime);
4448 	raw_spin_unlock(&cfs_b->lock);
4449 }
4450 
4451 /*
4452  * When a group wakes up we want to make sure that its quota is not already
4453  * expired/exceeded, otherwise it may be allowed to steal additional ticks of
4454  * runtime as update_curr() throttling can not not trigger until it's on-rq.
4455  */
4456 static void check_enqueue_throttle(struct cfs_rq *cfs_rq)
4457 {
4458 	if (!cfs_bandwidth_used())
4459 		return;
4460 
4461 	/* an active group must be handled by the update_curr()->put() path */
4462 	if (!cfs_rq->runtime_enabled || cfs_rq->curr)
4463 		return;
4464 
4465 	/* ensure the group is not already throttled */
4466 	if (cfs_rq_throttled(cfs_rq))
4467 		return;
4468 
4469 	/* update runtime allocation */
4470 	account_cfs_rq_runtime(cfs_rq, 0);
4471 	if (cfs_rq->runtime_remaining <= 0)
4472 		throttle_cfs_rq(cfs_rq);
4473 }
4474 
4475 static void sync_throttle(struct task_group *tg, int cpu)
4476 {
4477 	struct cfs_rq *pcfs_rq, *cfs_rq;
4478 
4479 	if (!cfs_bandwidth_used())
4480 		return;
4481 
4482 	if (!tg->parent)
4483 		return;
4484 
4485 	cfs_rq = tg->cfs_rq[cpu];
4486 	pcfs_rq = tg->parent->cfs_rq[cpu];
4487 
4488 	cfs_rq->throttle_count = pcfs_rq->throttle_count;
4489 	cfs_rq->throttled_clock_task = rq_clock_task(cpu_rq(cpu));
4490 }
4491 
4492 /* conditionally throttle active cfs_rq's from put_prev_entity() */
4493 static bool check_cfs_rq_runtime(struct cfs_rq *cfs_rq)
4494 {
4495 	if (!cfs_bandwidth_used())
4496 		return false;
4497 
4498 	if (likely(!cfs_rq->runtime_enabled || cfs_rq->runtime_remaining > 0))
4499 		return false;
4500 
4501 	/*
4502 	 * it's possible for a throttled entity to be forced into a running
4503 	 * state (e.g. set_curr_task), in this case we're finished.
4504 	 */
4505 	if (cfs_rq_throttled(cfs_rq))
4506 		return true;
4507 
4508 	throttle_cfs_rq(cfs_rq);
4509 	return true;
4510 }
4511 
4512 static enum hrtimer_restart sched_cfs_slack_timer(struct hrtimer *timer)
4513 {
4514 	struct cfs_bandwidth *cfs_b =
4515 		container_of(timer, struct cfs_bandwidth, slack_timer);
4516 
4517 	do_sched_cfs_slack_timer(cfs_b);
4518 
4519 	return HRTIMER_NORESTART;
4520 }
4521 
4522 static enum hrtimer_restart sched_cfs_period_timer(struct hrtimer *timer)
4523 {
4524 	struct cfs_bandwidth *cfs_b =
4525 		container_of(timer, struct cfs_bandwidth, period_timer);
4526 	int overrun;
4527 	int idle = 0;
4528 
4529 	raw_spin_lock(&cfs_b->lock);
4530 	for (;;) {
4531 		overrun = hrtimer_forward_now(timer, cfs_b->period);
4532 		if (!overrun)
4533 			break;
4534 
4535 		idle = do_sched_cfs_period_timer(cfs_b, overrun);
4536 	}
4537 	if (idle)
4538 		cfs_b->period_active = 0;
4539 	raw_spin_unlock(&cfs_b->lock);
4540 
4541 	return idle ? HRTIMER_NORESTART : HRTIMER_RESTART;
4542 }
4543 
4544 void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
4545 {
4546 	raw_spin_lock_init(&cfs_b->lock);
4547 	cfs_b->runtime = 0;
4548 	cfs_b->quota = RUNTIME_INF;
4549 	cfs_b->period = ns_to_ktime(default_cfs_period());
4550 
4551 	INIT_LIST_HEAD(&cfs_b->throttled_cfs_rq);
4552 	hrtimer_init(&cfs_b->period_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED);
4553 	cfs_b->period_timer.function = sched_cfs_period_timer;
4554 	hrtimer_init(&cfs_b->slack_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
4555 	cfs_b->slack_timer.function = sched_cfs_slack_timer;
4556 }
4557 
4558 static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq)
4559 {
4560 	cfs_rq->runtime_enabled = 0;
4561 	INIT_LIST_HEAD(&cfs_rq->throttled_list);
4562 }
4563 
4564 void start_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
4565 {
4566 	lockdep_assert_held(&cfs_b->lock);
4567 
4568 	if (!cfs_b->period_active) {
4569 		cfs_b->period_active = 1;
4570 		hrtimer_forward_now(&cfs_b->period_timer, cfs_b->period);
4571 		hrtimer_start_expires(&cfs_b->period_timer, HRTIMER_MODE_ABS_PINNED);
4572 	}
4573 }
4574 
4575 static void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
4576 {
4577 	/* init_cfs_bandwidth() was not called */
4578 	if (!cfs_b->throttled_cfs_rq.next)
4579 		return;
4580 
4581 	hrtimer_cancel(&cfs_b->period_timer);
4582 	hrtimer_cancel(&cfs_b->slack_timer);
4583 }
4584 
4585 static void __maybe_unused update_runtime_enabled(struct rq *rq)
4586 {
4587 	struct cfs_rq *cfs_rq;
4588 
4589 	for_each_leaf_cfs_rq(rq, cfs_rq) {
4590 		struct cfs_bandwidth *cfs_b = &cfs_rq->tg->cfs_bandwidth;
4591 
4592 		raw_spin_lock(&cfs_b->lock);
4593 		cfs_rq->runtime_enabled = cfs_b->quota != RUNTIME_INF;
4594 		raw_spin_unlock(&cfs_b->lock);
4595 	}
4596 }
4597 
4598 static void __maybe_unused unthrottle_offline_cfs_rqs(struct rq *rq)
4599 {
4600 	struct cfs_rq *cfs_rq;
4601 
4602 	for_each_leaf_cfs_rq(rq, cfs_rq) {
4603 		if (!cfs_rq->runtime_enabled)
4604 			continue;
4605 
4606 		/*
4607 		 * clock_task is not advancing so we just need to make sure
4608 		 * there's some valid quota amount
4609 		 */
4610 		cfs_rq->runtime_remaining = 1;
4611 		/*
4612 		 * Offline rq is schedulable till cpu is completely disabled
4613 		 * in take_cpu_down(), so we prevent new cfs throttling here.
4614 		 */
4615 		cfs_rq->runtime_enabled = 0;
4616 
4617 		if (cfs_rq_throttled(cfs_rq))
4618 			unthrottle_cfs_rq(cfs_rq);
4619 	}
4620 }
4621 
4622 #else /* CONFIG_CFS_BANDWIDTH */
4623 static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq)
4624 {
4625 	return rq_clock_task(rq_of(cfs_rq));
4626 }
4627 
4628 static void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec) {}
4629 static bool check_cfs_rq_runtime(struct cfs_rq *cfs_rq) { return false; }
4630 static void check_enqueue_throttle(struct cfs_rq *cfs_rq) {}
4631 static inline void sync_throttle(struct task_group *tg, int cpu) {}
4632 static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq) {}
4633 
4634 static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq)
4635 {
4636 	return 0;
4637 }
4638 
4639 static inline int throttled_hierarchy(struct cfs_rq *cfs_rq)
4640 {
4641 	return 0;
4642 }
4643 
4644 static inline int throttled_lb_pair(struct task_group *tg,
4645 				    int src_cpu, int dest_cpu)
4646 {
4647 	return 0;
4648 }
4649 
4650 void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b) {}
4651 
4652 #ifdef CONFIG_FAIR_GROUP_SCHED
4653 static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq) {}
4654 #endif
4655 
4656 static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg)
4657 {
4658 	return NULL;
4659 }
4660 static inline void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b) {}
4661 static inline void update_runtime_enabled(struct rq *rq) {}
4662 static inline void unthrottle_offline_cfs_rqs(struct rq *rq) {}
4663 
4664 #endif /* CONFIG_CFS_BANDWIDTH */
4665 
4666 /**************************************************
4667  * CFS operations on tasks:
4668  */
4669 
4670 #ifdef CONFIG_SCHED_HRTICK
4671 static void hrtick_start_fair(struct rq *rq, struct task_struct *p)
4672 {
4673 	struct sched_entity *se = &p->se;
4674 	struct cfs_rq *cfs_rq = cfs_rq_of(se);
4675 
4676 	SCHED_WARN_ON(task_rq(p) != rq);
4677 
4678 	if (rq->cfs.h_nr_running > 1) {
4679 		u64 slice = sched_slice(cfs_rq, se);
4680 		u64 ran = se->sum_exec_runtime - se->prev_sum_exec_runtime;
4681 		s64 delta = slice - ran;
4682 
4683 		if (delta < 0) {
4684 			if (rq->curr == p)
4685 				resched_curr(rq);
4686 			return;
4687 		}
4688 		hrtick_start(rq, delta);
4689 	}
4690 }
4691 
4692 /*
4693  * called from enqueue/dequeue and updates the hrtick when the
4694  * current task is from our class and nr_running is low enough
4695  * to matter.
4696  */
4697 static void hrtick_update(struct rq *rq)
4698 {
4699 	struct task_struct *curr = rq->curr;
4700 
4701 	if (!hrtick_enabled(rq) || curr->sched_class != &fair_sched_class)
4702 		return;
4703 
4704 	if (cfs_rq_of(&curr->se)->nr_running < sched_nr_latency)
4705 		hrtick_start_fair(rq, curr);
4706 }
4707 #else /* !CONFIG_SCHED_HRTICK */
4708 static inline void
4709 hrtick_start_fair(struct rq *rq, struct task_struct *p)
4710 {
4711 }
4712 
4713 static inline void hrtick_update(struct rq *rq)
4714 {
4715 }
4716 #endif
4717 
4718 /*
4719  * The enqueue_task method is called before nr_running is
4720  * increased. Here we update the fair scheduling stats and
4721  * then put the task into the rbtree:
4722  */
4723 static void
4724 enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
4725 {
4726 	struct cfs_rq *cfs_rq;
4727 	struct sched_entity *se = &p->se;
4728 
4729 	/*
4730 	 * If in_iowait is set, the code below may not trigger any cpufreq
4731 	 * utilization updates, so do it here explicitly with the IOWAIT flag
4732 	 * passed.
4733 	 */
4734 	if (p->in_iowait)
4735 		cpufreq_update_this_cpu(rq, SCHED_CPUFREQ_IOWAIT);
4736 
4737 	for_each_sched_entity(se) {
4738 		if (se->on_rq)
4739 			break;
4740 		cfs_rq = cfs_rq_of(se);
4741 		enqueue_entity(cfs_rq, se, flags);
4742 
4743 		/*
4744 		 * end evaluation on encountering a throttled cfs_rq
4745 		 *
4746 		 * note: in the case of encountering a throttled cfs_rq we will
4747 		 * post the final h_nr_running increment below.
4748 		 */
4749 		if (cfs_rq_throttled(cfs_rq))
4750 			break;
4751 		cfs_rq->h_nr_running++;
4752 
4753 		flags = ENQUEUE_WAKEUP;
4754 	}
4755 
4756 	for_each_sched_entity(se) {
4757 		cfs_rq = cfs_rq_of(se);
4758 		cfs_rq->h_nr_running++;
4759 
4760 		if (cfs_rq_throttled(cfs_rq))
4761 			break;
4762 
4763 		update_load_avg(se, UPDATE_TG);
4764 		update_cfs_shares(cfs_rq);
4765 	}
4766 
4767 	if (!se)
4768 		add_nr_running(rq, 1);
4769 
4770 	hrtick_update(rq);
4771 }
4772 
4773 static void set_next_buddy(struct sched_entity *se);
4774 
4775 /*
4776  * The dequeue_task method is called before nr_running is
4777  * decreased. We remove the task from the rbtree and
4778  * update the fair scheduling stats:
4779  */
4780 static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
4781 {
4782 	struct cfs_rq *cfs_rq;
4783 	struct sched_entity *se = &p->se;
4784 	int task_sleep = flags & DEQUEUE_SLEEP;
4785 
4786 	for_each_sched_entity(se) {
4787 		cfs_rq = cfs_rq_of(se);
4788 		dequeue_entity(cfs_rq, se, flags);
4789 
4790 		/*
4791 		 * end evaluation on encountering a throttled cfs_rq
4792 		 *
4793 		 * note: in the case of encountering a throttled cfs_rq we will
4794 		 * post the final h_nr_running decrement below.
4795 		*/
4796 		if (cfs_rq_throttled(cfs_rq))
4797 			break;
4798 		cfs_rq->h_nr_running--;
4799 
4800 		/* Don't dequeue parent if it has other entities besides us */
4801 		if (cfs_rq->load.weight) {
4802 			/* Avoid re-evaluating load for this entity: */
4803 			se = parent_entity(se);
4804 			/*
4805 			 * Bias pick_next to pick a task from this cfs_rq, as
4806 			 * p is sleeping when it is within its sched_slice.
4807 			 */
4808 			if (task_sleep && se && !throttled_hierarchy(cfs_rq))
4809 				set_next_buddy(se);
4810 			break;
4811 		}
4812 		flags |= DEQUEUE_SLEEP;
4813 	}
4814 
4815 	for_each_sched_entity(se) {
4816 		cfs_rq = cfs_rq_of(se);
4817 		cfs_rq->h_nr_running--;
4818 
4819 		if (cfs_rq_throttled(cfs_rq))
4820 			break;
4821 
4822 		update_load_avg(se, UPDATE_TG);
4823 		update_cfs_shares(cfs_rq);
4824 	}
4825 
4826 	if (!se)
4827 		sub_nr_running(rq, 1);
4828 
4829 	hrtick_update(rq);
4830 }
4831 
4832 #ifdef CONFIG_SMP
4833 
4834 /* Working cpumask for: load_balance, load_balance_newidle. */
4835 DEFINE_PER_CPU(cpumask_var_t, load_balance_mask);
4836 DEFINE_PER_CPU(cpumask_var_t, select_idle_mask);
4837 
4838 #ifdef CONFIG_NO_HZ_COMMON
4839 /*
4840  * per rq 'load' arrray crap; XXX kill this.
4841  */
4842 
4843 /*
4844  * The exact cpuload calculated at every tick would be:
4845  *
4846  *   load' = (1 - 1/2^i) * load + (1/2^i) * cur_load
4847  *
4848  * If a cpu misses updates for n ticks (as it was idle) and update gets
4849  * called on the n+1-th tick when cpu may be busy, then we have:
4850  *
4851  *   load_n   = (1 - 1/2^i)^n * load_0
4852  *   load_n+1 = (1 - 1/2^i)   * load_n + (1/2^i) * cur_load
4853  *
4854  * decay_load_missed() below does efficient calculation of
4855  *
4856  *   load' = (1 - 1/2^i)^n * load
4857  *
4858  * Because x^(n+m) := x^n * x^m we can decompose any x^n in power-of-2 factors.
4859  * This allows us to precompute the above in said factors, thereby allowing the
4860  * reduction of an arbitrary n in O(log_2 n) steps. (See also
4861  * fixed_power_int())
4862  *
4863  * The calculation is approximated on a 128 point scale.
4864  */
4865 #define DEGRADE_SHIFT		7
4866 
4867 static const u8 degrade_zero_ticks[CPU_LOAD_IDX_MAX] = {0, 8, 32, 64, 128};
4868 static const u8 degrade_factor[CPU_LOAD_IDX_MAX][DEGRADE_SHIFT + 1] = {
4869 	{   0,   0,  0,  0,  0,  0, 0, 0 },
4870 	{  64,  32,  8,  0,  0,  0, 0, 0 },
4871 	{  96,  72, 40, 12,  1,  0, 0, 0 },
4872 	{ 112,  98, 75, 43, 15,  1, 0, 0 },
4873 	{ 120, 112, 98, 76, 45, 16, 2, 0 }
4874 };
4875 
4876 /*
4877  * Update cpu_load for any missed ticks, due to tickless idle. The backlog
4878  * would be when CPU is idle and so we just decay the old load without
4879  * adding any new load.
4880  */
4881 static unsigned long
4882 decay_load_missed(unsigned long load, unsigned long missed_updates, int idx)
4883 {
4884 	int j = 0;
4885 
4886 	if (!missed_updates)
4887 		return load;
4888 
4889 	if (missed_updates >= degrade_zero_ticks[idx])
4890 		return 0;
4891 
4892 	if (idx == 1)
4893 		return load >> missed_updates;
4894 
4895 	while (missed_updates) {
4896 		if (missed_updates % 2)
4897 			load = (load * degrade_factor[idx][j]) >> DEGRADE_SHIFT;
4898 
4899 		missed_updates >>= 1;
4900 		j++;
4901 	}
4902 	return load;
4903 }
4904 #endif /* CONFIG_NO_HZ_COMMON */
4905 
4906 /**
4907  * __cpu_load_update - update the rq->cpu_load[] statistics
4908  * @this_rq: The rq to update statistics for
4909  * @this_load: The current load
4910  * @pending_updates: The number of missed updates
4911  *
4912  * Update rq->cpu_load[] statistics. This function is usually called every
4913  * scheduler tick (TICK_NSEC).
4914  *
4915  * This function computes a decaying average:
4916  *
4917  *   load[i]' = (1 - 1/2^i) * load[i] + (1/2^i) * load
4918  *
4919  * Because of NOHZ it might not get called on every tick which gives need for
4920  * the @pending_updates argument.
4921  *
4922  *   load[i]_n = (1 - 1/2^i) * load[i]_n-1 + (1/2^i) * load_n-1
4923  *             = A * load[i]_n-1 + B ; A := (1 - 1/2^i), B := (1/2^i) * load
4924  *             = A * (A * load[i]_n-2 + B) + B
4925  *             = A * (A * (A * load[i]_n-3 + B) + B) + B
4926  *             = A^3 * load[i]_n-3 + (A^2 + A + 1) * B
4927  *             = A^n * load[i]_0 + (A^(n-1) + A^(n-2) + ... + 1) * B
4928  *             = A^n * load[i]_0 + ((1 - A^n) / (1 - A)) * B
4929  *             = (1 - 1/2^i)^n * (load[i]_0 - load) + load
4930  *
4931  * In the above we've assumed load_n := load, which is true for NOHZ_FULL as
4932  * any change in load would have resulted in the tick being turned back on.
4933  *
4934  * For regular NOHZ, this reduces to:
4935  *
4936  *   load[i]_n = (1 - 1/2^i)^n * load[i]_0
4937  *
4938  * see decay_load_misses(). For NOHZ_FULL we get to subtract and add the extra
4939  * term.
4940  */
4941 static void cpu_load_update(struct rq *this_rq, unsigned long this_load,
4942 			    unsigned long pending_updates)
4943 {
4944 	unsigned long __maybe_unused tickless_load = this_rq->cpu_load[0];
4945 	int i, scale;
4946 
4947 	this_rq->nr_load_updates++;
4948 
4949 	/* Update our load: */
4950 	this_rq->cpu_load[0] = this_load; /* Fasttrack for idx 0 */
4951 	for (i = 1, scale = 2; i < CPU_LOAD_IDX_MAX; i++, scale += scale) {
4952 		unsigned long old_load, new_load;
4953 
4954 		/* scale is effectively 1 << i now, and >> i divides by scale */
4955 
4956 		old_load = this_rq->cpu_load[i];
4957 #ifdef CONFIG_NO_HZ_COMMON
4958 		old_load = decay_load_missed(old_load, pending_updates - 1, i);
4959 		if (tickless_load) {
4960 			old_load -= decay_load_missed(tickless_load, pending_updates - 1, i);
4961 			/*
4962 			 * old_load can never be a negative value because a
4963 			 * decayed tickless_load cannot be greater than the
4964 			 * original tickless_load.
4965 			 */
4966 			old_load += tickless_load;
4967 		}
4968 #endif
4969 		new_load = this_load;
4970 		/*
4971 		 * Round up the averaging division if load is increasing. This
4972 		 * prevents us from getting stuck on 9 if the load is 10, for
4973 		 * example.
4974 		 */
4975 		if (new_load > old_load)
4976 			new_load += scale - 1;
4977 
4978 		this_rq->cpu_load[i] = (old_load * (scale - 1) + new_load) >> i;
4979 	}
4980 
4981 	sched_avg_update(this_rq);
4982 }
4983 
4984 /* Used instead of source_load when we know the type == 0 */
4985 static unsigned long weighted_cpuload(const int cpu)
4986 {
4987 	return cfs_rq_runnable_load_avg(&cpu_rq(cpu)->cfs);
4988 }
4989 
4990 #ifdef CONFIG_NO_HZ_COMMON
4991 /*
4992  * There is no sane way to deal with nohz on smp when using jiffies because the
4993  * cpu doing the jiffies update might drift wrt the cpu doing the jiffy reading
4994  * causing off-by-one errors in observed deltas; {0,2} instead of {1,1}.
4995  *
4996  * Therefore we need to avoid the delta approach from the regular tick when
4997  * possible since that would seriously skew the load calculation. This is why we
4998  * use cpu_load_update_periodic() for CPUs out of nohz. However we'll rely on
4999  * jiffies deltas for updates happening while in nohz mode (idle ticks, idle
5000  * loop exit, nohz_idle_balance, nohz full exit...)
5001  *
5002  * This means we might still be one tick off for nohz periods.
5003  */
5004 
5005 static void cpu_load_update_nohz(struct rq *this_rq,
5006 				 unsigned long curr_jiffies,
5007 				 unsigned long load)
5008 {
5009 	unsigned long pending_updates;
5010 
5011 	pending_updates = curr_jiffies - this_rq->last_load_update_tick;
5012 	if (pending_updates) {
5013 		this_rq->last_load_update_tick = curr_jiffies;
5014 		/*
5015 		 * In the regular NOHZ case, we were idle, this means load 0.
5016 		 * In the NOHZ_FULL case, we were non-idle, we should consider
5017 		 * its weighted load.
5018 		 */
5019 		cpu_load_update(this_rq, load, pending_updates);
5020 	}
5021 }
5022 
5023 /*
5024  * Called from nohz_idle_balance() to update the load ratings before doing the
5025  * idle balance.
5026  */
5027 static void cpu_load_update_idle(struct rq *this_rq)
5028 {
5029 	/*
5030 	 * bail if there's load or we're actually up-to-date.
5031 	 */
5032 	if (weighted_cpuload(cpu_of(this_rq)))
5033 		return;
5034 
5035 	cpu_load_update_nohz(this_rq, READ_ONCE(jiffies), 0);
5036 }
5037 
5038 /*
5039  * Record CPU load on nohz entry so we know the tickless load to account
5040  * on nohz exit. cpu_load[0] happens then to be updated more frequently
5041  * than other cpu_load[idx] but it should be fine as cpu_load readers
5042  * shouldn't rely into synchronized cpu_load[*] updates.
5043  */
5044 void cpu_load_update_nohz_start(void)
5045 {
5046 	struct rq *this_rq = this_rq();
5047 
5048 	/*
5049 	 * This is all lockless but should be fine. If weighted_cpuload changes
5050 	 * concurrently we'll exit nohz. And cpu_load write can race with
5051 	 * cpu_load_update_idle() but both updater would be writing the same.
5052 	 */
5053 	this_rq->cpu_load[0] = weighted_cpuload(cpu_of(this_rq));
5054 }
5055 
5056 /*
5057  * Account the tickless load in the end of a nohz frame.
5058  */
5059 void cpu_load_update_nohz_stop(void)
5060 {
5061 	unsigned long curr_jiffies = READ_ONCE(jiffies);
5062 	struct rq *this_rq = this_rq();
5063 	unsigned long load;
5064 
5065 	if (curr_jiffies == this_rq->last_load_update_tick)
5066 		return;
5067 
5068 	load = weighted_cpuload(cpu_of(this_rq));
5069 	raw_spin_lock(&this_rq->lock);
5070 	update_rq_clock(this_rq);
5071 	cpu_load_update_nohz(this_rq, curr_jiffies, load);
5072 	raw_spin_unlock(&this_rq->lock);
5073 }
5074 #else /* !CONFIG_NO_HZ_COMMON */
5075 static inline void cpu_load_update_nohz(struct rq *this_rq,
5076 					unsigned long curr_jiffies,
5077 					unsigned long load) { }
5078 #endif /* CONFIG_NO_HZ_COMMON */
5079 
5080 static void cpu_load_update_periodic(struct rq *this_rq, unsigned long load)
5081 {
5082 #ifdef CONFIG_NO_HZ_COMMON
5083 	/* See the mess around cpu_load_update_nohz(). */
5084 	this_rq->last_load_update_tick = READ_ONCE(jiffies);
5085 #endif
5086 	cpu_load_update(this_rq, load, 1);
5087 }
5088 
5089 /*
5090  * Called from scheduler_tick()
5091  */
5092 void cpu_load_update_active(struct rq *this_rq)
5093 {
5094 	unsigned long load = weighted_cpuload(cpu_of(this_rq));
5095 
5096 	if (tick_nohz_tick_stopped())
5097 		cpu_load_update_nohz(this_rq, READ_ONCE(jiffies), load);
5098 	else
5099 		cpu_load_update_periodic(this_rq, load);
5100 }
5101 
5102 /*
5103  * Return a low guess at the load of a migration-source cpu weighted
5104  * according to the scheduling class and "nice" value.
5105  *
5106  * We want to under-estimate the load of migration sources, to
5107  * balance conservatively.
5108  */
5109 static unsigned long source_load(int cpu, int type)
5110 {
5111 	struct rq *rq = cpu_rq(cpu);
5112 	unsigned long total = weighted_cpuload(cpu);
5113 
5114 	if (type == 0 || !sched_feat(LB_BIAS))
5115 		return total;
5116 
5117 	return min(rq->cpu_load[type-1], total);
5118 }
5119 
5120 /*
5121  * Return a high guess at the load of a migration-target cpu weighted
5122  * according to the scheduling class and "nice" value.
5123  */
5124 static unsigned long target_load(int cpu, int type)
5125 {
5126 	struct rq *rq = cpu_rq(cpu);
5127 	unsigned long total = weighted_cpuload(cpu);
5128 
5129 	if (type == 0 || !sched_feat(LB_BIAS))
5130 		return total;
5131 
5132 	return max(rq->cpu_load[type-1], total);
5133 }
5134 
5135 static unsigned long capacity_of(int cpu)
5136 {
5137 	return cpu_rq(cpu)->cpu_capacity;
5138 }
5139 
5140 static unsigned long capacity_orig_of(int cpu)
5141 {
5142 	return cpu_rq(cpu)->cpu_capacity_orig;
5143 }
5144 
5145 static unsigned long cpu_avg_load_per_task(int cpu)
5146 {
5147 	struct rq *rq = cpu_rq(cpu);
5148 	unsigned long nr_running = READ_ONCE(rq->cfs.h_nr_running);
5149 	unsigned long load_avg = weighted_cpuload(cpu);
5150 
5151 	if (nr_running)
5152 		return load_avg / nr_running;
5153 
5154 	return 0;
5155 }
5156 
5157 #ifdef CONFIG_FAIR_GROUP_SCHED
5158 /*
5159  * effective_load() calculates the load change as seen from the root_task_group
5160  *
5161  * Adding load to a group doesn't make a group heavier, but can cause movement
5162  * of group shares between cpus. Assuming the shares were perfectly aligned one
5163  * can calculate the shift in shares.
5164  *
5165  * Calculate the effective load difference if @wl is added (subtracted) to @tg
5166  * on this @cpu and results in a total addition (subtraction) of @wg to the
5167  * total group weight.
5168  *
5169  * Given a runqueue weight distribution (rw_i) we can compute a shares
5170  * distribution (s_i) using:
5171  *
5172  *   s_i = rw_i / \Sum rw_j						(1)
5173  *
5174  * Suppose we have 4 CPUs and our @tg is a direct child of the root group and
5175  * has 7 equal weight tasks, distributed as below (rw_i), with the resulting
5176  * shares distribution (s_i):
5177  *
5178  *   rw_i = {   2,   4,   1,   0 }
5179  *   s_i  = { 2/7, 4/7, 1/7,   0 }
5180  *
5181  * As per wake_affine() we're interested in the load of two CPUs (the CPU the
5182  * task used to run on and the CPU the waker is running on), we need to
5183  * compute the effect of waking a task on either CPU and, in case of a sync
5184  * wakeup, compute the effect of the current task going to sleep.
5185  *
5186  * So for a change of @wl to the local @cpu with an overall group weight change
5187  * of @wl we can compute the new shares distribution (s'_i) using:
5188  *
5189  *   s'_i = (rw_i + @wl) / (@wg + \Sum rw_j)				(2)
5190  *
5191  * Suppose we're interested in CPUs 0 and 1, and want to compute the load
5192  * differences in waking a task to CPU 0. The additional task changes the
5193  * weight and shares distributions like:
5194  *
5195  *   rw'_i = {   3,   4,   1,   0 }
5196  *   s'_i  = { 3/8, 4/8, 1/8,   0 }
5197  *
5198  * We can then compute the difference in effective weight by using:
5199  *
5200  *   dw_i = S * (s'_i - s_i)						(3)
5201  *
5202  * Where 'S' is the group weight as seen by its parent.
5203  *
5204  * Therefore the effective change in loads on CPU 0 would be 5/56 (3/8 - 2/7)
5205  * times the weight of the group. The effect on CPU 1 would be -4/56 (4/8 -
5206  * 4/7) times the weight of the group.
5207  */
5208 static long effective_load(struct task_group *tg, int cpu, long wl, long wg)
5209 {
5210 	struct sched_entity *se = tg->se[cpu];
5211 
5212 	if (!tg->parent)	/* the trivial, non-cgroup case */
5213 		return wl;
5214 
5215 	for_each_sched_entity(se) {
5216 		struct cfs_rq *cfs_rq = se->my_q;
5217 		long W, w = cfs_rq_load_avg(cfs_rq);
5218 
5219 		tg = cfs_rq->tg;
5220 
5221 		/*
5222 		 * W = @wg + \Sum rw_j
5223 		 */
5224 		W = wg + atomic_long_read(&tg->load_avg);
5225 
5226 		/* Ensure \Sum rw_j >= rw_i */
5227 		W -= cfs_rq->tg_load_avg_contrib;
5228 		W += w;
5229 
5230 		/*
5231 		 * w = rw_i + @wl
5232 		 */
5233 		w += wl;
5234 
5235 		/*
5236 		 * wl = S * s'_i; see (2)
5237 		 */
5238 		if (W > 0 && w < W)
5239 			wl = (w * (long)scale_load_down(tg->shares)) / W;
5240 		else
5241 			wl = scale_load_down(tg->shares);
5242 
5243 		/*
5244 		 * Per the above, wl is the new se->load.weight value; since
5245 		 * those are clipped to [MIN_SHARES, ...) do so now. See
5246 		 * calc_cfs_shares().
5247 		 */
5248 		if (wl < MIN_SHARES)
5249 			wl = MIN_SHARES;
5250 
5251 		/*
5252 		 * wl = dw_i = S * (s'_i - s_i); see (3)
5253 		 */
5254 		wl -= se->avg.load_avg;
5255 
5256 		/*
5257 		 * Recursively apply this logic to all parent groups to compute
5258 		 * the final effective load change on the root group. Since
5259 		 * only the @tg group gets extra weight, all parent groups can
5260 		 * only redistribute existing shares. @wl is the shift in shares
5261 		 * resulting from this level per the above.
5262 		 */
5263 		wg = 0;
5264 	}
5265 
5266 	return wl;
5267 }
5268 #else
5269 
5270 static long effective_load(struct task_group *tg, int cpu, long wl, long wg)
5271 {
5272 	return wl;
5273 }
5274 
5275 #endif
5276 
5277 static void record_wakee(struct task_struct *p)
5278 {
5279 	/*
5280 	 * Only decay a single time; tasks that have less then 1 wakeup per
5281 	 * jiffy will not have built up many flips.
5282 	 */
5283 	if (time_after(jiffies, current->wakee_flip_decay_ts + HZ)) {
5284 		current->wakee_flips >>= 1;
5285 		current->wakee_flip_decay_ts = jiffies;
5286 	}
5287 
5288 	if (current->last_wakee != p) {
5289 		current->last_wakee = p;
5290 		current->wakee_flips++;
5291 	}
5292 }
5293 
5294 /*
5295  * Detect M:N waker/wakee relationships via a switching-frequency heuristic.
5296  *
5297  * A waker of many should wake a different task than the one last awakened
5298  * at a frequency roughly N times higher than one of its wakees.
5299  *
5300  * In order to determine whether we should let the load spread vs consolidating
5301  * to shared cache, we look for a minimum 'flip' frequency of llc_size in one
5302  * partner, and a factor of lls_size higher frequency in the other.
5303  *
5304  * With both conditions met, we can be relatively sure that the relationship is
5305  * non-monogamous, with partner count exceeding socket size.
5306  *
5307  * Waker/wakee being client/server, worker/dispatcher, interrupt source or
5308  * whatever is irrelevant, spread criteria is apparent partner count exceeds
5309  * socket size.
5310  */
5311 static int wake_wide(struct task_struct *p)
5312 {
5313 	unsigned int master = current->wakee_flips;
5314 	unsigned int slave = p->wakee_flips;
5315 	int factor = this_cpu_read(sd_llc_size);
5316 
5317 	if (master < slave)
5318 		swap(master, slave);
5319 	if (slave < factor || master < slave * factor)
5320 		return 0;
5321 	return 1;
5322 }
5323 
5324 static int wake_affine(struct sched_domain *sd, struct task_struct *p,
5325 		       int prev_cpu, int sync)
5326 {
5327 	s64 this_load, load;
5328 	s64 this_eff_load, prev_eff_load;
5329 	int idx, this_cpu;
5330 	struct task_group *tg;
5331 	unsigned long weight;
5332 	int balanced;
5333 
5334 	idx	  = sd->wake_idx;
5335 	this_cpu  = smp_processor_id();
5336 	load	  = source_load(prev_cpu, idx);
5337 	this_load = target_load(this_cpu, idx);
5338 
5339 	/*
5340 	 * If sync wakeup then subtract the (maximum possible)
5341 	 * effect of the currently running task from the load
5342 	 * of the current CPU:
5343 	 */
5344 	if (sync) {
5345 		tg = task_group(current);
5346 		weight = current->se.avg.load_avg;
5347 
5348 		this_load += effective_load(tg, this_cpu, -weight, -weight);
5349 		load += effective_load(tg, prev_cpu, 0, -weight);
5350 	}
5351 
5352 	tg = task_group(p);
5353 	weight = p->se.avg.load_avg;
5354 
5355 	/*
5356 	 * In low-load situations, where prev_cpu is idle and this_cpu is idle
5357 	 * due to the sync cause above having dropped this_load to 0, we'll
5358 	 * always have an imbalance, but there's really nothing you can do
5359 	 * about that, so that's good too.
5360 	 *
5361 	 * Otherwise check if either cpus are near enough in load to allow this
5362 	 * task to be woken on this_cpu.
5363 	 */
5364 	this_eff_load = 100;
5365 	this_eff_load *= capacity_of(prev_cpu);
5366 
5367 	prev_eff_load = 100 + (sd->imbalance_pct - 100) / 2;
5368 	prev_eff_load *= capacity_of(this_cpu);
5369 
5370 	if (this_load > 0) {
5371 		this_eff_load *= this_load +
5372 			effective_load(tg, this_cpu, weight, weight);
5373 
5374 		prev_eff_load *= load + effective_load(tg, prev_cpu, 0, weight);
5375 	}
5376 
5377 	balanced = this_eff_load <= prev_eff_load;
5378 
5379 	schedstat_inc(p->se.statistics.nr_wakeups_affine_attempts);
5380 
5381 	if (!balanced)
5382 		return 0;
5383 
5384 	schedstat_inc(sd->ttwu_move_affine);
5385 	schedstat_inc(p->se.statistics.nr_wakeups_affine);
5386 
5387 	return 1;
5388 }
5389 
5390 static inline int task_util(struct task_struct *p);
5391 static int cpu_util_wake(int cpu, struct task_struct *p);
5392 
5393 static unsigned long capacity_spare_wake(int cpu, struct task_struct *p)
5394 {
5395 	return capacity_orig_of(cpu) - cpu_util_wake(cpu, p);
5396 }
5397 
5398 /*
5399  * find_idlest_group finds and returns the least busy CPU group within the
5400  * domain.
5401  */
5402 static struct sched_group *
5403 find_idlest_group(struct sched_domain *sd, struct task_struct *p,
5404 		  int this_cpu, int sd_flag)
5405 {
5406 	struct sched_group *idlest = NULL, *group = sd->groups;
5407 	struct sched_group *most_spare_sg = NULL;
5408 	unsigned long min_runnable_load = ULONG_MAX, this_runnable_load = 0;
5409 	unsigned long min_avg_load = ULONG_MAX, this_avg_load = 0;
5410 	unsigned long most_spare = 0, this_spare = 0;
5411 	int load_idx = sd->forkexec_idx;
5412 	int imbalance_scale = 100 + (sd->imbalance_pct-100)/2;
5413 	unsigned long imbalance = scale_load_down(NICE_0_LOAD) *
5414 				(sd->imbalance_pct-100) / 100;
5415 
5416 	if (sd_flag & SD_BALANCE_WAKE)
5417 		load_idx = sd->wake_idx;
5418 
5419 	do {
5420 		unsigned long load, avg_load, runnable_load;
5421 		unsigned long spare_cap, max_spare_cap;
5422 		int local_group;
5423 		int i;
5424 
5425 		/* Skip over this group if it has no CPUs allowed */
5426 		if (!cpumask_intersects(sched_group_cpus(group),
5427 					tsk_cpus_allowed(p)))
5428 			continue;
5429 
5430 		local_group = cpumask_test_cpu(this_cpu,
5431 					       sched_group_cpus(group));
5432 
5433 		/*
5434 		 * Tally up the load of all CPUs in the group and find
5435 		 * the group containing the CPU with most spare capacity.
5436 		 */
5437 		avg_load = 0;
5438 		runnable_load = 0;
5439 		max_spare_cap = 0;
5440 
5441 		for_each_cpu(i, sched_group_cpus(group)) {
5442 			/* Bias balancing toward cpus of our domain */
5443 			if (local_group)
5444 				load = source_load(i, load_idx);
5445 			else
5446 				load = target_load(i, load_idx);
5447 
5448 			runnable_load += load;
5449 
5450 			avg_load += cfs_rq_load_avg(&cpu_rq(i)->cfs);
5451 
5452 			spare_cap = capacity_spare_wake(i, p);
5453 
5454 			if (spare_cap > max_spare_cap)
5455 				max_spare_cap = spare_cap;
5456 		}
5457 
5458 		/* Adjust by relative CPU capacity of the group */
5459 		avg_load = (avg_load * SCHED_CAPACITY_SCALE) /
5460 					group->sgc->capacity;
5461 		runnable_load = (runnable_load * SCHED_CAPACITY_SCALE) /
5462 					group->sgc->capacity;
5463 
5464 		if (local_group) {
5465 			this_runnable_load = runnable_load;
5466 			this_avg_load = avg_load;
5467 			this_spare = max_spare_cap;
5468 		} else {
5469 			if (min_runnable_load > (runnable_load + imbalance)) {
5470 				/*
5471 				 * The runnable load is significantly smaller
5472 				 * so we can pick this new cpu
5473 				 */
5474 				min_runnable_load = runnable_load;
5475 				min_avg_load = avg_load;
5476 				idlest = group;
5477 			} else if ((runnable_load < (min_runnable_load + imbalance)) &&
5478 				   (100*min_avg_load > imbalance_scale*avg_load)) {
5479 				/*
5480 				 * The runnable loads are close so take the
5481 				 * blocked load into account through avg_load.
5482 				 */
5483 				min_avg_load = avg_load;
5484 				idlest = group;
5485 			}
5486 
5487 			if (most_spare < max_spare_cap) {
5488 				most_spare = max_spare_cap;
5489 				most_spare_sg = group;
5490 			}
5491 		}
5492 	} while (group = group->next, group != sd->groups);
5493 
5494 	/*
5495 	 * The cross-over point between using spare capacity or least load
5496 	 * is too conservative for high utilization tasks on partially
5497 	 * utilized systems if we require spare_capacity > task_util(p),
5498 	 * so we allow for some task stuffing by using
5499 	 * spare_capacity > task_util(p)/2.
5500 	 *
5501 	 * Spare capacity can't be used for fork because the utilization has
5502 	 * not been set yet, we must first select a rq to compute the initial
5503 	 * utilization.
5504 	 */
5505 	if (sd_flag & SD_BALANCE_FORK)
5506 		goto skip_spare;
5507 
5508 	if (this_spare > task_util(p) / 2 &&
5509 	    imbalance_scale*this_spare > 100*most_spare)
5510 		return NULL;
5511 
5512 	if (most_spare > task_util(p) / 2)
5513 		return most_spare_sg;
5514 
5515 skip_spare:
5516 	if (!idlest)
5517 		return NULL;
5518 
5519 	if (min_runnable_load > (this_runnable_load + imbalance))
5520 		return NULL;
5521 
5522 	if ((this_runnable_load < (min_runnable_load + imbalance)) &&
5523 	     (100*this_avg_load < imbalance_scale*min_avg_load))
5524 		return NULL;
5525 
5526 	return idlest;
5527 }
5528 
5529 /*
5530  * find_idlest_cpu - find the idlest cpu among the cpus in group.
5531  */
5532 static int
5533 find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
5534 {
5535 	unsigned long load, min_load = ULONG_MAX;
5536 	unsigned int min_exit_latency = UINT_MAX;
5537 	u64 latest_idle_timestamp = 0;
5538 	int least_loaded_cpu = this_cpu;
5539 	int shallowest_idle_cpu = -1;
5540 	int i;
5541 
5542 	/* Check if we have any choice: */
5543 	if (group->group_weight == 1)
5544 		return cpumask_first(sched_group_cpus(group));
5545 
5546 	/* Traverse only the allowed CPUs */
5547 	for_each_cpu_and(i, sched_group_cpus(group), tsk_cpus_allowed(p)) {
5548 		if (idle_cpu(i)) {
5549 			struct rq *rq = cpu_rq(i);
5550 			struct cpuidle_state *idle = idle_get_state(rq);
5551 			if (idle && idle->exit_latency < min_exit_latency) {
5552 				/*
5553 				 * We give priority to a CPU whose idle state
5554 				 * has the smallest exit latency irrespective
5555 				 * of any idle timestamp.
5556 				 */
5557 				min_exit_latency = idle->exit_latency;
5558 				latest_idle_timestamp = rq->idle_stamp;
5559 				shallowest_idle_cpu = i;
5560 			} else if ((!idle || idle->exit_latency == min_exit_latency) &&
5561 				   rq->idle_stamp > latest_idle_timestamp) {
5562 				/*
5563 				 * If equal or no active idle state, then
5564 				 * the most recently idled CPU might have
5565 				 * a warmer cache.
5566 				 */
5567 				latest_idle_timestamp = rq->idle_stamp;
5568 				shallowest_idle_cpu = i;
5569 			}
5570 		} else if (shallowest_idle_cpu == -1) {
5571 			load = weighted_cpuload(i);
5572 			if (load < min_load || (load == min_load && i == this_cpu)) {
5573 				min_load = load;
5574 				least_loaded_cpu = i;
5575 			}
5576 		}
5577 	}
5578 
5579 	return shallowest_idle_cpu != -1 ? shallowest_idle_cpu : least_loaded_cpu;
5580 }
5581 
5582 /*
5583  * Implement a for_each_cpu() variant that starts the scan at a given cpu
5584  * (@start), and wraps around.
5585  *
5586  * This is used to scan for idle CPUs; such that not all CPUs looking for an
5587  * idle CPU find the same CPU. The down-side is that tasks tend to cycle
5588  * through the LLC domain.
5589  *
5590  * Especially tbench is found sensitive to this.
5591  */
5592 
5593 static int cpumask_next_wrap(int n, const struct cpumask *mask, int start, int *wrapped)
5594 {
5595 	int next;
5596 
5597 again:
5598 	next = find_next_bit(cpumask_bits(mask), nr_cpumask_bits, n+1);
5599 
5600 	if (*wrapped) {
5601 		if (next >= start)
5602 			return nr_cpumask_bits;
5603 	} else {
5604 		if (next >= nr_cpumask_bits) {
5605 			*wrapped = 1;
5606 			n = -1;
5607 			goto again;
5608 		}
5609 	}
5610 
5611 	return next;
5612 }
5613 
5614 #define for_each_cpu_wrap(cpu, mask, start, wrap)				\
5615 	for ((wrap) = 0, (cpu) = (start)-1;					\
5616 		(cpu) = cpumask_next_wrap((cpu), (mask), (start), &(wrap)),	\
5617 		(cpu) < nr_cpumask_bits; )
5618 
5619 #ifdef CONFIG_SCHED_SMT
5620 
5621 static inline void set_idle_cores(int cpu, int val)
5622 {
5623 	struct sched_domain_shared *sds;
5624 
5625 	sds = rcu_dereference(per_cpu(sd_llc_shared, cpu));
5626 	if (sds)
5627 		WRITE_ONCE(sds->has_idle_cores, val);
5628 }
5629 
5630 static inline bool test_idle_cores(int cpu, bool def)
5631 {
5632 	struct sched_domain_shared *sds;
5633 
5634 	sds = rcu_dereference(per_cpu(sd_llc_shared, cpu));
5635 	if (sds)
5636 		return READ_ONCE(sds->has_idle_cores);
5637 
5638 	return def;
5639 }
5640 
5641 /*
5642  * Scans the local SMT mask to see if the entire core is idle, and records this
5643  * information in sd_llc_shared->has_idle_cores.
5644  *
5645  * Since SMT siblings share all cache levels, inspecting this limited remote
5646  * state should be fairly cheap.
5647  */
5648 void __update_idle_core(struct rq *rq)
5649 {
5650 	int core = cpu_of(rq);
5651 	int cpu;
5652 
5653 	rcu_read_lock();
5654 	if (test_idle_cores(core, true))
5655 		goto unlock;
5656 
5657 	for_each_cpu(cpu, cpu_smt_mask(core)) {
5658 		if (cpu == core)
5659 			continue;
5660 
5661 		if (!idle_cpu(cpu))
5662 			goto unlock;
5663 	}
5664 
5665 	set_idle_cores(core, 1);
5666 unlock:
5667 	rcu_read_unlock();
5668 }
5669 
5670 /*
5671  * Scan the entire LLC domain for idle cores; this dynamically switches off if
5672  * there are no idle cores left in the system; tracked through
5673  * sd_llc->shared->has_idle_cores and enabled through update_idle_core() above.
5674  */
5675 static int select_idle_core(struct task_struct *p, struct sched_domain *sd, int target)
5676 {
5677 	struct cpumask *cpus = this_cpu_cpumask_var_ptr(select_idle_mask);
5678 	int core, cpu, wrap;
5679 
5680 	if (!static_branch_likely(&sched_smt_present))
5681 		return -1;
5682 
5683 	if (!test_idle_cores(target, false))
5684 		return -1;
5685 
5686 	cpumask_and(cpus, sched_domain_span(sd), tsk_cpus_allowed(p));
5687 
5688 	for_each_cpu_wrap(core, cpus, target, wrap) {
5689 		bool idle = true;
5690 
5691 		for_each_cpu(cpu, cpu_smt_mask(core)) {
5692 			cpumask_clear_cpu(cpu, cpus);
5693 			if (!idle_cpu(cpu))
5694 				idle = false;
5695 		}
5696 
5697 		if (idle)
5698 			return core;
5699 	}
5700 
5701 	/*
5702 	 * Failed to find an idle core; stop looking for one.
5703 	 */
5704 	set_idle_cores(target, 0);
5705 
5706 	return -1;
5707 }
5708 
5709 /*
5710  * Scan the local SMT mask for idle CPUs.
5711  */
5712 static int select_idle_smt(struct task_struct *p, struct sched_domain *sd, int target)
5713 {
5714 	int cpu;
5715 
5716 	if (!static_branch_likely(&sched_smt_present))
5717 		return -1;
5718 
5719 	for_each_cpu(cpu, cpu_smt_mask(target)) {
5720 		if (!cpumask_test_cpu(cpu, tsk_cpus_allowed(p)))
5721 			continue;
5722 		if (idle_cpu(cpu))
5723 			return cpu;
5724 	}
5725 
5726 	return -1;
5727 }
5728 
5729 #else /* CONFIG_SCHED_SMT */
5730 
5731 static inline int select_idle_core(struct task_struct *p, struct sched_domain *sd, int target)
5732 {
5733 	return -1;
5734 }
5735 
5736 static inline int select_idle_smt(struct task_struct *p, struct sched_domain *sd, int target)
5737 {
5738 	return -1;
5739 }
5740 
5741 #endif /* CONFIG_SCHED_SMT */
5742 
5743 /*
5744  * Scan the LLC domain for idle CPUs; this is dynamically regulated by
5745  * comparing the average scan cost (tracked in sd->avg_scan_cost) against the
5746  * average idle time for this rq (as found in rq->avg_idle).
5747  */
5748 static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, int target)
5749 {
5750 	struct sched_domain *this_sd;
5751 	u64 avg_cost, avg_idle = this_rq()->avg_idle;
5752 	u64 time, cost;
5753 	s64 delta;
5754 	int cpu, wrap;
5755 
5756 	this_sd = rcu_dereference(*this_cpu_ptr(&sd_llc));
5757 	if (!this_sd)
5758 		return -1;
5759 
5760 	avg_cost = this_sd->avg_scan_cost;
5761 
5762 	/*
5763 	 * Due to large variance we need a large fuzz factor; hackbench in
5764 	 * particularly is sensitive here.
5765 	 */
5766 	if ((avg_idle / 512) < avg_cost)
5767 		return -1;
5768 
5769 	time = local_clock();
5770 
5771 	for_each_cpu_wrap(cpu, sched_domain_span(sd), target, wrap) {
5772 		if (!cpumask_test_cpu(cpu, tsk_cpus_allowed(p)))
5773 			continue;
5774 		if (idle_cpu(cpu))
5775 			break;
5776 	}
5777 
5778 	time = local_clock() - time;
5779 	cost = this_sd->avg_scan_cost;
5780 	delta = (s64)(time - cost) / 8;
5781 	this_sd->avg_scan_cost += delta;
5782 
5783 	return cpu;
5784 }
5785 
5786 /*
5787  * Try and locate an idle core/thread in the LLC cache domain.
5788  */
5789 static int select_idle_sibling(struct task_struct *p, int prev, int target)
5790 {
5791 	struct sched_domain *sd;
5792 	int i;
5793 
5794 	if (idle_cpu(target))
5795 		return target;
5796 
5797 	/*
5798 	 * If the previous cpu is cache affine and idle, don't be stupid.
5799 	 */
5800 	if (prev != target && cpus_share_cache(prev, target) && idle_cpu(prev))
5801 		return prev;
5802 
5803 	sd = rcu_dereference(per_cpu(sd_llc, target));
5804 	if (!sd)
5805 		return target;
5806 
5807 	i = select_idle_core(p, sd, target);
5808 	if ((unsigned)i < nr_cpumask_bits)
5809 		return i;
5810 
5811 	i = select_idle_cpu(p, sd, target);
5812 	if ((unsigned)i < nr_cpumask_bits)
5813 		return i;
5814 
5815 	i = select_idle_smt(p, sd, target);
5816 	if ((unsigned)i < nr_cpumask_bits)
5817 		return i;
5818 
5819 	return target;
5820 }
5821 
5822 /*
5823  * cpu_util returns the amount of capacity of a CPU that is used by CFS
5824  * tasks. The unit of the return value must be the one of capacity so we can
5825  * compare the utilization with the capacity of the CPU that is available for
5826  * CFS task (ie cpu_capacity).
5827  *
5828  * cfs_rq.avg.util_avg is the sum of running time of runnable tasks plus the
5829  * recent utilization of currently non-runnable tasks on a CPU. It represents
5830  * the amount of utilization of a CPU in the range [0..capacity_orig] where
5831  * capacity_orig is the cpu_capacity available at the highest frequency
5832  * (arch_scale_freq_capacity()).
5833  * The utilization of a CPU converges towards a sum equal to or less than the
5834  * current capacity (capacity_curr <= capacity_orig) of the CPU because it is
5835  * the running time on this CPU scaled by capacity_curr.
5836  *
5837  * Nevertheless, cfs_rq.avg.util_avg can be higher than capacity_curr or even
5838  * higher than capacity_orig because of unfortunate rounding in
5839  * cfs.avg.util_avg or just after migrating tasks and new task wakeups until
5840  * the average stabilizes with the new running time. We need to check that the
5841  * utilization stays within the range of [0..capacity_orig] and cap it if
5842  * necessary. Without utilization capping, a group could be seen as overloaded
5843  * (CPU0 utilization at 121% + CPU1 utilization at 80%) whereas CPU1 has 20% of
5844  * available capacity. We allow utilization to overshoot capacity_curr (but not
5845  * capacity_orig) as it useful for predicting the capacity required after task
5846  * migrations (scheduler-driven DVFS).
5847  */
5848 static int cpu_util(int cpu)
5849 {
5850 	unsigned long util = cpu_rq(cpu)->cfs.avg.util_avg;
5851 	unsigned long capacity = capacity_orig_of(cpu);
5852 
5853 	return (util >= capacity) ? capacity : util;
5854 }
5855 
5856 static inline int task_util(struct task_struct *p)
5857 {
5858 	return p->se.avg.util_avg;
5859 }
5860 
5861 /*
5862  * cpu_util_wake: Compute cpu utilization with any contributions from
5863  * the waking task p removed.
5864  */
5865 static int cpu_util_wake(int cpu, struct task_struct *p)
5866 {
5867 	unsigned long util, capacity;
5868 
5869 	/* Task has no contribution or is new */
5870 	if (cpu != task_cpu(p) || !p->se.avg.last_update_time)
5871 		return cpu_util(cpu);
5872 
5873 	capacity = capacity_orig_of(cpu);
5874 	util = max_t(long, cpu_rq(cpu)->cfs.avg.util_avg - task_util(p), 0);
5875 
5876 	return (util >= capacity) ? capacity : util;
5877 }
5878 
5879 /*
5880  * Disable WAKE_AFFINE in the case where task @p doesn't fit in the
5881  * capacity of either the waking CPU @cpu or the previous CPU @prev_cpu.
5882  *
5883  * In that case WAKE_AFFINE doesn't make sense and we'll let
5884  * BALANCE_WAKE sort things out.
5885  */
5886 static int wake_cap(struct task_struct *p, int cpu, int prev_cpu)
5887 {
5888 	long min_cap, max_cap;
5889 
5890 	min_cap = min(capacity_orig_of(prev_cpu), capacity_orig_of(cpu));
5891 	max_cap = cpu_rq(cpu)->rd->max_cpu_capacity;
5892 
5893 	/* Minimum capacity is close to max, no need to abort wake_affine */
5894 	if (max_cap - min_cap < max_cap >> 3)
5895 		return 0;
5896 
5897 	/* Bring task utilization in sync with prev_cpu */
5898 	sync_entity_load_avg(&p->se);
5899 
5900 	return min_cap * 1024 < task_util(p) * capacity_margin;
5901 }
5902 
5903 /*
5904  * select_task_rq_fair: Select target runqueue for the waking task in domains
5905  * that have the 'sd_flag' flag set. In practice, this is SD_BALANCE_WAKE,
5906  * SD_BALANCE_FORK, or SD_BALANCE_EXEC.
5907  *
5908  * Balances load by selecting the idlest cpu in the idlest group, or under
5909  * certain conditions an idle sibling cpu if the domain has SD_WAKE_AFFINE set.
5910  *
5911  * Returns the target cpu number.
5912  *
5913  * preempt must be disabled.
5914  */
5915 static int
5916 select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_flags)
5917 {
5918 	struct sched_domain *tmp, *affine_sd = NULL, *sd = NULL;
5919 	int cpu = smp_processor_id();
5920 	int new_cpu = prev_cpu;
5921 	int want_affine = 0;
5922 	int sync = wake_flags & WF_SYNC;
5923 
5924 	if (sd_flag & SD_BALANCE_WAKE) {
5925 		record_wakee(p);
5926 		want_affine = !wake_wide(p) && !wake_cap(p, cpu, prev_cpu)
5927 			      && cpumask_test_cpu(cpu, tsk_cpus_allowed(p));
5928 	}
5929 
5930 	rcu_read_lock();
5931 	for_each_domain(cpu, tmp) {
5932 		if (!(tmp->flags & SD_LOAD_BALANCE))
5933 			break;
5934 
5935 		/*
5936 		 * If both cpu and prev_cpu are part of this domain,
5937 		 * cpu is a valid SD_WAKE_AFFINE target.
5938 		 */
5939 		if (want_affine && (tmp->flags & SD_WAKE_AFFINE) &&
5940 		    cpumask_test_cpu(prev_cpu, sched_domain_span(tmp))) {
5941 			affine_sd = tmp;
5942 			break;
5943 		}
5944 
5945 		if (tmp->flags & sd_flag)
5946 			sd = tmp;
5947 		else if (!want_affine)
5948 			break;
5949 	}
5950 
5951 	if (affine_sd) {
5952 		sd = NULL; /* Prefer wake_affine over balance flags */
5953 		if (cpu != prev_cpu && wake_affine(affine_sd, p, prev_cpu, sync))
5954 			new_cpu = cpu;
5955 	}
5956 
5957 	if (!sd) {
5958 		if (sd_flag & SD_BALANCE_WAKE) /* XXX always ? */
5959 			new_cpu = select_idle_sibling(p, prev_cpu, new_cpu);
5960 
5961 	} else while (sd) {
5962 		struct sched_group *group;
5963 		int weight;
5964 
5965 		if (!(sd->flags & sd_flag)) {
5966 			sd = sd->child;
5967 			continue;
5968 		}
5969 
5970 		group = find_idlest_group(sd, p, cpu, sd_flag);
5971 		if (!group) {
5972 			sd = sd->child;
5973 			continue;
5974 		}
5975 
5976 		new_cpu = find_idlest_cpu(group, p, cpu);
5977 		if (new_cpu == -1 || new_cpu == cpu) {
5978 			/* Now try balancing at a lower domain level of cpu */
5979 			sd = sd->child;
5980 			continue;
5981 		}
5982 
5983 		/* Now try balancing at a lower domain level of new_cpu */
5984 		cpu = new_cpu;
5985 		weight = sd->span_weight;
5986 		sd = NULL;
5987 		for_each_domain(cpu, tmp) {
5988 			if (weight <= tmp->span_weight)
5989 				break;
5990 			if (tmp->flags & sd_flag)
5991 				sd = tmp;
5992 		}
5993 		/* while loop will break here if sd == NULL */
5994 	}
5995 	rcu_read_unlock();
5996 
5997 	return new_cpu;
5998 }
5999 
6000 /*
6001  * Called immediately before a task is migrated to a new cpu; task_cpu(p) and
6002  * cfs_rq_of(p) references at time of call are still valid and identify the
6003  * previous cpu. The caller guarantees p->pi_lock or task_rq(p)->lock is held.
6004  */
6005 static void migrate_task_rq_fair(struct task_struct *p)
6006 {
6007 	/*
6008 	 * As blocked tasks retain absolute vruntime the migration needs to
6009 	 * deal with this by subtracting the old and adding the new
6010 	 * min_vruntime -- the latter is done by enqueue_entity() when placing
6011 	 * the task on the new runqueue.
6012 	 */
6013 	if (p->state == TASK_WAKING) {
6014 		struct sched_entity *se = &p->se;
6015 		struct cfs_rq *cfs_rq = cfs_rq_of(se);
6016 		u64 min_vruntime;
6017 
6018 #ifndef CONFIG_64BIT
6019 		u64 min_vruntime_copy;
6020 
6021 		do {
6022 			min_vruntime_copy = cfs_rq->min_vruntime_copy;
6023 			smp_rmb();
6024 			min_vruntime = cfs_rq->min_vruntime;
6025 		} while (min_vruntime != min_vruntime_copy);
6026 #else
6027 		min_vruntime = cfs_rq->min_vruntime;
6028 #endif
6029 
6030 		se->vruntime -= min_vruntime;
6031 	}
6032 
6033 	/*
6034 	 * We are supposed to update the task to "current" time, then its up to date
6035 	 * and ready to go to new CPU/cfs_rq. But we have difficulty in getting
6036 	 * what current time is, so simply throw away the out-of-date time. This
6037 	 * will result in the wakee task is less decayed, but giving the wakee more
6038 	 * load sounds not bad.
6039 	 */
6040 	remove_entity_load_avg(&p->se);
6041 
6042 	/* Tell new CPU we are migrated */
6043 	p->se.avg.last_update_time = 0;
6044 
6045 	/* We have migrated, no longer consider this task hot */
6046 	p->se.exec_start = 0;
6047 }
6048 
6049 static void task_dead_fair(struct task_struct *p)
6050 {
6051 	remove_entity_load_avg(&p->se);
6052 }
6053 #endif /* CONFIG_SMP */
6054 
6055 static unsigned long
6056 wakeup_gran(struct sched_entity *curr, struct sched_entity *se)
6057 {
6058 	unsigned long gran = sysctl_sched_wakeup_granularity;
6059 
6060 	/*
6061 	 * Since its curr running now, convert the gran from real-time
6062 	 * to virtual-time in his units.
6063 	 *
6064 	 * By using 'se' instead of 'curr' we penalize light tasks, so
6065 	 * they get preempted easier. That is, if 'se' < 'curr' then
6066 	 * the resulting gran will be larger, therefore penalizing the
6067 	 * lighter, if otoh 'se' > 'curr' then the resulting gran will
6068 	 * be smaller, again penalizing the lighter task.
6069 	 *
6070 	 * This is especially important for buddies when the leftmost
6071 	 * task is higher priority than the buddy.
6072 	 */
6073 	return calc_delta_fair(gran, se);
6074 }
6075 
6076 /*
6077  * Should 'se' preempt 'curr'.
6078  *
6079  *             |s1
6080  *        |s2
6081  *   |s3
6082  *         g
6083  *      |<--->|c
6084  *
6085  *  w(c, s1) = -1
6086  *  w(c, s2) =  0
6087  *  w(c, s3) =  1
6088  *
6089  */
6090 static int
6091 wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se)
6092 {
6093 	s64 gran, vdiff = curr->vruntime - se->vruntime;
6094 
6095 	if (vdiff <= 0)
6096 		return -1;
6097 
6098 	gran = wakeup_gran(curr, se);
6099 	if (vdiff > gran)
6100 		return 1;
6101 
6102 	return 0;
6103 }
6104 
6105 static void set_last_buddy(struct sched_entity *se)
6106 {
6107 	if (entity_is_task(se) && unlikely(task_of(se)->policy == SCHED_IDLE))
6108 		return;
6109 
6110 	for_each_sched_entity(se)
6111 		cfs_rq_of(se)->last = se;
6112 }
6113 
6114 static void set_next_buddy(struct sched_entity *se)
6115 {
6116 	if (entity_is_task(se) && unlikely(task_of(se)->policy == SCHED_IDLE))
6117 		return;
6118 
6119 	for_each_sched_entity(se)
6120 		cfs_rq_of(se)->next = se;
6121 }
6122 
6123 static void set_skip_buddy(struct sched_entity *se)
6124 {
6125 	for_each_sched_entity(se)
6126 		cfs_rq_of(se)->skip = se;
6127 }
6128 
6129 /*
6130  * Preempt the current task with a newly woken task if needed:
6131  */
6132 static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_flags)
6133 {
6134 	struct task_struct *curr = rq->curr;
6135 	struct sched_entity *se = &curr->se, *pse = &p->se;
6136 	struct cfs_rq *cfs_rq = task_cfs_rq(curr);
6137 	int scale = cfs_rq->nr_running >= sched_nr_latency;
6138 	int next_buddy_marked = 0;
6139 
6140 	if (unlikely(se == pse))
6141 		return;
6142 
6143 	/*
6144 	 * This is possible from callers such as attach_tasks(), in which we
6145 	 * unconditionally check_prempt_curr() after an enqueue (which may have
6146 	 * lead to a throttle).  This both saves work and prevents false
6147 	 * next-buddy nomination below.
6148 	 */
6149 	if (unlikely(throttled_hierarchy(cfs_rq_of(pse))))
6150 		return;
6151 
6152 	if (sched_feat(NEXT_BUDDY) && scale && !(wake_flags & WF_FORK)) {
6153 		set_next_buddy(pse);
6154 		next_buddy_marked = 1;
6155 	}
6156 
6157 	/*
6158 	 * We can come here with TIF_NEED_RESCHED already set from new task
6159 	 * wake up path.
6160 	 *
6161 	 * Note: this also catches the edge-case of curr being in a throttled
6162 	 * group (e.g. via set_curr_task), since update_curr() (in the
6163 	 * enqueue of curr) will have resulted in resched being set.  This
6164 	 * prevents us from potentially nominating it as a false LAST_BUDDY
6165 	 * below.
6166 	 */
6167 	if (test_tsk_need_resched(curr))
6168 		return;
6169 
6170 	/* Idle tasks are by definition preempted by non-idle tasks. */
6171 	if (unlikely(curr->policy == SCHED_IDLE) &&
6172 	    likely(p->policy != SCHED_IDLE))
6173 		goto preempt;
6174 
6175 	/*
6176 	 * Batch and idle tasks do not preempt non-idle tasks (their preemption
6177 	 * is driven by the tick):
6178 	 */
6179 	if (unlikely(p->policy != SCHED_NORMAL) || !sched_feat(WAKEUP_PREEMPTION))
6180 		return;
6181 
6182 	find_matching_se(&se, &pse);
6183 	update_curr(cfs_rq_of(se));
6184 	BUG_ON(!pse);
6185 	if (wakeup_preempt_entity(se, pse) == 1) {
6186 		/*
6187 		 * Bias pick_next to pick the sched entity that is
6188 		 * triggering this preemption.
6189 		 */
6190 		if (!next_buddy_marked)
6191 			set_next_buddy(pse);
6192 		goto preempt;
6193 	}
6194 
6195 	return;
6196 
6197 preempt:
6198 	resched_curr(rq);
6199 	/*
6200 	 * Only set the backward buddy when the current task is still
6201 	 * on the rq. This can happen when a wakeup gets interleaved
6202 	 * with schedule on the ->pre_schedule() or idle_balance()
6203 	 * point, either of which can * drop the rq lock.
6204 	 *
6205 	 * Also, during early boot the idle thread is in the fair class,
6206 	 * for obvious reasons its a bad idea to schedule back to it.
6207 	 */
6208 	if (unlikely(!se->on_rq || curr == rq->idle))
6209 		return;
6210 
6211 	if (sched_feat(LAST_BUDDY) && scale && entity_is_task(se))
6212 		set_last_buddy(se);
6213 }
6214 
6215 static struct task_struct *
6216 pick_next_task_fair(struct rq *rq, struct task_struct *prev, struct pin_cookie cookie)
6217 {
6218 	struct cfs_rq *cfs_rq = &rq->cfs;
6219 	struct sched_entity *se;
6220 	struct task_struct *p;
6221 	int new_tasks;
6222 
6223 again:
6224 #ifdef CONFIG_FAIR_GROUP_SCHED
6225 	if (!cfs_rq->nr_running)
6226 		goto idle;
6227 
6228 	if (prev->sched_class != &fair_sched_class)
6229 		goto simple;
6230 
6231 	/*
6232 	 * Because of the set_next_buddy() in dequeue_task_fair() it is rather
6233 	 * likely that a next task is from the same cgroup as the current.
6234 	 *
6235 	 * Therefore attempt to avoid putting and setting the entire cgroup
6236 	 * hierarchy, only change the part that actually changes.
6237 	 */
6238 
6239 	do {
6240 		struct sched_entity *curr = cfs_rq->curr;
6241 
6242 		/*
6243 		 * Since we got here without doing put_prev_entity() we also
6244 		 * have to consider cfs_rq->curr. If it is still a runnable
6245 		 * entity, update_curr() will update its vruntime, otherwise
6246 		 * forget we've ever seen it.
6247 		 */
6248 		if (curr) {
6249 			if (curr->on_rq)
6250 				update_curr(cfs_rq);
6251 			else
6252 				curr = NULL;
6253 
6254 			/*
6255 			 * This call to check_cfs_rq_runtime() will do the
6256 			 * throttle and dequeue its entity in the parent(s).
6257 			 * Therefore the 'simple' nr_running test will indeed
6258 			 * be correct.
6259 			 */
6260 			if (unlikely(check_cfs_rq_runtime(cfs_rq)))
6261 				goto simple;
6262 		}
6263 
6264 		se = pick_next_entity(cfs_rq, curr);
6265 		cfs_rq = group_cfs_rq(se);
6266 	} while (cfs_rq);
6267 
6268 	p = task_of(se);
6269 
6270 	/*
6271 	 * Since we haven't yet done put_prev_entity and if the selected task
6272 	 * is a different task than we started out with, try and touch the
6273 	 * least amount of cfs_rqs.
6274 	 */
6275 	if (prev != p) {
6276 		struct sched_entity *pse = &prev->se;
6277 
6278 		while (!(cfs_rq = is_same_group(se, pse))) {
6279 			int se_depth = se->depth;
6280 			int pse_depth = pse->depth;
6281 
6282 			if (se_depth <= pse_depth) {
6283 				put_prev_entity(cfs_rq_of(pse), pse);
6284 				pse = parent_entity(pse);
6285 			}
6286 			if (se_depth >= pse_depth) {
6287 				set_next_entity(cfs_rq_of(se), se);
6288 				se = parent_entity(se);
6289 			}
6290 		}
6291 
6292 		put_prev_entity(cfs_rq, pse);
6293 		set_next_entity(cfs_rq, se);
6294 	}
6295 
6296 	if (hrtick_enabled(rq))
6297 		hrtick_start_fair(rq, p);
6298 
6299 	return p;
6300 simple:
6301 	cfs_rq = &rq->cfs;
6302 #endif
6303 
6304 	if (!cfs_rq->nr_running)
6305 		goto idle;
6306 
6307 	put_prev_task(rq, prev);
6308 
6309 	do {
6310 		se = pick_next_entity(cfs_rq, NULL);
6311 		set_next_entity(cfs_rq, se);
6312 		cfs_rq = group_cfs_rq(se);
6313 	} while (cfs_rq);
6314 
6315 	p = task_of(se);
6316 
6317 	if (hrtick_enabled(rq))
6318 		hrtick_start_fair(rq, p);
6319 
6320 	return p;
6321 
6322 idle:
6323 	/*
6324 	 * This is OK, because current is on_cpu, which avoids it being picked
6325 	 * for load-balance and preemption/IRQs are still disabled avoiding
6326 	 * further scheduler activity on it and we're being very careful to
6327 	 * re-start the picking loop.
6328 	 */
6329 	lockdep_unpin_lock(&rq->lock, cookie);
6330 	new_tasks = idle_balance(rq);
6331 	lockdep_repin_lock(&rq->lock, cookie);
6332 	/*
6333 	 * Because idle_balance() releases (and re-acquires) rq->lock, it is
6334 	 * possible for any higher priority task to appear. In that case we
6335 	 * must re-start the pick_next_entity() loop.
6336 	 */
6337 	if (new_tasks < 0)
6338 		return RETRY_TASK;
6339 
6340 	if (new_tasks > 0)
6341 		goto again;
6342 
6343 	return NULL;
6344 }
6345 
6346 /*
6347  * Account for a descheduled task:
6348  */
6349 static void put_prev_task_fair(struct rq *rq, struct task_struct *prev)
6350 {
6351 	struct sched_entity *se = &prev->se;
6352 	struct cfs_rq *cfs_rq;
6353 
6354 	for_each_sched_entity(se) {
6355 		cfs_rq = cfs_rq_of(se);
6356 		put_prev_entity(cfs_rq, se);
6357 	}
6358 }
6359 
6360 /*
6361  * sched_yield() is very simple
6362  *
6363  * The magic of dealing with the ->skip buddy is in pick_next_entity.
6364  */
6365 static void yield_task_fair(struct rq *rq)
6366 {
6367 	struct task_struct *curr = rq->curr;
6368 	struct cfs_rq *cfs_rq = task_cfs_rq(curr);
6369 	struct sched_entity *se = &curr->se;
6370 
6371 	/*
6372 	 * Are we the only task in the tree?
6373 	 */
6374 	if (unlikely(rq->nr_running == 1))
6375 		return;
6376 
6377 	clear_buddies(cfs_rq, se);
6378 
6379 	if (curr->policy != SCHED_BATCH) {
6380 		update_rq_clock(rq);
6381 		/*
6382 		 * Update run-time statistics of the 'current'.
6383 		 */
6384 		update_curr(cfs_rq);
6385 		/*
6386 		 * Tell update_rq_clock() that we've just updated,
6387 		 * so we don't do microscopic update in schedule()
6388 		 * and double the fastpath cost.
6389 		 */
6390 		rq_clock_skip_update(rq, true);
6391 	}
6392 
6393 	set_skip_buddy(se);
6394 }
6395 
6396 static bool yield_to_task_fair(struct rq *rq, struct task_struct *p, bool preempt)
6397 {
6398 	struct sched_entity *se = &p->se;
6399 
6400 	/* throttled hierarchies are not runnable */
6401 	if (!se->on_rq || throttled_hierarchy(cfs_rq_of(se)))
6402 		return false;
6403 
6404 	/* Tell the scheduler that we'd really like pse to run next. */
6405 	set_next_buddy(se);
6406 
6407 	yield_task_fair(rq);
6408 
6409 	return true;
6410 }
6411 
6412 #ifdef CONFIG_SMP
6413 /**************************************************
6414  * Fair scheduling class load-balancing methods.
6415  *
6416  * BASICS
6417  *
6418  * The purpose of load-balancing is to achieve the same basic fairness the
6419  * per-cpu scheduler provides, namely provide a proportional amount of compute
6420  * time to each task. This is expressed in the following equation:
6421  *
6422  *   W_i,n/P_i == W_j,n/P_j for all i,j                               (1)
6423  *
6424  * Where W_i,n is the n-th weight average for cpu i. The instantaneous weight
6425  * W_i,0 is defined as:
6426  *
6427  *   W_i,0 = \Sum_j w_i,j                                             (2)
6428  *
6429  * Where w_i,j is the weight of the j-th runnable task on cpu i. This weight
6430  * is derived from the nice value as per sched_prio_to_weight[].
6431  *
6432  * The weight average is an exponential decay average of the instantaneous
6433  * weight:
6434  *
6435  *   W'_i,n = (2^n - 1) / 2^n * W_i,n + 1 / 2^n * W_i,0               (3)
6436  *
6437  * C_i is the compute capacity of cpu i, typically it is the
6438  * fraction of 'recent' time available for SCHED_OTHER task execution. But it
6439  * can also include other factors [XXX].
6440  *
6441  * To achieve this balance we define a measure of imbalance which follows
6442  * directly from (1):
6443  *
6444  *   imb_i,j = max{ avg(W/C), W_i/C_i } - min{ avg(W/C), W_j/C_j }    (4)
6445  *
6446  * We them move tasks around to minimize the imbalance. In the continuous
6447  * function space it is obvious this converges, in the discrete case we get
6448  * a few fun cases generally called infeasible weight scenarios.
6449  *
6450  * [XXX expand on:
6451  *     - infeasible weights;
6452  *     - local vs global optima in the discrete case. ]
6453  *
6454  *
6455  * SCHED DOMAINS
6456  *
6457  * In order to solve the imbalance equation (4), and avoid the obvious O(n^2)
6458  * for all i,j solution, we create a tree of cpus that follows the hardware
6459  * topology where each level pairs two lower groups (or better). This results
6460  * in O(log n) layers. Furthermore we reduce the number of cpus going up the
6461  * tree to only the first of the previous level and we decrease the frequency
6462  * of load-balance at each level inv. proportional to the number of cpus in
6463  * the groups.
6464  *
6465  * This yields:
6466  *
6467  *     log_2 n     1     n
6468  *   \Sum       { --- * --- * 2^i } = O(n)                            (5)
6469  *     i = 0      2^i   2^i
6470  *                               `- size of each group
6471  *         |         |     `- number of cpus doing load-balance
6472  *         |         `- freq
6473  *         `- sum over all levels
6474  *
6475  * Coupled with a limit on how many tasks we can migrate every balance pass,
6476  * this makes (5) the runtime complexity of the balancer.
6477  *
6478  * An important property here is that each CPU is still (indirectly) connected
6479  * to every other cpu in at most O(log n) steps:
6480  *
6481  * The adjacency matrix of the resulting graph is given by:
6482  *
6483  *             log_2 n
6484  *   A_i,j = \Union     (i % 2^k == 0) && i / 2^(k+1) == j / 2^(k+1)  (6)
6485  *             k = 0
6486  *
6487  * And you'll find that:
6488  *
6489  *   A^(log_2 n)_i,j != 0  for all i,j                                (7)
6490  *
6491  * Showing there's indeed a path between every cpu in at most O(log n) steps.
6492  * The task movement gives a factor of O(m), giving a convergence complexity
6493  * of:
6494  *
6495  *   O(nm log n),  n := nr_cpus, m := nr_tasks                        (8)
6496  *
6497  *
6498  * WORK CONSERVING
6499  *
6500  * In order to avoid CPUs going idle while there's still work to do, new idle
6501  * balancing is more aggressive and has the newly idle cpu iterate up the domain
6502  * tree itself instead of relying on other CPUs to bring it work.
6503  *
6504  * This adds some complexity to both (5) and (8) but it reduces the total idle
6505  * time.
6506  *
6507  * [XXX more?]
6508  *
6509  *
6510  * CGROUPS
6511  *
6512  * Cgroups make a horror show out of (2), instead of a simple sum we get:
6513  *
6514  *                                s_k,i
6515  *   W_i,0 = \Sum_j \Prod_k w_k * -----                               (9)
6516  *                                 S_k
6517  *
6518  * Where
6519  *
6520  *   s_k,i = \Sum_j w_i,j,k  and  S_k = \Sum_i s_k,i                 (10)
6521  *
6522  * w_i,j,k is the weight of the j-th runnable task in the k-th cgroup on cpu i.
6523  *
6524  * The big problem is S_k, its a global sum needed to compute a local (W_i)
6525  * property.
6526  *
6527  * [XXX write more on how we solve this.. _after_ merging pjt's patches that
6528  *      rewrite all of this once again.]
6529  */
6530 
6531 static unsigned long __read_mostly max_load_balance_interval = HZ/10;
6532 
6533 enum fbq_type { regular, remote, all };
6534 
6535 #define LBF_ALL_PINNED	0x01
6536 #define LBF_NEED_BREAK	0x02
6537 #define LBF_DST_PINNED  0x04
6538 #define LBF_SOME_PINNED	0x08
6539 
6540 struct lb_env {
6541 	struct sched_domain	*sd;
6542 
6543 	struct rq		*src_rq;
6544 	int			src_cpu;
6545 
6546 	int			dst_cpu;
6547 	struct rq		*dst_rq;
6548 
6549 	struct cpumask		*dst_grpmask;
6550 	int			new_dst_cpu;
6551 	enum cpu_idle_type	idle;
6552 	long			imbalance;
6553 	/* The set of CPUs under consideration for load-balancing */
6554 	struct cpumask		*cpus;
6555 
6556 	unsigned int		flags;
6557 
6558 	unsigned int		loop;
6559 	unsigned int		loop_break;
6560 	unsigned int		loop_max;
6561 
6562 	enum fbq_type		fbq_type;
6563 	struct list_head	tasks;
6564 };
6565 
6566 /*
6567  * Is this task likely cache-hot:
6568  */
6569 static int task_hot(struct task_struct *p, struct lb_env *env)
6570 {
6571 	s64 delta;
6572 
6573 	lockdep_assert_held(&env->src_rq->lock);
6574 
6575 	if (p->sched_class != &fair_sched_class)
6576 		return 0;
6577 
6578 	if (unlikely(p->policy == SCHED_IDLE))
6579 		return 0;
6580 
6581 	/*
6582 	 * Buddy candidates are cache hot:
6583 	 */
6584 	if (sched_feat(CACHE_HOT_BUDDY) && env->dst_rq->nr_running &&
6585 			(&p->se == cfs_rq_of(&p->se)->next ||
6586 			 &p->se == cfs_rq_of(&p->se)->last))
6587 		return 1;
6588 
6589 	if (sysctl_sched_migration_cost == -1)
6590 		return 1;
6591 	if (sysctl_sched_migration_cost == 0)
6592 		return 0;
6593 
6594 	delta = rq_clock_task(env->src_rq) - p->se.exec_start;
6595 
6596 	return delta < (s64)sysctl_sched_migration_cost;
6597 }
6598 
6599 #ifdef CONFIG_NUMA_BALANCING
6600 /*
6601  * Returns 1, if task migration degrades locality
6602  * Returns 0, if task migration improves locality i.e migration preferred.
6603  * Returns -1, if task migration is not affected by locality.
6604  */
6605 static int migrate_degrades_locality(struct task_struct *p, struct lb_env *env)
6606 {
6607 	struct numa_group *numa_group = rcu_dereference(p->numa_group);
6608 	unsigned long src_faults, dst_faults;
6609 	int src_nid, dst_nid;
6610 
6611 	if (!static_branch_likely(&sched_numa_balancing))
6612 		return -1;
6613 
6614 	if (!p->numa_faults || !(env->sd->flags & SD_NUMA))
6615 		return -1;
6616 
6617 	src_nid = cpu_to_node(env->src_cpu);
6618 	dst_nid = cpu_to_node(env->dst_cpu);
6619 
6620 	if (src_nid == dst_nid)
6621 		return -1;
6622 
6623 	/* Migrating away from the preferred node is always bad. */
6624 	if (src_nid == p->numa_preferred_nid) {
6625 		if (env->src_rq->nr_running > env->src_rq->nr_preferred_running)
6626 			return 1;
6627 		else
6628 			return -1;
6629 	}
6630 
6631 	/* Encourage migration to the preferred node. */
6632 	if (dst_nid == p->numa_preferred_nid)
6633 		return 0;
6634 
6635 	if (numa_group) {
6636 		src_faults = group_faults(p, src_nid);
6637 		dst_faults = group_faults(p, dst_nid);
6638 	} else {
6639 		src_faults = task_faults(p, src_nid);
6640 		dst_faults = task_faults(p, dst_nid);
6641 	}
6642 
6643 	return dst_faults < src_faults;
6644 }
6645 
6646 #else
6647 static inline int migrate_degrades_locality(struct task_struct *p,
6648 					     struct lb_env *env)
6649 {
6650 	return -1;
6651 }
6652 #endif
6653 
6654 /*
6655  * can_migrate_task - may task p from runqueue rq be migrated to this_cpu?
6656  */
6657 static
6658 int can_migrate_task(struct task_struct *p, struct lb_env *env)
6659 {
6660 	int tsk_cache_hot;
6661 
6662 	lockdep_assert_held(&env->src_rq->lock);
6663 
6664 	/*
6665 	 * We do not migrate tasks that are:
6666 	 * 1) throttled_lb_pair, or
6667 	 * 2) cannot be migrated to this CPU due to cpus_allowed, or
6668 	 * 3) running (obviously), or
6669 	 * 4) are cache-hot on their current CPU.
6670 	 */
6671 	if (throttled_lb_pair(task_group(p), env->src_cpu, env->dst_cpu))
6672 		return 0;
6673 
6674 	if (!cpumask_test_cpu(env->dst_cpu, tsk_cpus_allowed(p))) {
6675 		int cpu;
6676 
6677 		schedstat_inc(p->se.statistics.nr_failed_migrations_affine);
6678 
6679 		env->flags |= LBF_SOME_PINNED;
6680 
6681 		/*
6682 		 * Remember if this task can be migrated to any other cpu in
6683 		 * our sched_group. We may want to revisit it if we couldn't
6684 		 * meet load balance goals by pulling other tasks on src_cpu.
6685 		 *
6686 		 * Also avoid computing new_dst_cpu if we have already computed
6687 		 * one in current iteration.
6688 		 */
6689 		if (!env->dst_grpmask || (env->flags & LBF_DST_PINNED))
6690 			return 0;
6691 
6692 		/* Prevent to re-select dst_cpu via env's cpus */
6693 		for_each_cpu_and(cpu, env->dst_grpmask, env->cpus) {
6694 			if (cpumask_test_cpu(cpu, tsk_cpus_allowed(p))) {
6695 				env->flags |= LBF_DST_PINNED;
6696 				env->new_dst_cpu = cpu;
6697 				break;
6698 			}
6699 		}
6700 
6701 		return 0;
6702 	}
6703 
6704 	/* Record that we found atleast one task that could run on dst_cpu */
6705 	env->flags &= ~LBF_ALL_PINNED;
6706 
6707 	if (task_running(env->src_rq, p)) {
6708 		schedstat_inc(p->se.statistics.nr_failed_migrations_running);
6709 		return 0;
6710 	}
6711 
6712 	/*
6713 	 * Aggressive migration if:
6714 	 * 1) destination numa is preferred
6715 	 * 2) task is cache cold, or
6716 	 * 3) too many balance attempts have failed.
6717 	 */
6718 	tsk_cache_hot = migrate_degrades_locality(p, env);
6719 	if (tsk_cache_hot == -1)
6720 		tsk_cache_hot = task_hot(p, env);
6721 
6722 	if (tsk_cache_hot <= 0 ||
6723 	    env->sd->nr_balance_failed > env->sd->cache_nice_tries) {
6724 		if (tsk_cache_hot == 1) {
6725 			schedstat_inc(env->sd->lb_hot_gained[env->idle]);
6726 			schedstat_inc(p->se.statistics.nr_forced_migrations);
6727 		}
6728 		return 1;
6729 	}
6730 
6731 	schedstat_inc(p->se.statistics.nr_failed_migrations_hot);
6732 	return 0;
6733 }
6734 
6735 /*
6736  * detach_task() -- detach the task for the migration specified in env
6737  */
6738 static void detach_task(struct task_struct *p, struct lb_env *env)
6739 {
6740 	lockdep_assert_held(&env->src_rq->lock);
6741 
6742 	p->on_rq = TASK_ON_RQ_MIGRATING;
6743 	deactivate_task(env->src_rq, p, 0);
6744 	set_task_cpu(p, env->dst_cpu);
6745 }
6746 
6747 /*
6748  * detach_one_task() -- tries to dequeue exactly one task from env->src_rq, as
6749  * part of active balancing operations within "domain".
6750  *
6751  * Returns a task if successful and NULL otherwise.
6752  */
6753 static struct task_struct *detach_one_task(struct lb_env *env)
6754 {
6755 	struct task_struct *p, *n;
6756 
6757 	lockdep_assert_held(&env->src_rq->lock);
6758 
6759 	list_for_each_entry_safe(p, n, &env->src_rq->cfs_tasks, se.group_node) {
6760 		if (!can_migrate_task(p, env))
6761 			continue;
6762 
6763 		detach_task(p, env);
6764 
6765 		/*
6766 		 * Right now, this is only the second place where
6767 		 * lb_gained[env->idle] is updated (other is detach_tasks)
6768 		 * so we can safely collect stats here rather than
6769 		 * inside detach_tasks().
6770 		 */
6771 		schedstat_inc(env->sd->lb_gained[env->idle]);
6772 		return p;
6773 	}
6774 	return NULL;
6775 }
6776 
6777 static const unsigned int sched_nr_migrate_break = 32;
6778 
6779 /*
6780  * detach_tasks() -- tries to detach up to imbalance weighted load from
6781  * busiest_rq, as part of a balancing operation within domain "sd".
6782  *
6783  * Returns number of detached tasks if successful and 0 otherwise.
6784  */
6785 static int detach_tasks(struct lb_env *env)
6786 {
6787 	struct list_head *tasks = &env->src_rq->cfs_tasks;
6788 	struct task_struct *p;
6789 	unsigned long load;
6790 	int detached = 0;
6791 
6792 	lockdep_assert_held(&env->src_rq->lock);
6793 
6794 	if (env->imbalance <= 0)
6795 		return 0;
6796 
6797 	while (!list_empty(tasks)) {
6798 		/*
6799 		 * We don't want to steal all, otherwise we may be treated likewise,
6800 		 * which could at worst lead to a livelock crash.
6801 		 */
6802 		if (env->idle != CPU_NOT_IDLE && env->src_rq->nr_running <= 1)
6803 			break;
6804 
6805 		p = list_first_entry(tasks, struct task_struct, se.group_node);
6806 
6807 		env->loop++;
6808 		/* We've more or less seen every task there is, call it quits */
6809 		if (env->loop > env->loop_max)
6810 			break;
6811 
6812 		/* take a breather every nr_migrate tasks */
6813 		if (env->loop > env->loop_break) {
6814 			env->loop_break += sched_nr_migrate_break;
6815 			env->flags |= LBF_NEED_BREAK;
6816 			break;
6817 		}
6818 
6819 		if (!can_migrate_task(p, env))
6820 			goto next;
6821 
6822 		load = task_h_load(p);
6823 
6824 		if (sched_feat(LB_MIN) && load < 16 && !env->sd->nr_balance_failed)
6825 			goto next;
6826 
6827 		if ((load / 2) > env->imbalance)
6828 			goto next;
6829 
6830 		detach_task(p, env);
6831 		list_add(&p->se.group_node, &env->tasks);
6832 
6833 		detached++;
6834 		env->imbalance -= load;
6835 
6836 #ifdef CONFIG_PREEMPT
6837 		/*
6838 		 * NEWIDLE balancing is a source of latency, so preemptible
6839 		 * kernels will stop after the first task is detached to minimize
6840 		 * the critical section.
6841 		 */
6842 		if (env->idle == CPU_NEWLY_IDLE)
6843 			break;
6844 #endif
6845 
6846 		/*
6847 		 * We only want to steal up to the prescribed amount of
6848 		 * weighted load.
6849 		 */
6850 		if (env->imbalance <= 0)
6851 			break;
6852 
6853 		continue;
6854 next:
6855 		list_move_tail(&p->se.group_node, tasks);
6856 	}
6857 
6858 	/*
6859 	 * Right now, this is one of only two places we collect this stat
6860 	 * so we can safely collect detach_one_task() stats here rather
6861 	 * than inside detach_one_task().
6862 	 */
6863 	schedstat_add(env->sd->lb_gained[env->idle], detached);
6864 
6865 	return detached;
6866 }
6867 
6868 /*
6869  * attach_task() -- attach the task detached by detach_task() to its new rq.
6870  */
6871 static void attach_task(struct rq *rq, struct task_struct *p)
6872 {
6873 	lockdep_assert_held(&rq->lock);
6874 
6875 	BUG_ON(task_rq(p) != rq);
6876 	activate_task(rq, p, 0);
6877 	p->on_rq = TASK_ON_RQ_QUEUED;
6878 	check_preempt_curr(rq, p, 0);
6879 }
6880 
6881 /*
6882  * attach_one_task() -- attaches the task returned from detach_one_task() to
6883  * its new rq.
6884  */
6885 static void attach_one_task(struct rq *rq, struct task_struct *p)
6886 {
6887 	raw_spin_lock(&rq->lock);
6888 	attach_task(rq, p);
6889 	raw_spin_unlock(&rq->lock);
6890 }
6891 
6892 /*
6893  * attach_tasks() -- attaches all tasks detached by detach_tasks() to their
6894  * new rq.
6895  */
6896 static void attach_tasks(struct lb_env *env)
6897 {
6898 	struct list_head *tasks = &env->tasks;
6899 	struct task_struct *p;
6900 
6901 	raw_spin_lock(&env->dst_rq->lock);
6902 
6903 	while (!list_empty(tasks)) {
6904 		p = list_first_entry(tasks, struct task_struct, se.group_node);
6905 		list_del_init(&p->se.group_node);
6906 
6907 		attach_task(env->dst_rq, p);
6908 	}
6909 
6910 	raw_spin_unlock(&env->dst_rq->lock);
6911 }
6912 
6913 #ifdef CONFIG_FAIR_GROUP_SCHED
6914 static void update_blocked_averages(int cpu)
6915 {
6916 	struct rq *rq = cpu_rq(cpu);
6917 	struct cfs_rq *cfs_rq;
6918 	unsigned long flags;
6919 
6920 	raw_spin_lock_irqsave(&rq->lock, flags);
6921 	update_rq_clock(rq);
6922 
6923 	/*
6924 	 * Iterates the task_group tree in a bottom up fashion, see
6925 	 * list_add_leaf_cfs_rq() for details.
6926 	 */
6927 	for_each_leaf_cfs_rq(rq, cfs_rq) {
6928 		/* throttled entities do not contribute to load */
6929 		if (throttled_hierarchy(cfs_rq))
6930 			continue;
6931 
6932 		if (update_cfs_rq_load_avg(cfs_rq_clock_task(cfs_rq), cfs_rq, true))
6933 			update_tg_load_avg(cfs_rq, 0);
6934 
6935 		/* Propagate pending load changes to the parent */
6936 		if (cfs_rq->tg->se[cpu])
6937 			update_load_avg(cfs_rq->tg->se[cpu], 0);
6938 	}
6939 	raw_spin_unlock_irqrestore(&rq->lock, flags);
6940 }
6941 
6942 /*
6943  * Compute the hierarchical load factor for cfs_rq and all its ascendants.
6944  * This needs to be done in a top-down fashion because the load of a child
6945  * group is a fraction of its parents load.
6946  */
6947 static void update_cfs_rq_h_load(struct cfs_rq *cfs_rq)
6948 {
6949 	struct rq *rq = rq_of(cfs_rq);
6950 	struct sched_entity *se = cfs_rq->tg->se[cpu_of(rq)];
6951 	unsigned long now = jiffies;
6952 	unsigned long load;
6953 
6954 	if (cfs_rq->last_h_load_update == now)
6955 		return;
6956 
6957 	cfs_rq->h_load_next = NULL;
6958 	for_each_sched_entity(se) {
6959 		cfs_rq = cfs_rq_of(se);
6960 		cfs_rq->h_load_next = se;
6961 		if (cfs_rq->last_h_load_update == now)
6962 			break;
6963 	}
6964 
6965 	if (!se) {
6966 		cfs_rq->h_load = cfs_rq_load_avg(cfs_rq);
6967 		cfs_rq->last_h_load_update = now;
6968 	}
6969 
6970 	while ((se = cfs_rq->h_load_next) != NULL) {
6971 		load = cfs_rq->h_load;
6972 		load = div64_ul(load * se->avg.load_avg,
6973 			cfs_rq_load_avg(cfs_rq) + 1);
6974 		cfs_rq = group_cfs_rq(se);
6975 		cfs_rq->h_load = load;
6976 		cfs_rq->last_h_load_update = now;
6977 	}
6978 }
6979 
6980 static unsigned long task_h_load(struct task_struct *p)
6981 {
6982 	struct cfs_rq *cfs_rq = task_cfs_rq(p);
6983 
6984 	update_cfs_rq_h_load(cfs_rq);
6985 	return div64_ul(p->se.avg.load_avg * cfs_rq->h_load,
6986 			cfs_rq_load_avg(cfs_rq) + 1);
6987 }
6988 #else
6989 static inline void update_blocked_averages(int cpu)
6990 {
6991 	struct rq *rq = cpu_rq(cpu);
6992 	struct cfs_rq *cfs_rq = &rq->cfs;
6993 	unsigned long flags;
6994 
6995 	raw_spin_lock_irqsave(&rq->lock, flags);
6996 	update_rq_clock(rq);
6997 	update_cfs_rq_load_avg(cfs_rq_clock_task(cfs_rq), cfs_rq, true);
6998 	raw_spin_unlock_irqrestore(&rq->lock, flags);
6999 }
7000 
7001 static unsigned long task_h_load(struct task_struct *p)
7002 {
7003 	return p->se.avg.load_avg;
7004 }
7005 #endif
7006 
7007 /********** Helpers for find_busiest_group ************************/
7008 
7009 enum group_type {
7010 	group_other = 0,
7011 	group_imbalanced,
7012 	group_overloaded,
7013 };
7014 
7015 /*
7016  * sg_lb_stats - stats of a sched_group required for load_balancing
7017  */
7018 struct sg_lb_stats {
7019 	unsigned long avg_load; /*Avg load across the CPUs of the group */
7020 	unsigned long group_load; /* Total load over the CPUs of the group */
7021 	unsigned long sum_weighted_load; /* Weighted load of group's tasks */
7022 	unsigned long load_per_task;
7023 	unsigned long group_capacity;
7024 	unsigned long group_util; /* Total utilization of the group */
7025 	unsigned int sum_nr_running; /* Nr tasks running in the group */
7026 	unsigned int idle_cpus;
7027 	unsigned int group_weight;
7028 	enum group_type group_type;
7029 	int group_no_capacity;
7030 #ifdef CONFIG_NUMA_BALANCING
7031 	unsigned int nr_numa_running;
7032 	unsigned int nr_preferred_running;
7033 #endif
7034 };
7035 
7036 /*
7037  * sd_lb_stats - Structure to store the statistics of a sched_domain
7038  *		 during load balancing.
7039  */
7040 struct sd_lb_stats {
7041 	struct sched_group *busiest;	/* Busiest group in this sd */
7042 	struct sched_group *local;	/* Local group in this sd */
7043 	unsigned long total_load;	/* Total load of all groups in sd */
7044 	unsigned long total_capacity;	/* Total capacity of all groups in sd */
7045 	unsigned long avg_load;	/* Average load across all groups in sd */
7046 
7047 	struct sg_lb_stats busiest_stat;/* Statistics of the busiest group */
7048 	struct sg_lb_stats local_stat;	/* Statistics of the local group */
7049 };
7050 
7051 static inline void init_sd_lb_stats(struct sd_lb_stats *sds)
7052 {
7053 	/*
7054 	 * Skimp on the clearing to avoid duplicate work. We can avoid clearing
7055 	 * local_stat because update_sg_lb_stats() does a full clear/assignment.
7056 	 * We must however clear busiest_stat::avg_load because
7057 	 * update_sd_pick_busiest() reads this before assignment.
7058 	 */
7059 	*sds = (struct sd_lb_stats){
7060 		.busiest = NULL,
7061 		.local = NULL,
7062 		.total_load = 0UL,
7063 		.total_capacity = 0UL,
7064 		.busiest_stat = {
7065 			.avg_load = 0UL,
7066 			.sum_nr_running = 0,
7067 			.group_type = group_other,
7068 		},
7069 	};
7070 }
7071 
7072 /**
7073  * get_sd_load_idx - Obtain the load index for a given sched domain.
7074  * @sd: The sched_domain whose load_idx is to be obtained.
7075  * @idle: The idle status of the CPU for whose sd load_idx is obtained.
7076  *
7077  * Return: The load index.
7078  */
7079 static inline int get_sd_load_idx(struct sched_domain *sd,
7080 					enum cpu_idle_type idle)
7081 {
7082 	int load_idx;
7083 
7084 	switch (idle) {
7085 	case CPU_NOT_IDLE:
7086 		load_idx = sd->busy_idx;
7087 		break;
7088 
7089 	case CPU_NEWLY_IDLE:
7090 		load_idx = sd->newidle_idx;
7091 		break;
7092 	default:
7093 		load_idx = sd->idle_idx;
7094 		break;
7095 	}
7096 
7097 	return load_idx;
7098 }
7099 
7100 static unsigned long scale_rt_capacity(int cpu)
7101 {
7102 	struct rq *rq = cpu_rq(cpu);
7103 	u64 total, used, age_stamp, avg;
7104 	s64 delta;
7105 
7106 	/*
7107 	 * Since we're reading these variables without serialization make sure
7108 	 * we read them once before doing sanity checks on them.
7109 	 */
7110 	age_stamp = READ_ONCE(rq->age_stamp);
7111 	avg = READ_ONCE(rq->rt_avg);
7112 	delta = __rq_clock_broken(rq) - age_stamp;
7113 
7114 	if (unlikely(delta < 0))
7115 		delta = 0;
7116 
7117 	total = sched_avg_period() + delta;
7118 
7119 	used = div_u64(avg, total);
7120 
7121 	if (likely(used < SCHED_CAPACITY_SCALE))
7122 		return SCHED_CAPACITY_SCALE - used;
7123 
7124 	return 1;
7125 }
7126 
7127 static void update_cpu_capacity(struct sched_domain *sd, int cpu)
7128 {
7129 	unsigned long capacity = arch_scale_cpu_capacity(sd, cpu);
7130 	struct sched_group *sdg = sd->groups;
7131 
7132 	cpu_rq(cpu)->cpu_capacity_orig = capacity;
7133 
7134 	capacity *= scale_rt_capacity(cpu);
7135 	capacity >>= SCHED_CAPACITY_SHIFT;
7136 
7137 	if (!capacity)
7138 		capacity = 1;
7139 
7140 	cpu_rq(cpu)->cpu_capacity = capacity;
7141 	sdg->sgc->capacity = capacity;
7142 	sdg->sgc->min_capacity = capacity;
7143 }
7144 
7145 void update_group_capacity(struct sched_domain *sd, int cpu)
7146 {
7147 	struct sched_domain *child = sd->child;
7148 	struct sched_group *group, *sdg = sd->groups;
7149 	unsigned long capacity, min_capacity;
7150 	unsigned long interval;
7151 
7152 	interval = msecs_to_jiffies(sd->balance_interval);
7153 	interval = clamp(interval, 1UL, max_load_balance_interval);
7154 	sdg->sgc->next_update = jiffies + interval;
7155 
7156 	if (!child) {
7157 		update_cpu_capacity(sd, cpu);
7158 		return;
7159 	}
7160 
7161 	capacity = 0;
7162 	min_capacity = ULONG_MAX;
7163 
7164 	if (child->flags & SD_OVERLAP) {
7165 		/*
7166 		 * SD_OVERLAP domains cannot assume that child groups
7167 		 * span the current group.
7168 		 */
7169 
7170 		for_each_cpu(cpu, sched_group_cpus(sdg)) {
7171 			struct sched_group_capacity *sgc;
7172 			struct rq *rq = cpu_rq(cpu);
7173 
7174 			/*
7175 			 * build_sched_domains() -> init_sched_groups_capacity()
7176 			 * gets here before we've attached the domains to the
7177 			 * runqueues.
7178 			 *
7179 			 * Use capacity_of(), which is set irrespective of domains
7180 			 * in update_cpu_capacity().
7181 			 *
7182 			 * This avoids capacity from being 0 and
7183 			 * causing divide-by-zero issues on boot.
7184 			 */
7185 			if (unlikely(!rq->sd)) {
7186 				capacity += capacity_of(cpu);
7187 			} else {
7188 				sgc = rq->sd->groups->sgc;
7189 				capacity += sgc->capacity;
7190 			}
7191 
7192 			min_capacity = min(capacity, min_capacity);
7193 		}
7194 	} else  {
7195 		/*
7196 		 * !SD_OVERLAP domains can assume that child groups
7197 		 * span the current group.
7198 		 */
7199 
7200 		group = child->groups;
7201 		do {
7202 			struct sched_group_capacity *sgc = group->sgc;
7203 
7204 			capacity += sgc->capacity;
7205 			min_capacity = min(sgc->min_capacity, min_capacity);
7206 			group = group->next;
7207 		} while (group != child->groups);
7208 	}
7209 
7210 	sdg->sgc->capacity = capacity;
7211 	sdg->sgc->min_capacity = min_capacity;
7212 }
7213 
7214 /*
7215  * Check whether the capacity of the rq has been noticeably reduced by side
7216  * activity. The imbalance_pct is used for the threshold.
7217  * Return true is the capacity is reduced
7218  */
7219 static inline int
7220 check_cpu_capacity(struct rq *rq, struct sched_domain *sd)
7221 {
7222 	return ((rq->cpu_capacity * sd->imbalance_pct) <
7223 				(rq->cpu_capacity_orig * 100));
7224 }
7225 
7226 /*
7227  * Group imbalance indicates (and tries to solve) the problem where balancing
7228  * groups is inadequate due to tsk_cpus_allowed() constraints.
7229  *
7230  * Imagine a situation of two groups of 4 cpus each and 4 tasks each with a
7231  * cpumask covering 1 cpu of the first group and 3 cpus of the second group.
7232  * Something like:
7233  *
7234  *	{ 0 1 2 3 } { 4 5 6 7 }
7235  *	        *     * * *
7236  *
7237  * If we were to balance group-wise we'd place two tasks in the first group and
7238  * two tasks in the second group. Clearly this is undesired as it will overload
7239  * cpu 3 and leave one of the cpus in the second group unused.
7240  *
7241  * The current solution to this issue is detecting the skew in the first group
7242  * by noticing the lower domain failed to reach balance and had difficulty
7243  * moving tasks due to affinity constraints.
7244  *
7245  * When this is so detected; this group becomes a candidate for busiest; see
7246  * update_sd_pick_busiest(). And calculate_imbalance() and
7247  * find_busiest_group() avoid some of the usual balance conditions to allow it
7248  * to create an effective group imbalance.
7249  *
7250  * This is a somewhat tricky proposition since the next run might not find the
7251  * group imbalance and decide the groups need to be balanced again. A most
7252  * subtle and fragile situation.
7253  */
7254 
7255 static inline int sg_imbalanced(struct sched_group *group)
7256 {
7257 	return group->sgc->imbalance;
7258 }
7259 
7260 /*
7261  * group_has_capacity returns true if the group has spare capacity that could
7262  * be used by some tasks.
7263  * We consider that a group has spare capacity if the  * number of task is
7264  * smaller than the number of CPUs or if the utilization is lower than the
7265  * available capacity for CFS tasks.
7266  * For the latter, we use a threshold to stabilize the state, to take into
7267  * account the variance of the tasks' load and to return true if the available
7268  * capacity in meaningful for the load balancer.
7269  * As an example, an available capacity of 1% can appear but it doesn't make
7270  * any benefit for the load balance.
7271  */
7272 static inline bool
7273 group_has_capacity(struct lb_env *env, struct sg_lb_stats *sgs)
7274 {
7275 	if (sgs->sum_nr_running < sgs->group_weight)
7276 		return true;
7277 
7278 	if ((sgs->group_capacity * 100) >
7279 			(sgs->group_util * env->sd->imbalance_pct))
7280 		return true;
7281 
7282 	return false;
7283 }
7284 
7285 /*
7286  *  group_is_overloaded returns true if the group has more tasks than it can
7287  *  handle.
7288  *  group_is_overloaded is not equals to !group_has_capacity because a group
7289  *  with the exact right number of tasks, has no more spare capacity but is not
7290  *  overloaded so both group_has_capacity and group_is_overloaded return
7291  *  false.
7292  */
7293 static inline bool
7294 group_is_overloaded(struct lb_env *env, struct sg_lb_stats *sgs)
7295 {
7296 	if (sgs->sum_nr_running <= sgs->group_weight)
7297 		return false;
7298 
7299 	if ((sgs->group_capacity * 100) <
7300 			(sgs->group_util * env->sd->imbalance_pct))
7301 		return true;
7302 
7303 	return false;
7304 }
7305 
7306 /*
7307  * group_smaller_cpu_capacity: Returns true if sched_group sg has smaller
7308  * per-CPU capacity than sched_group ref.
7309  */
7310 static inline bool
7311 group_smaller_cpu_capacity(struct sched_group *sg, struct sched_group *ref)
7312 {
7313 	return sg->sgc->min_capacity * capacity_margin <
7314 						ref->sgc->min_capacity * 1024;
7315 }
7316 
7317 static inline enum
7318 group_type group_classify(struct sched_group *group,
7319 			  struct sg_lb_stats *sgs)
7320 {
7321 	if (sgs->group_no_capacity)
7322 		return group_overloaded;
7323 
7324 	if (sg_imbalanced(group))
7325 		return group_imbalanced;
7326 
7327 	return group_other;
7328 }
7329 
7330 /**
7331  * update_sg_lb_stats - Update sched_group's statistics for load balancing.
7332  * @env: The load balancing environment.
7333  * @group: sched_group whose statistics are to be updated.
7334  * @load_idx: Load index of sched_domain of this_cpu for load calc.
7335  * @local_group: Does group contain this_cpu.
7336  * @sgs: variable to hold the statistics for this group.
7337  * @overload: Indicate more than one runnable task for any CPU.
7338  */
7339 static inline void update_sg_lb_stats(struct lb_env *env,
7340 			struct sched_group *group, int load_idx,
7341 			int local_group, struct sg_lb_stats *sgs,
7342 			bool *overload)
7343 {
7344 	unsigned long load;
7345 	int i, nr_running;
7346 
7347 	memset(sgs, 0, sizeof(*sgs));
7348 
7349 	for_each_cpu_and(i, sched_group_cpus(group), env->cpus) {
7350 		struct rq *rq = cpu_rq(i);
7351 
7352 		/* Bias balancing toward cpus of our domain */
7353 		if (local_group)
7354 			load = target_load(i, load_idx);
7355 		else
7356 			load = source_load(i, load_idx);
7357 
7358 		sgs->group_load += load;
7359 		sgs->group_util += cpu_util(i);
7360 		sgs->sum_nr_running += rq->cfs.h_nr_running;
7361 
7362 		nr_running = rq->nr_running;
7363 		if (nr_running > 1)
7364 			*overload = true;
7365 
7366 #ifdef CONFIG_NUMA_BALANCING
7367 		sgs->nr_numa_running += rq->nr_numa_running;
7368 		sgs->nr_preferred_running += rq->nr_preferred_running;
7369 #endif
7370 		sgs->sum_weighted_load += weighted_cpuload(i);
7371 		/*
7372 		 * No need to call idle_cpu() if nr_running is not 0
7373 		 */
7374 		if (!nr_running && idle_cpu(i))
7375 			sgs->idle_cpus++;
7376 	}
7377 
7378 	/* Adjust by relative CPU capacity of the group */
7379 	sgs->group_capacity = group->sgc->capacity;
7380 	sgs->avg_load = (sgs->group_load*SCHED_CAPACITY_SCALE) / sgs->group_capacity;
7381 
7382 	if (sgs->sum_nr_running)
7383 		sgs->load_per_task = sgs->sum_weighted_load / sgs->sum_nr_running;
7384 
7385 	sgs->group_weight = group->group_weight;
7386 
7387 	sgs->group_no_capacity = group_is_overloaded(env, sgs);
7388 	sgs->group_type = group_classify(group, sgs);
7389 }
7390 
7391 /**
7392  * update_sd_pick_busiest - return 1 on busiest group
7393  * @env: The load balancing environment.
7394  * @sds: sched_domain statistics
7395  * @sg: sched_group candidate to be checked for being the busiest
7396  * @sgs: sched_group statistics
7397  *
7398  * Determine if @sg is a busier group than the previously selected
7399  * busiest group.
7400  *
7401  * Return: %true if @sg is a busier group than the previously selected
7402  * busiest group. %false otherwise.
7403  */
7404 static bool update_sd_pick_busiest(struct lb_env *env,
7405 				   struct sd_lb_stats *sds,
7406 				   struct sched_group *sg,
7407 				   struct sg_lb_stats *sgs)
7408 {
7409 	struct sg_lb_stats *busiest = &sds->busiest_stat;
7410 
7411 	if (sgs->group_type > busiest->group_type)
7412 		return true;
7413 
7414 	if (sgs->group_type < busiest->group_type)
7415 		return false;
7416 
7417 	if (sgs->avg_load <= busiest->avg_load)
7418 		return false;
7419 
7420 	if (!(env->sd->flags & SD_ASYM_CPUCAPACITY))
7421 		goto asym_packing;
7422 
7423 	/*
7424 	 * Candidate sg has no more than one task per CPU and
7425 	 * has higher per-CPU capacity. Migrating tasks to less
7426 	 * capable CPUs may harm throughput. Maximize throughput,
7427 	 * power/energy consequences are not considered.
7428 	 */
7429 	if (sgs->sum_nr_running <= sgs->group_weight &&
7430 	    group_smaller_cpu_capacity(sds->local, sg))
7431 		return false;
7432 
7433 asym_packing:
7434 	/* This is the busiest node in its class. */
7435 	if (!(env->sd->flags & SD_ASYM_PACKING))
7436 		return true;
7437 
7438 	/* No ASYM_PACKING if target cpu is already busy */
7439 	if (env->idle == CPU_NOT_IDLE)
7440 		return true;
7441 	/*
7442 	 * ASYM_PACKING needs to move all the work to the highest
7443 	 * prority CPUs in the group, therefore mark all groups
7444 	 * of lower priority than ourself as busy.
7445 	 */
7446 	if (sgs->sum_nr_running &&
7447 	    sched_asym_prefer(env->dst_cpu, sg->asym_prefer_cpu)) {
7448 		if (!sds->busiest)
7449 			return true;
7450 
7451 		/* Prefer to move from lowest priority cpu's work */
7452 		if (sched_asym_prefer(sds->busiest->asym_prefer_cpu,
7453 				      sg->asym_prefer_cpu))
7454 			return true;
7455 	}
7456 
7457 	return false;
7458 }
7459 
7460 #ifdef CONFIG_NUMA_BALANCING
7461 static inline enum fbq_type fbq_classify_group(struct sg_lb_stats *sgs)
7462 {
7463 	if (sgs->sum_nr_running > sgs->nr_numa_running)
7464 		return regular;
7465 	if (sgs->sum_nr_running > sgs->nr_preferred_running)
7466 		return remote;
7467 	return all;
7468 }
7469 
7470 static inline enum fbq_type fbq_classify_rq(struct rq *rq)
7471 {
7472 	if (rq->nr_running > rq->nr_numa_running)
7473 		return regular;
7474 	if (rq->nr_running > rq->nr_preferred_running)
7475 		return remote;
7476 	return all;
7477 }
7478 #else
7479 static inline enum fbq_type fbq_classify_group(struct sg_lb_stats *sgs)
7480 {
7481 	return all;
7482 }
7483 
7484 static inline enum fbq_type fbq_classify_rq(struct rq *rq)
7485 {
7486 	return regular;
7487 }
7488 #endif /* CONFIG_NUMA_BALANCING */
7489 
7490 /**
7491  * update_sd_lb_stats - Update sched_domain's statistics for load balancing.
7492  * @env: The load balancing environment.
7493  * @sds: variable to hold the statistics for this sched_domain.
7494  */
7495 static inline void update_sd_lb_stats(struct lb_env *env, struct sd_lb_stats *sds)
7496 {
7497 	struct sched_domain *child = env->sd->child;
7498 	struct sched_group *sg = env->sd->groups;
7499 	struct sg_lb_stats tmp_sgs;
7500 	int load_idx, prefer_sibling = 0;
7501 	bool overload = false;
7502 
7503 	if (child && child->flags & SD_PREFER_SIBLING)
7504 		prefer_sibling = 1;
7505 
7506 	load_idx = get_sd_load_idx(env->sd, env->idle);
7507 
7508 	do {
7509 		struct sg_lb_stats *sgs = &tmp_sgs;
7510 		int local_group;
7511 
7512 		local_group = cpumask_test_cpu(env->dst_cpu, sched_group_cpus(sg));
7513 		if (local_group) {
7514 			sds->local = sg;
7515 			sgs = &sds->local_stat;
7516 
7517 			if (env->idle != CPU_NEWLY_IDLE ||
7518 			    time_after_eq(jiffies, sg->sgc->next_update))
7519 				update_group_capacity(env->sd, env->dst_cpu);
7520 		}
7521 
7522 		update_sg_lb_stats(env, sg, load_idx, local_group, sgs,
7523 						&overload);
7524 
7525 		if (local_group)
7526 			goto next_group;
7527 
7528 		/*
7529 		 * In case the child domain prefers tasks go to siblings
7530 		 * first, lower the sg capacity so that we'll try
7531 		 * and move all the excess tasks away. We lower the capacity
7532 		 * of a group only if the local group has the capacity to fit
7533 		 * these excess tasks. The extra check prevents the case where
7534 		 * you always pull from the heaviest group when it is already
7535 		 * under-utilized (possible with a large weight task outweighs
7536 		 * the tasks on the system).
7537 		 */
7538 		if (prefer_sibling && sds->local &&
7539 		    group_has_capacity(env, &sds->local_stat) &&
7540 		    (sgs->sum_nr_running > 1)) {
7541 			sgs->group_no_capacity = 1;
7542 			sgs->group_type = group_classify(sg, sgs);
7543 		}
7544 
7545 		if (update_sd_pick_busiest(env, sds, sg, sgs)) {
7546 			sds->busiest = sg;
7547 			sds->busiest_stat = *sgs;
7548 		}
7549 
7550 next_group:
7551 		/* Now, start updating sd_lb_stats */
7552 		sds->total_load += sgs->group_load;
7553 		sds->total_capacity += sgs->group_capacity;
7554 
7555 		sg = sg->next;
7556 	} while (sg != env->sd->groups);
7557 
7558 	if (env->sd->flags & SD_NUMA)
7559 		env->fbq_type = fbq_classify_group(&sds->busiest_stat);
7560 
7561 	if (!env->sd->parent) {
7562 		/* update overload indicator if we are at root domain */
7563 		if (env->dst_rq->rd->overload != overload)
7564 			env->dst_rq->rd->overload = overload;
7565 	}
7566 
7567 }
7568 
7569 /**
7570  * check_asym_packing - Check to see if the group is packed into the
7571  *			sched doman.
7572  *
7573  * This is primarily intended to used at the sibling level.  Some
7574  * cores like POWER7 prefer to use lower numbered SMT threads.  In the
7575  * case of POWER7, it can move to lower SMT modes only when higher
7576  * threads are idle.  When in lower SMT modes, the threads will
7577  * perform better since they share less core resources.  Hence when we
7578  * have idle threads, we want them to be the higher ones.
7579  *
7580  * This packing function is run on idle threads.  It checks to see if
7581  * the busiest CPU in this domain (core in the P7 case) has a higher
7582  * CPU number than the packing function is being run on.  Here we are
7583  * assuming lower CPU number will be equivalent to lower a SMT thread
7584  * number.
7585  *
7586  * Return: 1 when packing is required and a task should be moved to
7587  * this CPU.  The amount of the imbalance is returned in *imbalance.
7588  *
7589  * @env: The load balancing environment.
7590  * @sds: Statistics of the sched_domain which is to be packed
7591  */
7592 static int check_asym_packing(struct lb_env *env, struct sd_lb_stats *sds)
7593 {
7594 	int busiest_cpu;
7595 
7596 	if (!(env->sd->flags & SD_ASYM_PACKING))
7597 		return 0;
7598 
7599 	if (env->idle == CPU_NOT_IDLE)
7600 		return 0;
7601 
7602 	if (!sds->busiest)
7603 		return 0;
7604 
7605 	busiest_cpu = sds->busiest->asym_prefer_cpu;
7606 	if (sched_asym_prefer(busiest_cpu, env->dst_cpu))
7607 		return 0;
7608 
7609 	env->imbalance = DIV_ROUND_CLOSEST(
7610 		sds->busiest_stat.avg_load * sds->busiest_stat.group_capacity,
7611 		SCHED_CAPACITY_SCALE);
7612 
7613 	return 1;
7614 }
7615 
7616 /**
7617  * fix_small_imbalance - Calculate the minor imbalance that exists
7618  *			amongst the groups of a sched_domain, during
7619  *			load balancing.
7620  * @env: The load balancing environment.
7621  * @sds: Statistics of the sched_domain whose imbalance is to be calculated.
7622  */
7623 static inline
7624 void fix_small_imbalance(struct lb_env *env, struct sd_lb_stats *sds)
7625 {
7626 	unsigned long tmp, capa_now = 0, capa_move = 0;
7627 	unsigned int imbn = 2;
7628 	unsigned long scaled_busy_load_per_task;
7629 	struct sg_lb_stats *local, *busiest;
7630 
7631 	local = &sds->local_stat;
7632 	busiest = &sds->busiest_stat;
7633 
7634 	if (!local->sum_nr_running)
7635 		local->load_per_task = cpu_avg_load_per_task(env->dst_cpu);
7636 	else if (busiest->load_per_task > local->load_per_task)
7637 		imbn = 1;
7638 
7639 	scaled_busy_load_per_task =
7640 		(busiest->load_per_task * SCHED_CAPACITY_SCALE) /
7641 		busiest->group_capacity;
7642 
7643 	if (busiest->avg_load + scaled_busy_load_per_task >=
7644 	    local->avg_load + (scaled_busy_load_per_task * imbn)) {
7645 		env->imbalance = busiest->load_per_task;
7646 		return;
7647 	}
7648 
7649 	/*
7650 	 * OK, we don't have enough imbalance to justify moving tasks,
7651 	 * however we may be able to increase total CPU capacity used by
7652 	 * moving them.
7653 	 */
7654 
7655 	capa_now += busiest->group_capacity *
7656 			min(busiest->load_per_task, busiest->avg_load);
7657 	capa_now += local->group_capacity *
7658 			min(local->load_per_task, local->avg_load);
7659 	capa_now /= SCHED_CAPACITY_SCALE;
7660 
7661 	/* Amount of load we'd subtract */
7662 	if (busiest->avg_load > scaled_busy_load_per_task) {
7663 		capa_move += busiest->group_capacity *
7664 			    min(busiest->load_per_task,
7665 				busiest->avg_load - scaled_busy_load_per_task);
7666 	}
7667 
7668 	/* Amount of load we'd add */
7669 	if (busiest->avg_load * busiest->group_capacity <
7670 	    busiest->load_per_task * SCHED_CAPACITY_SCALE) {
7671 		tmp = (busiest->avg_load * busiest->group_capacity) /
7672 		      local->group_capacity;
7673 	} else {
7674 		tmp = (busiest->load_per_task * SCHED_CAPACITY_SCALE) /
7675 		      local->group_capacity;
7676 	}
7677 	capa_move += local->group_capacity *
7678 		    min(local->load_per_task, local->avg_load + tmp);
7679 	capa_move /= SCHED_CAPACITY_SCALE;
7680 
7681 	/* Move if we gain throughput */
7682 	if (capa_move > capa_now)
7683 		env->imbalance = busiest->load_per_task;
7684 }
7685 
7686 /**
7687  * calculate_imbalance - Calculate the amount of imbalance present within the
7688  *			 groups of a given sched_domain during load balance.
7689  * @env: load balance environment
7690  * @sds: statistics of the sched_domain whose imbalance is to be calculated.
7691  */
7692 static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *sds)
7693 {
7694 	unsigned long max_pull, load_above_capacity = ~0UL;
7695 	struct sg_lb_stats *local, *busiest;
7696 
7697 	local = &sds->local_stat;
7698 	busiest = &sds->busiest_stat;
7699 
7700 	if (busiest->group_type == group_imbalanced) {
7701 		/*
7702 		 * In the group_imb case we cannot rely on group-wide averages
7703 		 * to ensure cpu-load equilibrium, look at wider averages. XXX
7704 		 */
7705 		busiest->load_per_task =
7706 			min(busiest->load_per_task, sds->avg_load);
7707 	}
7708 
7709 	/*
7710 	 * Avg load of busiest sg can be less and avg load of local sg can
7711 	 * be greater than avg load across all sgs of sd because avg load
7712 	 * factors in sg capacity and sgs with smaller group_type are
7713 	 * skipped when updating the busiest sg:
7714 	 */
7715 	if (busiest->avg_load <= sds->avg_load ||
7716 	    local->avg_load >= sds->avg_load) {
7717 		env->imbalance = 0;
7718 		return fix_small_imbalance(env, sds);
7719 	}
7720 
7721 	/*
7722 	 * If there aren't any idle cpus, avoid creating some.
7723 	 */
7724 	if (busiest->group_type == group_overloaded &&
7725 	    local->group_type   == group_overloaded) {
7726 		load_above_capacity = busiest->sum_nr_running * SCHED_CAPACITY_SCALE;
7727 		if (load_above_capacity > busiest->group_capacity) {
7728 			load_above_capacity -= busiest->group_capacity;
7729 			load_above_capacity *= scale_load_down(NICE_0_LOAD);
7730 			load_above_capacity /= busiest->group_capacity;
7731 		} else
7732 			load_above_capacity = ~0UL;
7733 	}
7734 
7735 	/*
7736 	 * We're trying to get all the cpus to the average_load, so we don't
7737 	 * want to push ourselves above the average load, nor do we wish to
7738 	 * reduce the max loaded cpu below the average load. At the same time,
7739 	 * we also don't want to reduce the group load below the group
7740 	 * capacity. Thus we look for the minimum possible imbalance.
7741 	 */
7742 	max_pull = min(busiest->avg_load - sds->avg_load, load_above_capacity);
7743 
7744 	/* How much load to actually move to equalise the imbalance */
7745 	env->imbalance = min(
7746 		max_pull * busiest->group_capacity,
7747 		(sds->avg_load - local->avg_load) * local->group_capacity
7748 	) / SCHED_CAPACITY_SCALE;
7749 
7750 	/*
7751 	 * if *imbalance is less than the average load per runnable task
7752 	 * there is no guarantee that any tasks will be moved so we'll have
7753 	 * a think about bumping its value to force at least one task to be
7754 	 * moved
7755 	 */
7756 	if (env->imbalance < busiest->load_per_task)
7757 		return fix_small_imbalance(env, sds);
7758 }
7759 
7760 /******* find_busiest_group() helpers end here *********************/
7761 
7762 /**
7763  * find_busiest_group - Returns the busiest group within the sched_domain
7764  * if there is an imbalance.
7765  *
7766  * Also calculates the amount of weighted load which should be moved
7767  * to restore balance.
7768  *
7769  * @env: The load balancing environment.
7770  *
7771  * Return:	- The busiest group if imbalance exists.
7772  */
7773 static struct sched_group *find_busiest_group(struct lb_env *env)
7774 {
7775 	struct sg_lb_stats *local, *busiest;
7776 	struct sd_lb_stats sds;
7777 
7778 	init_sd_lb_stats(&sds);
7779 
7780 	/*
7781 	 * Compute the various statistics relavent for load balancing at
7782 	 * this level.
7783 	 */
7784 	update_sd_lb_stats(env, &sds);
7785 	local = &sds.local_stat;
7786 	busiest = &sds.busiest_stat;
7787 
7788 	/* ASYM feature bypasses nice load balance check */
7789 	if (check_asym_packing(env, &sds))
7790 		return sds.busiest;
7791 
7792 	/* There is no busy sibling group to pull tasks from */
7793 	if (!sds.busiest || busiest->sum_nr_running == 0)
7794 		goto out_balanced;
7795 
7796 	sds.avg_load = (SCHED_CAPACITY_SCALE * sds.total_load)
7797 						/ sds.total_capacity;
7798 
7799 	/*
7800 	 * If the busiest group is imbalanced the below checks don't
7801 	 * work because they assume all things are equal, which typically
7802 	 * isn't true due to cpus_allowed constraints and the like.
7803 	 */
7804 	if (busiest->group_type == group_imbalanced)
7805 		goto force_balance;
7806 
7807 	/* SD_BALANCE_NEWIDLE trumps SMP nice when underutilized */
7808 	if (env->idle == CPU_NEWLY_IDLE && group_has_capacity(env, local) &&
7809 	    busiest->group_no_capacity)
7810 		goto force_balance;
7811 
7812 	/*
7813 	 * If the local group is busier than the selected busiest group
7814 	 * don't try and pull any tasks.
7815 	 */
7816 	if (local->avg_load >= busiest->avg_load)
7817 		goto out_balanced;
7818 
7819 	/*
7820 	 * Don't pull any tasks if this group is already above the domain
7821 	 * average load.
7822 	 */
7823 	if (local->avg_load >= sds.avg_load)
7824 		goto out_balanced;
7825 
7826 	if (env->idle == CPU_IDLE) {
7827 		/*
7828 		 * This cpu is idle. If the busiest group is not overloaded
7829 		 * and there is no imbalance between this and busiest group
7830 		 * wrt idle cpus, it is balanced. The imbalance becomes
7831 		 * significant if the diff is greater than 1 otherwise we
7832 		 * might end up to just move the imbalance on another group
7833 		 */
7834 		if ((busiest->group_type != group_overloaded) &&
7835 				(local->idle_cpus <= (busiest->idle_cpus + 1)))
7836 			goto out_balanced;
7837 	} else {
7838 		/*
7839 		 * In the CPU_NEWLY_IDLE, CPU_NOT_IDLE cases, use
7840 		 * imbalance_pct to be conservative.
7841 		 */
7842 		if (100 * busiest->avg_load <=
7843 				env->sd->imbalance_pct * local->avg_load)
7844 			goto out_balanced;
7845 	}
7846 
7847 force_balance:
7848 	/* Looks like there is an imbalance. Compute it */
7849 	calculate_imbalance(env, &sds);
7850 	return sds.busiest;
7851 
7852 out_balanced:
7853 	env->imbalance = 0;
7854 	return NULL;
7855 }
7856 
7857 /*
7858  * find_busiest_queue - find the busiest runqueue among the cpus in group.
7859  */
7860 static struct rq *find_busiest_queue(struct lb_env *env,
7861 				     struct sched_group *group)
7862 {
7863 	struct rq *busiest = NULL, *rq;
7864 	unsigned long busiest_load = 0, busiest_capacity = 1;
7865 	int i;
7866 
7867 	for_each_cpu_and(i, sched_group_cpus(group), env->cpus) {
7868 		unsigned long capacity, wl;
7869 		enum fbq_type rt;
7870 
7871 		rq = cpu_rq(i);
7872 		rt = fbq_classify_rq(rq);
7873 
7874 		/*
7875 		 * We classify groups/runqueues into three groups:
7876 		 *  - regular: there are !numa tasks
7877 		 *  - remote:  there are numa tasks that run on the 'wrong' node
7878 		 *  - all:     there is no distinction
7879 		 *
7880 		 * In order to avoid migrating ideally placed numa tasks,
7881 		 * ignore those when there's better options.
7882 		 *
7883 		 * If we ignore the actual busiest queue to migrate another
7884 		 * task, the next balance pass can still reduce the busiest
7885 		 * queue by moving tasks around inside the node.
7886 		 *
7887 		 * If we cannot move enough load due to this classification
7888 		 * the next pass will adjust the group classification and
7889 		 * allow migration of more tasks.
7890 		 *
7891 		 * Both cases only affect the total convergence complexity.
7892 		 */
7893 		if (rt > env->fbq_type)
7894 			continue;
7895 
7896 		capacity = capacity_of(i);
7897 
7898 		wl = weighted_cpuload(i);
7899 
7900 		/*
7901 		 * When comparing with imbalance, use weighted_cpuload()
7902 		 * which is not scaled with the cpu capacity.
7903 		 */
7904 
7905 		if (rq->nr_running == 1 && wl > env->imbalance &&
7906 		    !check_cpu_capacity(rq, env->sd))
7907 			continue;
7908 
7909 		/*
7910 		 * For the load comparisons with the other cpu's, consider
7911 		 * the weighted_cpuload() scaled with the cpu capacity, so
7912 		 * that the load can be moved away from the cpu that is
7913 		 * potentially running at a lower capacity.
7914 		 *
7915 		 * Thus we're looking for max(wl_i / capacity_i), crosswise
7916 		 * multiplication to rid ourselves of the division works out
7917 		 * to: wl_i * capacity_j > wl_j * capacity_i;  where j is
7918 		 * our previous maximum.
7919 		 */
7920 		if (wl * busiest_capacity > busiest_load * capacity) {
7921 			busiest_load = wl;
7922 			busiest_capacity = capacity;
7923 			busiest = rq;
7924 		}
7925 	}
7926 
7927 	return busiest;
7928 }
7929 
7930 /*
7931  * Max backoff if we encounter pinned tasks. Pretty arbitrary value, but
7932  * so long as it is large enough.
7933  */
7934 #define MAX_PINNED_INTERVAL	512
7935 
7936 static int need_active_balance(struct lb_env *env)
7937 {
7938 	struct sched_domain *sd = env->sd;
7939 
7940 	if (env->idle == CPU_NEWLY_IDLE) {
7941 
7942 		/*
7943 		 * ASYM_PACKING needs to force migrate tasks from busy but
7944 		 * lower priority CPUs in order to pack all tasks in the
7945 		 * highest priority CPUs.
7946 		 */
7947 		if ((sd->flags & SD_ASYM_PACKING) &&
7948 		    sched_asym_prefer(env->dst_cpu, env->src_cpu))
7949 			return 1;
7950 	}
7951 
7952 	/*
7953 	 * The dst_cpu is idle and the src_cpu CPU has only 1 CFS task.
7954 	 * It's worth migrating the task if the src_cpu's capacity is reduced
7955 	 * because of other sched_class or IRQs if more capacity stays
7956 	 * available on dst_cpu.
7957 	 */
7958 	if ((env->idle != CPU_NOT_IDLE) &&
7959 	    (env->src_rq->cfs.h_nr_running == 1)) {
7960 		if ((check_cpu_capacity(env->src_rq, sd)) &&
7961 		    (capacity_of(env->src_cpu)*sd->imbalance_pct < capacity_of(env->dst_cpu)*100))
7962 			return 1;
7963 	}
7964 
7965 	return unlikely(sd->nr_balance_failed > sd->cache_nice_tries+2);
7966 }
7967 
7968 static int active_load_balance_cpu_stop(void *data);
7969 
7970 static int should_we_balance(struct lb_env *env)
7971 {
7972 	struct sched_group *sg = env->sd->groups;
7973 	struct cpumask *sg_cpus, *sg_mask;
7974 	int cpu, balance_cpu = -1;
7975 
7976 	/*
7977 	 * In the newly idle case, we will allow all the cpu's
7978 	 * to do the newly idle load balance.
7979 	 */
7980 	if (env->idle == CPU_NEWLY_IDLE)
7981 		return 1;
7982 
7983 	sg_cpus = sched_group_cpus(sg);
7984 	sg_mask = sched_group_mask(sg);
7985 	/* Try to find first idle cpu */
7986 	for_each_cpu_and(cpu, sg_cpus, env->cpus) {
7987 		if (!cpumask_test_cpu(cpu, sg_mask) || !idle_cpu(cpu))
7988 			continue;
7989 
7990 		balance_cpu = cpu;
7991 		break;
7992 	}
7993 
7994 	if (balance_cpu == -1)
7995 		balance_cpu = group_balance_cpu(sg);
7996 
7997 	/*
7998 	 * First idle cpu or the first cpu(busiest) in this sched group
7999 	 * is eligible for doing load balancing at this and above domains.
8000 	 */
8001 	return balance_cpu == env->dst_cpu;
8002 }
8003 
8004 /*
8005  * Check this_cpu to ensure it is balanced within domain. Attempt to move
8006  * tasks if there is an imbalance.
8007  */
8008 static int load_balance(int this_cpu, struct rq *this_rq,
8009 			struct sched_domain *sd, enum cpu_idle_type idle,
8010 			int *continue_balancing)
8011 {
8012 	int ld_moved, cur_ld_moved, active_balance = 0;
8013 	struct sched_domain *sd_parent = sd->parent;
8014 	struct sched_group *group;
8015 	struct rq *busiest;
8016 	unsigned long flags;
8017 	struct cpumask *cpus = this_cpu_cpumask_var_ptr(load_balance_mask);
8018 
8019 	struct lb_env env = {
8020 		.sd		= sd,
8021 		.dst_cpu	= this_cpu,
8022 		.dst_rq		= this_rq,
8023 		.dst_grpmask    = sched_group_cpus(sd->groups),
8024 		.idle		= idle,
8025 		.loop_break	= sched_nr_migrate_break,
8026 		.cpus		= cpus,
8027 		.fbq_type	= all,
8028 		.tasks		= LIST_HEAD_INIT(env.tasks),
8029 	};
8030 
8031 	/*
8032 	 * For NEWLY_IDLE load_balancing, we don't need to consider
8033 	 * other cpus in our group
8034 	 */
8035 	if (idle == CPU_NEWLY_IDLE)
8036 		env.dst_grpmask = NULL;
8037 
8038 	cpumask_copy(cpus, cpu_active_mask);
8039 
8040 	schedstat_inc(sd->lb_count[idle]);
8041 
8042 redo:
8043 	if (!should_we_balance(&env)) {
8044 		*continue_balancing = 0;
8045 		goto out_balanced;
8046 	}
8047 
8048 	group = find_busiest_group(&env);
8049 	if (!group) {
8050 		schedstat_inc(sd->lb_nobusyg[idle]);
8051 		goto out_balanced;
8052 	}
8053 
8054 	busiest = find_busiest_queue(&env, group);
8055 	if (!busiest) {
8056 		schedstat_inc(sd->lb_nobusyq[idle]);
8057 		goto out_balanced;
8058 	}
8059 
8060 	BUG_ON(busiest == env.dst_rq);
8061 
8062 	schedstat_add(sd->lb_imbalance[idle], env.imbalance);
8063 
8064 	env.src_cpu = busiest->cpu;
8065 	env.src_rq = busiest;
8066 
8067 	ld_moved = 0;
8068 	if (busiest->nr_running > 1) {
8069 		/*
8070 		 * Attempt to move tasks. If find_busiest_group has found
8071 		 * an imbalance but busiest->nr_running <= 1, the group is
8072 		 * still unbalanced. ld_moved simply stays zero, so it is
8073 		 * correctly treated as an imbalance.
8074 		 */
8075 		env.flags |= LBF_ALL_PINNED;
8076 		env.loop_max  = min(sysctl_sched_nr_migrate, busiest->nr_running);
8077 
8078 more_balance:
8079 		raw_spin_lock_irqsave(&busiest->lock, flags);
8080 
8081 		/*
8082 		 * cur_ld_moved - load moved in current iteration
8083 		 * ld_moved     - cumulative load moved across iterations
8084 		 */
8085 		cur_ld_moved = detach_tasks(&env);
8086 
8087 		/*
8088 		 * We've detached some tasks from busiest_rq. Every
8089 		 * task is masked "TASK_ON_RQ_MIGRATING", so we can safely
8090 		 * unlock busiest->lock, and we are able to be sure
8091 		 * that nobody can manipulate the tasks in parallel.
8092 		 * See task_rq_lock() family for the details.
8093 		 */
8094 
8095 		raw_spin_unlock(&busiest->lock);
8096 
8097 		if (cur_ld_moved) {
8098 			attach_tasks(&env);
8099 			ld_moved += cur_ld_moved;
8100 		}
8101 
8102 		local_irq_restore(flags);
8103 
8104 		if (env.flags & LBF_NEED_BREAK) {
8105 			env.flags &= ~LBF_NEED_BREAK;
8106 			goto more_balance;
8107 		}
8108 
8109 		/*
8110 		 * Revisit (affine) tasks on src_cpu that couldn't be moved to
8111 		 * us and move them to an alternate dst_cpu in our sched_group
8112 		 * where they can run. The upper limit on how many times we
8113 		 * iterate on same src_cpu is dependent on number of cpus in our
8114 		 * sched_group.
8115 		 *
8116 		 * This changes load balance semantics a bit on who can move
8117 		 * load to a given_cpu. In addition to the given_cpu itself
8118 		 * (or a ilb_cpu acting on its behalf where given_cpu is
8119 		 * nohz-idle), we now have balance_cpu in a position to move
8120 		 * load to given_cpu. In rare situations, this may cause
8121 		 * conflicts (balance_cpu and given_cpu/ilb_cpu deciding
8122 		 * _independently_ and at _same_ time to move some load to
8123 		 * given_cpu) causing exceess load to be moved to given_cpu.
8124 		 * This however should not happen so much in practice and
8125 		 * moreover subsequent load balance cycles should correct the
8126 		 * excess load moved.
8127 		 */
8128 		if ((env.flags & LBF_DST_PINNED) && env.imbalance > 0) {
8129 
8130 			/* Prevent to re-select dst_cpu via env's cpus */
8131 			cpumask_clear_cpu(env.dst_cpu, env.cpus);
8132 
8133 			env.dst_rq	 = cpu_rq(env.new_dst_cpu);
8134 			env.dst_cpu	 = env.new_dst_cpu;
8135 			env.flags	&= ~LBF_DST_PINNED;
8136 			env.loop	 = 0;
8137 			env.loop_break	 = sched_nr_migrate_break;
8138 
8139 			/*
8140 			 * Go back to "more_balance" rather than "redo" since we
8141 			 * need to continue with same src_cpu.
8142 			 */
8143 			goto more_balance;
8144 		}
8145 
8146 		/*
8147 		 * We failed to reach balance because of affinity.
8148 		 */
8149 		if (sd_parent) {
8150 			int *group_imbalance = &sd_parent->groups->sgc->imbalance;
8151 
8152 			if ((env.flags & LBF_SOME_PINNED) && env.imbalance > 0)
8153 				*group_imbalance = 1;
8154 		}
8155 
8156 		/* All tasks on this runqueue were pinned by CPU affinity */
8157 		if (unlikely(env.flags & LBF_ALL_PINNED)) {
8158 			cpumask_clear_cpu(cpu_of(busiest), cpus);
8159 			if (!cpumask_empty(cpus)) {
8160 				env.loop = 0;
8161 				env.loop_break = sched_nr_migrate_break;
8162 				goto redo;
8163 			}
8164 			goto out_all_pinned;
8165 		}
8166 	}
8167 
8168 	if (!ld_moved) {
8169 		schedstat_inc(sd->lb_failed[idle]);
8170 		/*
8171 		 * Increment the failure counter only on periodic balance.
8172 		 * We do not want newidle balance, which can be very
8173 		 * frequent, pollute the failure counter causing
8174 		 * excessive cache_hot migrations and active balances.
8175 		 */
8176 		if (idle != CPU_NEWLY_IDLE)
8177 			sd->nr_balance_failed++;
8178 
8179 		if (need_active_balance(&env)) {
8180 			raw_spin_lock_irqsave(&busiest->lock, flags);
8181 
8182 			/* don't kick the active_load_balance_cpu_stop,
8183 			 * if the curr task on busiest cpu can't be
8184 			 * moved to this_cpu
8185 			 */
8186 			if (!cpumask_test_cpu(this_cpu,
8187 					tsk_cpus_allowed(busiest->curr))) {
8188 				raw_spin_unlock_irqrestore(&busiest->lock,
8189 							    flags);
8190 				env.flags |= LBF_ALL_PINNED;
8191 				goto out_one_pinned;
8192 			}
8193 
8194 			/*
8195 			 * ->active_balance synchronizes accesses to
8196 			 * ->active_balance_work.  Once set, it's cleared
8197 			 * only after active load balance is finished.
8198 			 */
8199 			if (!busiest->active_balance) {
8200 				busiest->active_balance = 1;
8201 				busiest->push_cpu = this_cpu;
8202 				active_balance = 1;
8203 			}
8204 			raw_spin_unlock_irqrestore(&busiest->lock, flags);
8205 
8206 			if (active_balance) {
8207 				stop_one_cpu_nowait(cpu_of(busiest),
8208 					active_load_balance_cpu_stop, busiest,
8209 					&busiest->active_balance_work);
8210 			}
8211 
8212 			/* We've kicked active balancing, force task migration. */
8213 			sd->nr_balance_failed = sd->cache_nice_tries+1;
8214 		}
8215 	} else
8216 		sd->nr_balance_failed = 0;
8217 
8218 	if (likely(!active_balance)) {
8219 		/* We were unbalanced, so reset the balancing interval */
8220 		sd->balance_interval = sd->min_interval;
8221 	} else {
8222 		/*
8223 		 * If we've begun active balancing, start to back off. This
8224 		 * case may not be covered by the all_pinned logic if there
8225 		 * is only 1 task on the busy runqueue (because we don't call
8226 		 * detach_tasks).
8227 		 */
8228 		if (sd->balance_interval < sd->max_interval)
8229 			sd->balance_interval *= 2;
8230 	}
8231 
8232 	goto out;
8233 
8234 out_balanced:
8235 	/*
8236 	 * We reach balance although we may have faced some affinity
8237 	 * constraints. Clear the imbalance flag if it was set.
8238 	 */
8239 	if (sd_parent) {
8240 		int *group_imbalance = &sd_parent->groups->sgc->imbalance;
8241 
8242 		if (*group_imbalance)
8243 			*group_imbalance = 0;
8244 	}
8245 
8246 out_all_pinned:
8247 	/*
8248 	 * We reach balance because all tasks are pinned at this level so
8249 	 * we can't migrate them. Let the imbalance flag set so parent level
8250 	 * can try to migrate them.
8251 	 */
8252 	schedstat_inc(sd->lb_balanced[idle]);
8253 
8254 	sd->nr_balance_failed = 0;
8255 
8256 out_one_pinned:
8257 	/* tune up the balancing interval */
8258 	if (((env.flags & LBF_ALL_PINNED) &&
8259 			sd->balance_interval < MAX_PINNED_INTERVAL) ||
8260 			(sd->balance_interval < sd->max_interval))
8261 		sd->balance_interval *= 2;
8262 
8263 	ld_moved = 0;
8264 out:
8265 	return ld_moved;
8266 }
8267 
8268 static inline unsigned long
8269 get_sd_balance_interval(struct sched_domain *sd, int cpu_busy)
8270 {
8271 	unsigned long interval = sd->balance_interval;
8272 
8273 	if (cpu_busy)
8274 		interval *= sd->busy_factor;
8275 
8276 	/* scale ms to jiffies */
8277 	interval = msecs_to_jiffies(interval);
8278 	interval = clamp(interval, 1UL, max_load_balance_interval);
8279 
8280 	return interval;
8281 }
8282 
8283 static inline void
8284 update_next_balance(struct sched_domain *sd, unsigned long *next_balance)
8285 {
8286 	unsigned long interval, next;
8287 
8288 	/* used by idle balance, so cpu_busy = 0 */
8289 	interval = get_sd_balance_interval(sd, 0);
8290 	next = sd->last_balance + interval;
8291 
8292 	if (time_after(*next_balance, next))
8293 		*next_balance = next;
8294 }
8295 
8296 /*
8297  * idle_balance is called by schedule() if this_cpu is about to become
8298  * idle. Attempts to pull tasks from other CPUs.
8299  */
8300 static int idle_balance(struct rq *this_rq)
8301 {
8302 	unsigned long next_balance = jiffies + HZ;
8303 	int this_cpu = this_rq->cpu;
8304 	struct sched_domain *sd;
8305 	int pulled_task = 0;
8306 	u64 curr_cost = 0;
8307 
8308 	/*
8309 	 * We must set idle_stamp _before_ calling idle_balance(), such that we
8310 	 * measure the duration of idle_balance() as idle time.
8311 	 */
8312 	this_rq->idle_stamp = rq_clock(this_rq);
8313 
8314 	if (this_rq->avg_idle < sysctl_sched_migration_cost ||
8315 	    !this_rq->rd->overload) {
8316 		rcu_read_lock();
8317 		sd = rcu_dereference_check_sched_domain(this_rq->sd);
8318 		if (sd)
8319 			update_next_balance(sd, &next_balance);
8320 		rcu_read_unlock();
8321 
8322 		goto out;
8323 	}
8324 
8325 	raw_spin_unlock(&this_rq->lock);
8326 
8327 	update_blocked_averages(this_cpu);
8328 	rcu_read_lock();
8329 	for_each_domain(this_cpu, sd) {
8330 		int continue_balancing = 1;
8331 		u64 t0, domain_cost;
8332 
8333 		if (!(sd->flags & SD_LOAD_BALANCE))
8334 			continue;
8335 
8336 		if (this_rq->avg_idle < curr_cost + sd->max_newidle_lb_cost) {
8337 			update_next_balance(sd, &next_balance);
8338 			break;
8339 		}
8340 
8341 		if (sd->flags & SD_BALANCE_NEWIDLE) {
8342 			t0 = sched_clock_cpu(this_cpu);
8343 
8344 			pulled_task = load_balance(this_cpu, this_rq,
8345 						   sd, CPU_NEWLY_IDLE,
8346 						   &continue_balancing);
8347 
8348 			domain_cost = sched_clock_cpu(this_cpu) - t0;
8349 			if (domain_cost > sd->max_newidle_lb_cost)
8350 				sd->max_newidle_lb_cost = domain_cost;
8351 
8352 			curr_cost += domain_cost;
8353 		}
8354 
8355 		update_next_balance(sd, &next_balance);
8356 
8357 		/*
8358 		 * Stop searching for tasks to pull if there are
8359 		 * now runnable tasks on this rq.
8360 		 */
8361 		if (pulled_task || this_rq->nr_running > 0)
8362 			break;
8363 	}
8364 	rcu_read_unlock();
8365 
8366 	raw_spin_lock(&this_rq->lock);
8367 
8368 	if (curr_cost > this_rq->max_idle_balance_cost)
8369 		this_rq->max_idle_balance_cost = curr_cost;
8370 
8371 	/*
8372 	 * While browsing the domains, we released the rq lock, a task could
8373 	 * have been enqueued in the meantime. Since we're not going idle,
8374 	 * pretend we pulled a task.
8375 	 */
8376 	if (this_rq->cfs.h_nr_running && !pulled_task)
8377 		pulled_task = 1;
8378 
8379 out:
8380 	/* Move the next balance forward */
8381 	if (time_after(this_rq->next_balance, next_balance))
8382 		this_rq->next_balance = next_balance;
8383 
8384 	/* Is there a task of a high priority class? */
8385 	if (this_rq->nr_running != this_rq->cfs.h_nr_running)
8386 		pulled_task = -1;
8387 
8388 	if (pulled_task)
8389 		this_rq->idle_stamp = 0;
8390 
8391 	return pulled_task;
8392 }
8393 
8394 /*
8395  * active_load_balance_cpu_stop is run by cpu stopper. It pushes
8396  * running tasks off the busiest CPU onto idle CPUs. It requires at
8397  * least 1 task to be running on each physical CPU where possible, and
8398  * avoids physical / logical imbalances.
8399  */
8400 static int active_load_balance_cpu_stop(void *data)
8401 {
8402 	struct rq *busiest_rq = data;
8403 	int busiest_cpu = cpu_of(busiest_rq);
8404 	int target_cpu = busiest_rq->push_cpu;
8405 	struct rq *target_rq = cpu_rq(target_cpu);
8406 	struct sched_domain *sd;
8407 	struct task_struct *p = NULL;
8408 
8409 	raw_spin_lock_irq(&busiest_rq->lock);
8410 
8411 	/* make sure the requested cpu hasn't gone down in the meantime */
8412 	if (unlikely(busiest_cpu != smp_processor_id() ||
8413 		     !busiest_rq->active_balance))
8414 		goto out_unlock;
8415 
8416 	/* Is there any task to move? */
8417 	if (busiest_rq->nr_running <= 1)
8418 		goto out_unlock;
8419 
8420 	/*
8421 	 * This condition is "impossible", if it occurs
8422 	 * we need to fix it. Originally reported by
8423 	 * Bjorn Helgaas on a 128-cpu setup.
8424 	 */
8425 	BUG_ON(busiest_rq == target_rq);
8426 
8427 	/* Search for an sd spanning us and the target CPU. */
8428 	rcu_read_lock();
8429 	for_each_domain(target_cpu, sd) {
8430 		if ((sd->flags & SD_LOAD_BALANCE) &&
8431 		    cpumask_test_cpu(busiest_cpu, sched_domain_span(sd)))
8432 				break;
8433 	}
8434 
8435 	if (likely(sd)) {
8436 		struct lb_env env = {
8437 			.sd		= sd,
8438 			.dst_cpu	= target_cpu,
8439 			.dst_rq		= target_rq,
8440 			.src_cpu	= busiest_rq->cpu,
8441 			.src_rq		= busiest_rq,
8442 			.idle		= CPU_IDLE,
8443 		};
8444 
8445 		schedstat_inc(sd->alb_count);
8446 
8447 		p = detach_one_task(&env);
8448 		if (p) {
8449 			schedstat_inc(sd->alb_pushed);
8450 			/* Active balancing done, reset the failure counter. */
8451 			sd->nr_balance_failed = 0;
8452 		} else {
8453 			schedstat_inc(sd->alb_failed);
8454 		}
8455 	}
8456 	rcu_read_unlock();
8457 out_unlock:
8458 	busiest_rq->active_balance = 0;
8459 	raw_spin_unlock(&busiest_rq->lock);
8460 
8461 	if (p)
8462 		attach_one_task(target_rq, p);
8463 
8464 	local_irq_enable();
8465 
8466 	return 0;
8467 }
8468 
8469 static inline int on_null_domain(struct rq *rq)
8470 {
8471 	return unlikely(!rcu_dereference_sched(rq->sd));
8472 }
8473 
8474 #ifdef CONFIG_NO_HZ_COMMON
8475 /*
8476  * idle load balancing details
8477  * - When one of the busy CPUs notice that there may be an idle rebalancing
8478  *   needed, they will kick the idle load balancer, which then does idle
8479  *   load balancing for all the idle CPUs.
8480  */
8481 static struct {
8482 	cpumask_var_t idle_cpus_mask;
8483 	atomic_t nr_cpus;
8484 	unsigned long next_balance;     /* in jiffy units */
8485 } nohz ____cacheline_aligned;
8486 
8487 static inline int find_new_ilb(void)
8488 {
8489 	int ilb = cpumask_first(nohz.idle_cpus_mask);
8490 
8491 	if (ilb < nr_cpu_ids && idle_cpu(ilb))
8492 		return ilb;
8493 
8494 	return nr_cpu_ids;
8495 }
8496 
8497 /*
8498  * Kick a CPU to do the nohz balancing, if it is time for it. We pick the
8499  * nohz_load_balancer CPU (if there is one) otherwise fallback to any idle
8500  * CPU (if there is one).
8501  */
8502 static void nohz_balancer_kick(void)
8503 {
8504 	int ilb_cpu;
8505 
8506 	nohz.next_balance++;
8507 
8508 	ilb_cpu = find_new_ilb();
8509 
8510 	if (ilb_cpu >= nr_cpu_ids)
8511 		return;
8512 
8513 	if (test_and_set_bit(NOHZ_BALANCE_KICK, nohz_flags(ilb_cpu)))
8514 		return;
8515 	/*
8516 	 * Use smp_send_reschedule() instead of resched_cpu().
8517 	 * This way we generate a sched IPI on the target cpu which
8518 	 * is idle. And the softirq performing nohz idle load balance
8519 	 * will be run before returning from the IPI.
8520 	 */
8521 	smp_send_reschedule(ilb_cpu);
8522 	return;
8523 }
8524 
8525 void nohz_balance_exit_idle(unsigned int cpu)
8526 {
8527 	if (unlikely(test_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu)))) {
8528 		/*
8529 		 * Completely isolated CPUs don't ever set, so we must test.
8530 		 */
8531 		if (likely(cpumask_test_cpu(cpu, nohz.idle_cpus_mask))) {
8532 			cpumask_clear_cpu(cpu, nohz.idle_cpus_mask);
8533 			atomic_dec(&nohz.nr_cpus);
8534 		}
8535 		clear_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu));
8536 	}
8537 }
8538 
8539 static inline void set_cpu_sd_state_busy(void)
8540 {
8541 	struct sched_domain *sd;
8542 	int cpu = smp_processor_id();
8543 
8544 	rcu_read_lock();
8545 	sd = rcu_dereference(per_cpu(sd_llc, cpu));
8546 
8547 	if (!sd || !sd->nohz_idle)
8548 		goto unlock;
8549 	sd->nohz_idle = 0;
8550 
8551 	atomic_inc(&sd->shared->nr_busy_cpus);
8552 unlock:
8553 	rcu_read_unlock();
8554 }
8555 
8556 void set_cpu_sd_state_idle(void)
8557 {
8558 	struct sched_domain *sd;
8559 	int cpu = smp_processor_id();
8560 
8561 	rcu_read_lock();
8562 	sd = rcu_dereference(per_cpu(sd_llc, cpu));
8563 
8564 	if (!sd || sd->nohz_idle)
8565 		goto unlock;
8566 	sd->nohz_idle = 1;
8567 
8568 	atomic_dec(&sd->shared->nr_busy_cpus);
8569 unlock:
8570 	rcu_read_unlock();
8571 }
8572 
8573 /*
8574  * This routine will record that the cpu is going idle with tick stopped.
8575  * This info will be used in performing idle load balancing in the future.
8576  */
8577 void nohz_balance_enter_idle(int cpu)
8578 {
8579 	/*
8580 	 * If this cpu is going down, then nothing needs to be done.
8581 	 */
8582 	if (!cpu_active(cpu))
8583 		return;
8584 
8585 	if (test_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu)))
8586 		return;
8587 
8588 	/*
8589 	 * If we're a completely isolated CPU, we don't play.
8590 	 */
8591 	if (on_null_domain(cpu_rq(cpu)))
8592 		return;
8593 
8594 	cpumask_set_cpu(cpu, nohz.idle_cpus_mask);
8595 	atomic_inc(&nohz.nr_cpus);
8596 	set_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu));
8597 }
8598 #endif
8599 
8600 static DEFINE_SPINLOCK(balancing);
8601 
8602 /*
8603  * Scale the max load_balance interval with the number of CPUs in the system.
8604  * This trades load-balance latency on larger machines for less cross talk.
8605  */
8606 void update_max_interval(void)
8607 {
8608 	max_load_balance_interval = HZ*num_online_cpus()/10;
8609 }
8610 
8611 /*
8612  * It checks each scheduling domain to see if it is due to be balanced,
8613  * and initiates a balancing operation if so.
8614  *
8615  * Balancing parameters are set up in init_sched_domains.
8616  */
8617 static void rebalance_domains(struct rq *rq, enum cpu_idle_type idle)
8618 {
8619 	int continue_balancing = 1;
8620 	int cpu = rq->cpu;
8621 	unsigned long interval;
8622 	struct sched_domain *sd;
8623 	/* Earliest time when we have to do rebalance again */
8624 	unsigned long next_balance = jiffies + 60*HZ;
8625 	int update_next_balance = 0;
8626 	int need_serialize, need_decay = 0;
8627 	u64 max_cost = 0;
8628 
8629 	update_blocked_averages(cpu);
8630 
8631 	rcu_read_lock();
8632 	for_each_domain(cpu, sd) {
8633 		/*
8634 		 * Decay the newidle max times here because this is a regular
8635 		 * visit to all the domains. Decay ~1% per second.
8636 		 */
8637 		if (time_after(jiffies, sd->next_decay_max_lb_cost)) {
8638 			sd->max_newidle_lb_cost =
8639 				(sd->max_newidle_lb_cost * 253) / 256;
8640 			sd->next_decay_max_lb_cost = jiffies + HZ;
8641 			need_decay = 1;
8642 		}
8643 		max_cost += sd->max_newidle_lb_cost;
8644 
8645 		if (!(sd->flags & SD_LOAD_BALANCE))
8646 			continue;
8647 
8648 		/*
8649 		 * Stop the load balance at this level. There is another
8650 		 * CPU in our sched group which is doing load balancing more
8651 		 * actively.
8652 		 */
8653 		if (!continue_balancing) {
8654 			if (need_decay)
8655 				continue;
8656 			break;
8657 		}
8658 
8659 		interval = get_sd_balance_interval(sd, idle != CPU_IDLE);
8660 
8661 		need_serialize = sd->flags & SD_SERIALIZE;
8662 		if (need_serialize) {
8663 			if (!spin_trylock(&balancing))
8664 				goto out;
8665 		}
8666 
8667 		if (time_after_eq(jiffies, sd->last_balance + interval)) {
8668 			if (load_balance(cpu, rq, sd, idle, &continue_balancing)) {
8669 				/*
8670 				 * The LBF_DST_PINNED logic could have changed
8671 				 * env->dst_cpu, so we can't know our idle
8672 				 * state even if we migrated tasks. Update it.
8673 				 */
8674 				idle = idle_cpu(cpu) ? CPU_IDLE : CPU_NOT_IDLE;
8675 			}
8676 			sd->last_balance = jiffies;
8677 			interval = get_sd_balance_interval(sd, idle != CPU_IDLE);
8678 		}
8679 		if (need_serialize)
8680 			spin_unlock(&balancing);
8681 out:
8682 		if (time_after(next_balance, sd->last_balance + interval)) {
8683 			next_balance = sd->last_balance + interval;
8684 			update_next_balance = 1;
8685 		}
8686 	}
8687 	if (need_decay) {
8688 		/*
8689 		 * Ensure the rq-wide value also decays but keep it at a
8690 		 * reasonable floor to avoid funnies with rq->avg_idle.
8691 		 */
8692 		rq->max_idle_balance_cost =
8693 			max((u64)sysctl_sched_migration_cost, max_cost);
8694 	}
8695 	rcu_read_unlock();
8696 
8697 	/*
8698 	 * next_balance will be updated only when there is a need.
8699 	 * When the cpu is attached to null domain for ex, it will not be
8700 	 * updated.
8701 	 */
8702 	if (likely(update_next_balance)) {
8703 		rq->next_balance = next_balance;
8704 
8705 #ifdef CONFIG_NO_HZ_COMMON
8706 		/*
8707 		 * If this CPU has been elected to perform the nohz idle
8708 		 * balance. Other idle CPUs have already rebalanced with
8709 		 * nohz_idle_balance() and nohz.next_balance has been
8710 		 * updated accordingly. This CPU is now running the idle load
8711 		 * balance for itself and we need to update the
8712 		 * nohz.next_balance accordingly.
8713 		 */
8714 		if ((idle == CPU_IDLE) && time_after(nohz.next_balance, rq->next_balance))
8715 			nohz.next_balance = rq->next_balance;
8716 #endif
8717 	}
8718 }
8719 
8720 #ifdef CONFIG_NO_HZ_COMMON
8721 /*
8722  * In CONFIG_NO_HZ_COMMON case, the idle balance kickee will do the
8723  * rebalancing for all the cpus for whom scheduler ticks are stopped.
8724  */
8725 static void nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle)
8726 {
8727 	int this_cpu = this_rq->cpu;
8728 	struct rq *rq;
8729 	int balance_cpu;
8730 	/* Earliest time when we have to do rebalance again */
8731 	unsigned long next_balance = jiffies + 60*HZ;
8732 	int update_next_balance = 0;
8733 
8734 	if (idle != CPU_IDLE ||
8735 	    !test_bit(NOHZ_BALANCE_KICK, nohz_flags(this_cpu)))
8736 		goto end;
8737 
8738 	for_each_cpu(balance_cpu, nohz.idle_cpus_mask) {
8739 		if (balance_cpu == this_cpu || !idle_cpu(balance_cpu))
8740 			continue;
8741 
8742 		/*
8743 		 * If this cpu gets work to do, stop the load balancing
8744 		 * work being done for other cpus. Next load
8745 		 * balancing owner will pick it up.
8746 		 */
8747 		if (need_resched())
8748 			break;
8749 
8750 		rq = cpu_rq(balance_cpu);
8751 
8752 		/*
8753 		 * If time for next balance is due,
8754 		 * do the balance.
8755 		 */
8756 		if (time_after_eq(jiffies, rq->next_balance)) {
8757 			raw_spin_lock_irq(&rq->lock);
8758 			update_rq_clock(rq);
8759 			cpu_load_update_idle(rq);
8760 			raw_spin_unlock_irq(&rq->lock);
8761 			rebalance_domains(rq, CPU_IDLE);
8762 		}
8763 
8764 		if (time_after(next_balance, rq->next_balance)) {
8765 			next_balance = rq->next_balance;
8766 			update_next_balance = 1;
8767 		}
8768 	}
8769 
8770 	/*
8771 	 * next_balance will be updated only when there is a need.
8772 	 * When the CPU is attached to null domain for ex, it will not be
8773 	 * updated.
8774 	 */
8775 	if (likely(update_next_balance))
8776 		nohz.next_balance = next_balance;
8777 end:
8778 	clear_bit(NOHZ_BALANCE_KICK, nohz_flags(this_cpu));
8779 }
8780 
8781 /*
8782  * Current heuristic for kicking the idle load balancer in the presence
8783  * of an idle cpu in the system.
8784  *   - This rq has more than one task.
8785  *   - This rq has at least one CFS task and the capacity of the CPU is
8786  *     significantly reduced because of RT tasks or IRQs.
8787  *   - At parent of LLC scheduler domain level, this cpu's scheduler group has
8788  *     multiple busy cpu.
8789  *   - For SD_ASYM_PACKING, if the lower numbered cpu's in the scheduler
8790  *     domain span are idle.
8791  */
8792 static inline bool nohz_kick_needed(struct rq *rq)
8793 {
8794 	unsigned long now = jiffies;
8795 	struct sched_domain_shared *sds;
8796 	struct sched_domain *sd;
8797 	int nr_busy, i, cpu = rq->cpu;
8798 	bool kick = false;
8799 
8800 	if (unlikely(rq->idle_balance))
8801 		return false;
8802 
8803        /*
8804 	* We may be recently in ticked or tickless idle mode. At the first
8805 	* busy tick after returning from idle, we will update the busy stats.
8806 	*/
8807 	set_cpu_sd_state_busy();
8808 	nohz_balance_exit_idle(cpu);
8809 
8810 	/*
8811 	 * None are in tickless mode and hence no need for NOHZ idle load
8812 	 * balancing.
8813 	 */
8814 	if (likely(!atomic_read(&nohz.nr_cpus)))
8815 		return false;
8816 
8817 	if (time_before(now, nohz.next_balance))
8818 		return false;
8819 
8820 	if (rq->nr_running >= 2)
8821 		return true;
8822 
8823 	rcu_read_lock();
8824 	sds = rcu_dereference(per_cpu(sd_llc_shared, cpu));
8825 	if (sds) {
8826 		/*
8827 		 * XXX: write a coherent comment on why we do this.
8828 		 * See also: http://lkml.kernel.org/r/20111202010832.602203411@sbsiddha-desk.sc.intel.com
8829 		 */
8830 		nr_busy = atomic_read(&sds->nr_busy_cpus);
8831 		if (nr_busy > 1) {
8832 			kick = true;
8833 			goto unlock;
8834 		}
8835 
8836 	}
8837 
8838 	sd = rcu_dereference(rq->sd);
8839 	if (sd) {
8840 		if ((rq->cfs.h_nr_running >= 1) &&
8841 				check_cpu_capacity(rq, sd)) {
8842 			kick = true;
8843 			goto unlock;
8844 		}
8845 	}
8846 
8847 	sd = rcu_dereference(per_cpu(sd_asym, cpu));
8848 	if (sd) {
8849 		for_each_cpu(i, sched_domain_span(sd)) {
8850 			if (i == cpu ||
8851 			    !cpumask_test_cpu(i, nohz.idle_cpus_mask))
8852 				continue;
8853 
8854 			if (sched_asym_prefer(i, cpu)) {
8855 				kick = true;
8856 				goto unlock;
8857 			}
8858 		}
8859 	}
8860 unlock:
8861 	rcu_read_unlock();
8862 	return kick;
8863 }
8864 #else
8865 static void nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle) { }
8866 #endif
8867 
8868 /*
8869  * run_rebalance_domains is triggered when needed from the scheduler tick.
8870  * Also triggered for nohz idle balancing (with nohz_balancing_kick set).
8871  */
8872 static __latent_entropy void run_rebalance_domains(struct softirq_action *h)
8873 {
8874 	struct rq *this_rq = this_rq();
8875 	enum cpu_idle_type idle = this_rq->idle_balance ?
8876 						CPU_IDLE : CPU_NOT_IDLE;
8877 
8878 	/*
8879 	 * If this cpu has a pending nohz_balance_kick, then do the
8880 	 * balancing on behalf of the other idle cpus whose ticks are
8881 	 * stopped. Do nohz_idle_balance *before* rebalance_domains to
8882 	 * give the idle cpus a chance to load balance. Else we may
8883 	 * load balance only within the local sched_domain hierarchy
8884 	 * and abort nohz_idle_balance altogether if we pull some load.
8885 	 */
8886 	nohz_idle_balance(this_rq, idle);
8887 	rebalance_domains(this_rq, idle);
8888 }
8889 
8890 /*
8891  * Trigger the SCHED_SOFTIRQ if it is time to do periodic load balancing.
8892  */
8893 void trigger_load_balance(struct rq *rq)
8894 {
8895 	/* Don't need to rebalance while attached to NULL domain */
8896 	if (unlikely(on_null_domain(rq)))
8897 		return;
8898 
8899 	if (time_after_eq(jiffies, rq->next_balance))
8900 		raise_softirq(SCHED_SOFTIRQ);
8901 #ifdef CONFIG_NO_HZ_COMMON
8902 	if (nohz_kick_needed(rq))
8903 		nohz_balancer_kick();
8904 #endif
8905 }
8906 
8907 static void rq_online_fair(struct rq *rq)
8908 {
8909 	update_sysctl();
8910 
8911 	update_runtime_enabled(rq);
8912 }
8913 
8914 static void rq_offline_fair(struct rq *rq)
8915 {
8916 	update_sysctl();
8917 
8918 	/* Ensure any throttled groups are reachable by pick_next_task */
8919 	unthrottle_offline_cfs_rqs(rq);
8920 }
8921 
8922 #endif /* CONFIG_SMP */
8923 
8924 /*
8925  * scheduler tick hitting a task of our scheduling class:
8926  */
8927 static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued)
8928 {
8929 	struct cfs_rq *cfs_rq;
8930 	struct sched_entity *se = &curr->se;
8931 
8932 	for_each_sched_entity(se) {
8933 		cfs_rq = cfs_rq_of(se);
8934 		entity_tick(cfs_rq, se, queued);
8935 	}
8936 
8937 	if (static_branch_unlikely(&sched_numa_balancing))
8938 		task_tick_numa(rq, curr);
8939 }
8940 
8941 /*
8942  * called on fork with the child task as argument from the parent's context
8943  *  - child not yet on the tasklist
8944  *  - preemption disabled
8945  */
8946 static void task_fork_fair(struct task_struct *p)
8947 {
8948 	struct cfs_rq *cfs_rq;
8949 	struct sched_entity *se = &p->se, *curr;
8950 	struct rq *rq = this_rq();
8951 
8952 	raw_spin_lock(&rq->lock);
8953 	update_rq_clock(rq);
8954 
8955 	cfs_rq = task_cfs_rq(current);
8956 	curr = cfs_rq->curr;
8957 	if (curr) {
8958 		update_curr(cfs_rq);
8959 		se->vruntime = curr->vruntime;
8960 	}
8961 	place_entity(cfs_rq, se, 1);
8962 
8963 	if (sysctl_sched_child_runs_first && curr && entity_before(curr, se)) {
8964 		/*
8965 		 * Upon rescheduling, sched_class::put_prev_task() will place
8966 		 * 'current' within the tree based on its new key value.
8967 		 */
8968 		swap(curr->vruntime, se->vruntime);
8969 		resched_curr(rq);
8970 	}
8971 
8972 	se->vruntime -= cfs_rq->min_vruntime;
8973 	raw_spin_unlock(&rq->lock);
8974 }
8975 
8976 /*
8977  * Priority of the task has changed. Check to see if we preempt
8978  * the current task.
8979  */
8980 static void
8981 prio_changed_fair(struct rq *rq, struct task_struct *p, int oldprio)
8982 {
8983 	if (!task_on_rq_queued(p))
8984 		return;
8985 
8986 	/*
8987 	 * Reschedule if we are currently running on this runqueue and
8988 	 * our priority decreased, or if we are not currently running on
8989 	 * this runqueue and our priority is higher than the current's
8990 	 */
8991 	if (rq->curr == p) {
8992 		if (p->prio > oldprio)
8993 			resched_curr(rq);
8994 	} else
8995 		check_preempt_curr(rq, p, 0);
8996 }
8997 
8998 static inline bool vruntime_normalized(struct task_struct *p)
8999 {
9000 	struct sched_entity *se = &p->se;
9001 
9002 	/*
9003 	 * In both the TASK_ON_RQ_QUEUED and TASK_ON_RQ_MIGRATING cases,
9004 	 * the dequeue_entity(.flags=0) will already have normalized the
9005 	 * vruntime.
9006 	 */
9007 	if (p->on_rq)
9008 		return true;
9009 
9010 	/*
9011 	 * When !on_rq, vruntime of the task has usually NOT been normalized.
9012 	 * But there are some cases where it has already been normalized:
9013 	 *
9014 	 * - A forked child which is waiting for being woken up by
9015 	 *   wake_up_new_task().
9016 	 * - A task which has been woken up by try_to_wake_up() and
9017 	 *   waiting for actually being woken up by sched_ttwu_pending().
9018 	 */
9019 	if (!se->sum_exec_runtime || p->state == TASK_WAKING)
9020 		return true;
9021 
9022 	return false;
9023 }
9024 
9025 #ifdef CONFIG_FAIR_GROUP_SCHED
9026 /*
9027  * Propagate the changes of the sched_entity across the tg tree to make it
9028  * visible to the root
9029  */
9030 static void propagate_entity_cfs_rq(struct sched_entity *se)
9031 {
9032 	struct cfs_rq *cfs_rq;
9033 
9034 	/* Start to propagate at parent */
9035 	se = se->parent;
9036 
9037 	for_each_sched_entity(se) {
9038 		cfs_rq = cfs_rq_of(se);
9039 
9040 		if (cfs_rq_throttled(cfs_rq))
9041 			break;
9042 
9043 		update_load_avg(se, UPDATE_TG);
9044 	}
9045 }
9046 #else
9047 static void propagate_entity_cfs_rq(struct sched_entity *se) { }
9048 #endif
9049 
9050 static void detach_entity_cfs_rq(struct sched_entity *se)
9051 {
9052 	struct cfs_rq *cfs_rq = cfs_rq_of(se);
9053 
9054 	/* Catch up with the cfs_rq and remove our load when we leave */
9055 	update_load_avg(se, 0);
9056 	detach_entity_load_avg(cfs_rq, se);
9057 	update_tg_load_avg(cfs_rq, false);
9058 	propagate_entity_cfs_rq(se);
9059 }
9060 
9061 static void attach_entity_cfs_rq(struct sched_entity *se)
9062 {
9063 	struct cfs_rq *cfs_rq = cfs_rq_of(se);
9064 
9065 #ifdef CONFIG_FAIR_GROUP_SCHED
9066 	/*
9067 	 * Since the real-depth could have been changed (only FAIR
9068 	 * class maintain depth value), reset depth properly.
9069 	 */
9070 	se->depth = se->parent ? se->parent->depth + 1 : 0;
9071 #endif
9072 
9073 	/* Synchronize entity with its cfs_rq */
9074 	update_load_avg(se, sched_feat(ATTACH_AGE_LOAD) ? 0 : SKIP_AGE_LOAD);
9075 	attach_entity_load_avg(cfs_rq, se);
9076 	update_tg_load_avg(cfs_rq, false);
9077 	propagate_entity_cfs_rq(se);
9078 }
9079 
9080 static void detach_task_cfs_rq(struct task_struct *p)
9081 {
9082 	struct sched_entity *se = &p->se;
9083 	struct cfs_rq *cfs_rq = cfs_rq_of(se);
9084 
9085 	if (!vruntime_normalized(p)) {
9086 		/*
9087 		 * Fix up our vruntime so that the current sleep doesn't
9088 		 * cause 'unlimited' sleep bonus.
9089 		 */
9090 		place_entity(cfs_rq, se, 0);
9091 		se->vruntime -= cfs_rq->min_vruntime;
9092 	}
9093 
9094 	detach_entity_cfs_rq(se);
9095 }
9096 
9097 static void attach_task_cfs_rq(struct task_struct *p)
9098 {
9099 	struct sched_entity *se = &p->se;
9100 	struct cfs_rq *cfs_rq = cfs_rq_of(se);
9101 
9102 	attach_entity_cfs_rq(se);
9103 
9104 	if (!vruntime_normalized(p))
9105 		se->vruntime += cfs_rq->min_vruntime;
9106 }
9107 
9108 static void switched_from_fair(struct rq *rq, struct task_struct *p)
9109 {
9110 	detach_task_cfs_rq(p);
9111 }
9112 
9113 static void switched_to_fair(struct rq *rq, struct task_struct *p)
9114 {
9115 	attach_task_cfs_rq(p);
9116 
9117 	if (task_on_rq_queued(p)) {
9118 		/*
9119 		 * We were most likely switched from sched_rt, so
9120 		 * kick off the schedule if running, otherwise just see
9121 		 * if we can still preempt the current task.
9122 		 */
9123 		if (rq->curr == p)
9124 			resched_curr(rq);
9125 		else
9126 			check_preempt_curr(rq, p, 0);
9127 	}
9128 }
9129 
9130 /* Account for a task changing its policy or group.
9131  *
9132  * This routine is mostly called to set cfs_rq->curr field when a task
9133  * migrates between groups/classes.
9134  */
9135 static void set_curr_task_fair(struct rq *rq)
9136 {
9137 	struct sched_entity *se = &rq->curr->se;
9138 
9139 	for_each_sched_entity(se) {
9140 		struct cfs_rq *cfs_rq = cfs_rq_of(se);
9141 
9142 		set_next_entity(cfs_rq, se);
9143 		/* ensure bandwidth has been allocated on our new cfs_rq */
9144 		account_cfs_rq_runtime(cfs_rq, 0);
9145 	}
9146 }
9147 
9148 void init_cfs_rq(struct cfs_rq *cfs_rq)
9149 {
9150 	cfs_rq->tasks_timeline = RB_ROOT;
9151 	cfs_rq->min_vruntime = (u64)(-(1LL << 20));
9152 #ifndef CONFIG_64BIT
9153 	cfs_rq->min_vruntime_copy = cfs_rq->min_vruntime;
9154 #endif
9155 #ifdef CONFIG_SMP
9156 #ifdef CONFIG_FAIR_GROUP_SCHED
9157 	cfs_rq->propagate_avg = 0;
9158 #endif
9159 	atomic_long_set(&cfs_rq->removed_load_avg, 0);
9160 	atomic_long_set(&cfs_rq->removed_util_avg, 0);
9161 #endif
9162 }
9163 
9164 #ifdef CONFIG_FAIR_GROUP_SCHED
9165 static void task_set_group_fair(struct task_struct *p)
9166 {
9167 	struct sched_entity *se = &p->se;
9168 
9169 	set_task_rq(p, task_cpu(p));
9170 	se->depth = se->parent ? se->parent->depth + 1 : 0;
9171 }
9172 
9173 static void task_move_group_fair(struct task_struct *p)
9174 {
9175 	detach_task_cfs_rq(p);
9176 	set_task_rq(p, task_cpu(p));
9177 
9178 #ifdef CONFIG_SMP
9179 	/* Tell se's cfs_rq has been changed -- migrated */
9180 	p->se.avg.last_update_time = 0;
9181 #endif
9182 	attach_task_cfs_rq(p);
9183 }
9184 
9185 static void task_change_group_fair(struct task_struct *p, int type)
9186 {
9187 	switch (type) {
9188 	case TASK_SET_GROUP:
9189 		task_set_group_fair(p);
9190 		break;
9191 
9192 	case TASK_MOVE_GROUP:
9193 		task_move_group_fair(p);
9194 		break;
9195 	}
9196 }
9197 
9198 void free_fair_sched_group(struct task_group *tg)
9199 {
9200 	int i;
9201 
9202 	destroy_cfs_bandwidth(tg_cfs_bandwidth(tg));
9203 
9204 	for_each_possible_cpu(i) {
9205 		if (tg->cfs_rq)
9206 			kfree(tg->cfs_rq[i]);
9207 		if (tg->se)
9208 			kfree(tg->se[i]);
9209 	}
9210 
9211 	kfree(tg->cfs_rq);
9212 	kfree(tg->se);
9213 }
9214 
9215 int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
9216 {
9217 	struct sched_entity *se;
9218 	struct cfs_rq *cfs_rq;
9219 	int i;
9220 
9221 	tg->cfs_rq = kzalloc(sizeof(cfs_rq) * nr_cpu_ids, GFP_KERNEL);
9222 	if (!tg->cfs_rq)
9223 		goto err;
9224 	tg->se = kzalloc(sizeof(se) * nr_cpu_ids, GFP_KERNEL);
9225 	if (!tg->se)
9226 		goto err;
9227 
9228 	tg->shares = NICE_0_LOAD;
9229 
9230 	init_cfs_bandwidth(tg_cfs_bandwidth(tg));
9231 
9232 	for_each_possible_cpu(i) {
9233 		cfs_rq = kzalloc_node(sizeof(struct cfs_rq),
9234 				      GFP_KERNEL, cpu_to_node(i));
9235 		if (!cfs_rq)
9236 			goto err;
9237 
9238 		se = kzalloc_node(sizeof(struct sched_entity),
9239 				  GFP_KERNEL, cpu_to_node(i));
9240 		if (!se)
9241 			goto err_free_rq;
9242 
9243 		init_cfs_rq(cfs_rq);
9244 		init_tg_cfs_entry(tg, cfs_rq, se, i, parent->se[i]);
9245 		init_entity_runnable_average(se);
9246 	}
9247 
9248 	return 1;
9249 
9250 err_free_rq:
9251 	kfree(cfs_rq);
9252 err:
9253 	return 0;
9254 }
9255 
9256 void online_fair_sched_group(struct task_group *tg)
9257 {
9258 	struct sched_entity *se;
9259 	struct rq *rq;
9260 	int i;
9261 
9262 	for_each_possible_cpu(i) {
9263 		rq = cpu_rq(i);
9264 		se = tg->se[i];
9265 
9266 		raw_spin_lock_irq(&rq->lock);
9267 		attach_entity_cfs_rq(se);
9268 		sync_throttle(tg, i);
9269 		raw_spin_unlock_irq(&rq->lock);
9270 	}
9271 }
9272 
9273 void unregister_fair_sched_group(struct task_group *tg)
9274 {
9275 	unsigned long flags;
9276 	struct rq *rq;
9277 	int cpu;
9278 
9279 	for_each_possible_cpu(cpu) {
9280 		if (tg->se[cpu])
9281 			remove_entity_load_avg(tg->se[cpu]);
9282 
9283 		/*
9284 		 * Only empty task groups can be destroyed; so we can speculatively
9285 		 * check on_list without danger of it being re-added.
9286 		 */
9287 		if (!tg->cfs_rq[cpu]->on_list)
9288 			continue;
9289 
9290 		rq = cpu_rq(cpu);
9291 
9292 		raw_spin_lock_irqsave(&rq->lock, flags);
9293 		list_del_leaf_cfs_rq(tg->cfs_rq[cpu]);
9294 		raw_spin_unlock_irqrestore(&rq->lock, flags);
9295 	}
9296 }
9297 
9298 void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
9299 			struct sched_entity *se, int cpu,
9300 			struct sched_entity *parent)
9301 {
9302 	struct rq *rq = cpu_rq(cpu);
9303 
9304 	cfs_rq->tg = tg;
9305 	cfs_rq->rq = rq;
9306 	init_cfs_rq_runtime(cfs_rq);
9307 
9308 	tg->cfs_rq[cpu] = cfs_rq;
9309 	tg->se[cpu] = se;
9310 
9311 	/* se could be NULL for root_task_group */
9312 	if (!se)
9313 		return;
9314 
9315 	if (!parent) {
9316 		se->cfs_rq = &rq->cfs;
9317 		se->depth = 0;
9318 	} else {
9319 		se->cfs_rq = parent->my_q;
9320 		se->depth = parent->depth + 1;
9321 	}
9322 
9323 	se->my_q = cfs_rq;
9324 	/* guarantee group entities always have weight */
9325 	update_load_set(&se->load, NICE_0_LOAD);
9326 	se->parent = parent;
9327 }
9328 
9329 static DEFINE_MUTEX(shares_mutex);
9330 
9331 int sched_group_set_shares(struct task_group *tg, unsigned long shares)
9332 {
9333 	int i;
9334 	unsigned long flags;
9335 
9336 	/*
9337 	 * We can't change the weight of the root cgroup.
9338 	 */
9339 	if (!tg->se[0])
9340 		return -EINVAL;
9341 
9342 	shares = clamp(shares, scale_load(MIN_SHARES), scale_load(MAX_SHARES));
9343 
9344 	mutex_lock(&shares_mutex);
9345 	if (tg->shares == shares)
9346 		goto done;
9347 
9348 	tg->shares = shares;
9349 	for_each_possible_cpu(i) {
9350 		struct rq *rq = cpu_rq(i);
9351 		struct sched_entity *se;
9352 
9353 		se = tg->se[i];
9354 		/* Propagate contribution to hierarchy */
9355 		raw_spin_lock_irqsave(&rq->lock, flags);
9356 
9357 		/* Possible calls to update_curr() need rq clock */
9358 		update_rq_clock(rq);
9359 		for_each_sched_entity(se)
9360 			update_cfs_shares(group_cfs_rq(se));
9361 		raw_spin_unlock_irqrestore(&rq->lock, flags);
9362 	}
9363 
9364 done:
9365 	mutex_unlock(&shares_mutex);
9366 	return 0;
9367 }
9368 #else /* CONFIG_FAIR_GROUP_SCHED */
9369 
9370 void free_fair_sched_group(struct task_group *tg) { }
9371 
9372 int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
9373 {
9374 	return 1;
9375 }
9376 
9377 void online_fair_sched_group(struct task_group *tg) { }
9378 
9379 void unregister_fair_sched_group(struct task_group *tg) { }
9380 
9381 #endif /* CONFIG_FAIR_GROUP_SCHED */
9382 
9383 
9384 static unsigned int get_rr_interval_fair(struct rq *rq, struct task_struct *task)
9385 {
9386 	struct sched_entity *se = &task->se;
9387 	unsigned int rr_interval = 0;
9388 
9389 	/*
9390 	 * Time slice is 0 for SCHED_OTHER tasks that are on an otherwise
9391 	 * idle runqueue:
9392 	 */
9393 	if (rq->cfs.load.weight)
9394 		rr_interval = NS_TO_JIFFIES(sched_slice(cfs_rq_of(se), se));
9395 
9396 	return rr_interval;
9397 }
9398 
9399 /*
9400  * All the scheduling class methods:
9401  */
9402 const struct sched_class fair_sched_class = {
9403 	.next			= &idle_sched_class,
9404 	.enqueue_task		= enqueue_task_fair,
9405 	.dequeue_task		= dequeue_task_fair,
9406 	.yield_task		= yield_task_fair,
9407 	.yield_to_task		= yield_to_task_fair,
9408 
9409 	.check_preempt_curr	= check_preempt_wakeup,
9410 
9411 	.pick_next_task		= pick_next_task_fair,
9412 	.put_prev_task		= put_prev_task_fair,
9413 
9414 #ifdef CONFIG_SMP
9415 	.select_task_rq		= select_task_rq_fair,
9416 	.migrate_task_rq	= migrate_task_rq_fair,
9417 
9418 	.rq_online		= rq_online_fair,
9419 	.rq_offline		= rq_offline_fair,
9420 
9421 	.task_dead		= task_dead_fair,
9422 	.set_cpus_allowed	= set_cpus_allowed_common,
9423 #endif
9424 
9425 	.set_curr_task          = set_curr_task_fair,
9426 	.task_tick		= task_tick_fair,
9427 	.task_fork		= task_fork_fair,
9428 
9429 	.prio_changed		= prio_changed_fair,
9430 	.switched_from		= switched_from_fair,
9431 	.switched_to		= switched_to_fair,
9432 
9433 	.get_rr_interval	= get_rr_interval_fair,
9434 
9435 	.update_curr		= update_curr_fair,
9436 
9437 #ifdef CONFIG_FAIR_GROUP_SCHED
9438 	.task_change_group	= task_change_group_fair,
9439 #endif
9440 };
9441 
9442 #ifdef CONFIG_SCHED_DEBUG
9443 void print_cfs_stats(struct seq_file *m, int cpu)
9444 {
9445 	struct cfs_rq *cfs_rq;
9446 
9447 	rcu_read_lock();
9448 	for_each_leaf_cfs_rq(cpu_rq(cpu), cfs_rq)
9449 		print_cfs_rq(m, cpu, cfs_rq);
9450 	rcu_read_unlock();
9451 }
9452 
9453 #ifdef CONFIG_NUMA_BALANCING
9454 void show_numa_stats(struct task_struct *p, struct seq_file *m)
9455 {
9456 	int node;
9457 	unsigned long tsf = 0, tpf = 0, gsf = 0, gpf = 0;
9458 
9459 	for_each_online_node(node) {
9460 		if (p->numa_faults) {
9461 			tsf = p->numa_faults[task_faults_idx(NUMA_MEM, node, 0)];
9462 			tpf = p->numa_faults[task_faults_idx(NUMA_MEM, node, 1)];
9463 		}
9464 		if (p->numa_group) {
9465 			gsf = p->numa_group->faults[task_faults_idx(NUMA_MEM, node, 0)],
9466 			gpf = p->numa_group->faults[task_faults_idx(NUMA_MEM, node, 1)];
9467 		}
9468 		print_numa_stats(m, node, tsf, tpf, gsf, gpf);
9469 	}
9470 }
9471 #endif /* CONFIG_NUMA_BALANCING */
9472 #endif /* CONFIG_SCHED_DEBUG */
9473 
9474 __init void init_sched_fair_class(void)
9475 {
9476 #ifdef CONFIG_SMP
9477 	open_softirq(SCHED_SOFTIRQ, run_rebalance_domains);
9478 
9479 #ifdef CONFIG_NO_HZ_COMMON
9480 	nohz.next_balance = jiffies;
9481 	zalloc_cpumask_var(&nohz.idle_cpus_mask, GFP_NOWAIT);
9482 #endif
9483 #endif /* SMP */
9484 
9485 }
9486