xref: /openbmc/linux/kernel/sched/fair.c (revision bc000245)
1 /*
2  * Completely Fair Scheduling (CFS) Class (SCHED_NORMAL/SCHED_BATCH)
3  *
4  *  Copyright (C) 2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
5  *
6  *  Interactivity improvements by Mike Galbraith
7  *  (C) 2007 Mike Galbraith <efault@gmx.de>
8  *
9  *  Various enhancements by Dmitry Adamushko.
10  *  (C) 2007 Dmitry Adamushko <dmitry.adamushko@gmail.com>
11  *
12  *  Group scheduling enhancements by Srivatsa Vaddagiri
13  *  Copyright IBM Corporation, 2007
14  *  Author: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>
15  *
16  *  Scaled math optimizations by Thomas Gleixner
17  *  Copyright (C) 2007, Thomas Gleixner <tglx@linutronix.de>
18  *
19  *  Adaptive scheduling granularity, math enhancements by Peter Zijlstra
20  *  Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
21  */
22 
23 #include <linux/latencytop.h>
24 #include <linux/sched.h>
25 #include <linux/cpumask.h>
26 #include <linux/slab.h>
27 #include <linux/profile.h>
28 #include <linux/interrupt.h>
29 #include <linux/mempolicy.h>
30 #include <linux/migrate.h>
31 #include <linux/task_work.h>
32 
33 #include <trace/events/sched.h>
34 
35 #include "sched.h"
36 
37 /*
38  * Targeted preemption latency for CPU-bound tasks:
39  * (default: 6ms * (1 + ilog(ncpus)), units: nanoseconds)
40  *
41  * NOTE: this latency value is not the same as the concept of
42  * 'timeslice length' - timeslices in CFS are of variable length
43  * and have no persistent notion like in traditional, time-slice
44  * based scheduling concepts.
45  *
46  * (to see the precise effective timeslice length of your workload,
47  *  run vmstat and monitor the context-switches (cs) field)
48  */
49 unsigned int sysctl_sched_latency = 6000000ULL;
50 unsigned int normalized_sysctl_sched_latency = 6000000ULL;
51 
52 /*
53  * The initial- and re-scaling of tunables is configurable
54  * (default SCHED_TUNABLESCALING_LOG = *(1+ilog(ncpus)))
55  *
56  * Options are:
57  * SCHED_TUNABLESCALING_NONE - unscaled, always *1
58  * SCHED_TUNABLESCALING_LOG - scaled logarithmically, *1+ilog(ncpus)
59  * SCHED_TUNABLESCALING_LINEAR - scaled linearly, *ncpus
60  */
61 enum sched_tunable_scaling sysctl_sched_tunable_scaling
62 	= SCHED_TUNABLESCALING_LOG;
63 
64 /*
65  * Minimal preemption granularity for CPU-bound tasks:
66  * (default: 0.75 msec * (1 + ilog(ncpus)), units: nanoseconds)
67  */
68 unsigned int sysctl_sched_min_granularity = 750000ULL;
69 unsigned int normalized_sysctl_sched_min_granularity = 750000ULL;
70 
71 /*
72  * sched_nr_latency is kept at sysctl_sched_latency / sysctl_sched_min_granularity
73  */
74 static unsigned int sched_nr_latency = 8;
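/*
 * Illustrative note: with the default tunables above this works out to
 * sysctl_sched_latency / sysctl_sched_min_granularity = 6ms / 0.75ms = 8,
 * and the value is recomputed with the same (rounded-up) division in
 * sched_proc_update_handler() whenever either sysctl is changed.
 */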
75 
76 /*
77  * After fork, child runs first. If set to 0 (default) then
78  * parent will (try to) run first.
79  */
80 unsigned int sysctl_sched_child_runs_first __read_mostly;
81 
82 /*
83  * SCHED_OTHER wake-up granularity.
84  * (default: 1 msec * (1 + ilog(ncpus)), units: nanoseconds)
85  *
86  * This option delays the preemption effects of decoupled workloads
87  * and reduces their over-scheduling. Synchronous workloads will still
88  * have immediate wakeup/sleep latencies.
89  */
90 unsigned int sysctl_sched_wakeup_granularity = 1000000UL;
91 unsigned int normalized_sysctl_sched_wakeup_granularity = 1000000UL;
92 
93 const_debug unsigned int sysctl_sched_migration_cost = 500000UL;
94 
95 /*
96  * The exponential sliding window over which load is averaged for shares
97  * distribution.
98  * (default: 10msec)
99  */
100 unsigned int __read_mostly sysctl_sched_shares_window = 10000000UL;
101 
102 #ifdef CONFIG_CFS_BANDWIDTH
103 /*
104  * Amount of runtime to allocate from global (tg) to local (per-cfs_rq) pool
105  * each time a cfs_rq requests quota.
106  *
107  * Note: in the case that the slice exceeds the runtime remaining (either due
108  * to consumption or the quota being specified to be smaller than the slice)
109  * we will always only issue the remaining available time.
110  *
111  * default: 5 msec, units: microseconds
112   */
113 unsigned int sysctl_sched_cfs_bandwidth_slice = 5000UL;
114 #endif
115 
116 static inline void update_load_add(struct load_weight *lw, unsigned long inc)
117 {
118 	lw->weight += inc;
119 	lw->inv_weight = 0;
120 }
121 
122 static inline void update_load_sub(struct load_weight *lw, unsigned long dec)
123 {
124 	lw->weight -= dec;
125 	lw->inv_weight = 0;
126 }
127 
128 static inline void update_load_set(struct load_weight *lw, unsigned long w)
129 {
130 	lw->weight = w;
131 	lw->inv_weight = 0;
132 }
133 
134 /*
135  * Increase the granularity value when there are more CPUs,
136  * because with more CPUs the 'effective latency' as visible
137  * to users decreases. But the relationship is not linear,
138  * so pick a second-best guess by going with the log2 of the
139  * number of CPUs.
140  *
141  * This idea comes from the SD scheduler of Con Kolivas:
142  */
143 static int get_update_sysctl_factor(void)
144 {
145 	unsigned int cpus = min_t(int, num_online_cpus(), 8);
146 	unsigned int factor;
147 
148 	switch (sysctl_sched_tunable_scaling) {
149 	case SCHED_TUNABLESCALING_NONE:
150 		factor = 1;
151 		break;
152 	case SCHED_TUNABLESCALING_LINEAR:
153 		factor = cpus;
154 		break;
155 	case SCHED_TUNABLESCALING_LOG:
156 	default:
157 		factor = 1 + ilog2(cpus);
158 		break;
159 	}
160 
161 	return factor;
162 }
163 
164 static void update_sysctl(void)
165 {
166 	unsigned int factor = get_update_sysctl_factor();
167 
168 #define SET_SYSCTL(name) \
169 	(sysctl_##name = (factor) * normalized_sysctl_##name)
170 	SET_SYSCTL(sched_min_granularity);
171 	SET_SYSCTL(sched_latency);
172 	SET_SYSCTL(sched_wakeup_granularity);
173 #undef SET_SYSCTL
174 }
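/*
 * Worked example (illustrative only): with the default
 * SCHED_TUNABLESCALING_LOG policy on a 4-CPU system,
 * get_update_sysctl_factor() returns 1 + ilog2(4) = 3, so update_sysctl()
 * sets sched_latency to 3 * 6ms = 18ms, sched_min_granularity to
 * 3 * 0.75ms = 2.25ms and sched_wakeup_granularity to 3 * 1ms = 3ms.
 * Since the cpu count is clamped to 8, the factor never exceeds
 * 1 + ilog2(8) = 4.
 */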
175 
176 void sched_init_granularity(void)
177 {
178 	update_sysctl();
179 }
180 
181 #if BITS_PER_LONG == 32
182 # define WMULT_CONST	(~0UL)
183 #else
184 # define WMULT_CONST	(1UL << 32)
185 #endif
186 
187 #define WMULT_SHIFT	32
188 
189 /*
190  * Shift right and round:
191  */
192 #define SRR(x, y) (((x) + (1UL << ((y) - 1))) >> (y))
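/*
 * Rounding example (illustrative): SRR(x, y) divides x by 2^y and rounds
 * half up, e.g. SRR(7, 2) = (7 + 2) >> 2 = 2 (7/4 = 1.75), where a plain
 * shift 7 >> 2 would have truncated to 1.
 */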
193 
194 /*
195  * delta *= weight / lw
196  */
197 static unsigned long
198 calc_delta_mine(unsigned long delta_exec, unsigned long weight,
199 		struct load_weight *lw)
200 {
201 	u64 tmp;
202 
203 	/*
204 	 * weight can be less than 2^SCHED_LOAD_RESOLUTION for task group sched
205 	 * entities since MIN_SHARES = 2. Treat weight as 1 if less than
206 	 * 2^SCHED_LOAD_RESOLUTION.
207 	 */
208 	if (likely(weight > (1UL << SCHED_LOAD_RESOLUTION)))
209 		tmp = (u64)delta_exec * scale_load_down(weight);
210 	else
211 		tmp = (u64)delta_exec;
212 
213 	if (!lw->inv_weight) {
214 		unsigned long w = scale_load_down(lw->weight);
215 
216 		if (BITS_PER_LONG > 32 && unlikely(w >= WMULT_CONST))
217 			lw->inv_weight = 1;
218 		else if (unlikely(!w))
219 			lw->inv_weight = WMULT_CONST;
220 		else
221 			lw->inv_weight = WMULT_CONST / w;
222 	}
223 
224 	/*
225 	 * Check whether we'd overflow the 64-bit multiplication:
226 	 */
227 	if (unlikely(tmp > WMULT_CONST))
228 		tmp = SRR(SRR(tmp, WMULT_SHIFT/2) * lw->inv_weight,
229 			WMULT_SHIFT/2);
230 	else
231 		tmp = SRR(tmp * lw->inv_weight, WMULT_SHIFT);
232 
233 	return (unsigned long)min(tmp, (u64)(unsigned long)LONG_MAX);
234 }
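/*
 * Worked example (illustrative, ignoring the extra SCHED_LOAD_RESOLUTION
 * scaling used on 64-bit): for a nice-0 entity (weight 1024) on a load
 * weight lw of 3072, inv_weight becomes 2^32 / 3072, and
 * calc_delta_mine(3ms, 1024, lw) returns roughly 3ms * 1024 / 3072 = 1ms,
 * i.e. a third of the wall-clock delta.
 */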
235 
236 
237 const struct sched_class fair_sched_class;
238 
239 /**************************************************************
240  * CFS operations on generic schedulable entities:
241  */
242 
243 #ifdef CONFIG_FAIR_GROUP_SCHED
244 
245 /* cpu runqueue to which this cfs_rq is attached */
246 static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
247 {
248 	return cfs_rq->rq;
249 }
250 
251 /* An entity is a task if it doesn't "own" a runqueue */
252 #define entity_is_task(se)	(!se->my_q)
253 
254 static inline struct task_struct *task_of(struct sched_entity *se)
255 {
256 #ifdef CONFIG_SCHED_DEBUG
257 	WARN_ON_ONCE(!entity_is_task(se));
258 #endif
259 	return container_of(se, struct task_struct, se);
260 }
261 
262 /* Walk up scheduling entities hierarchy */
263 #define for_each_sched_entity(se) \
264 		for (; se; se = se->parent)
265 
266 static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
267 {
268 	return p->se.cfs_rq;
269 }
270 
271 /* runqueue on which this entity is (to be) queued */
272 static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
273 {
274 	return se->cfs_rq;
275 }
276 
277 /* runqueue "owned" by this group */
278 static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
279 {
280 	return grp->my_q;
281 }
282 
283 static void update_cfs_rq_blocked_load(struct cfs_rq *cfs_rq,
284 				       int force_update);
285 
286 static inline void list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq)
287 {
288 	if (!cfs_rq->on_list) {
289 		/*
290 		 * Ensure we either appear before our parent (if already
291 		 * enqueued) or force our parent to appear after us when it is
292 		 * enqueued.  The fact that we always enqueue bottom-up
293 		 * reduces this to two cases.
294 		 */
295 		if (cfs_rq->tg->parent &&
296 		    cfs_rq->tg->parent->cfs_rq[cpu_of(rq_of(cfs_rq))]->on_list) {
297 			list_add_rcu(&cfs_rq->leaf_cfs_rq_list,
298 				&rq_of(cfs_rq)->leaf_cfs_rq_list);
299 		} else {
300 			list_add_tail_rcu(&cfs_rq->leaf_cfs_rq_list,
301 				&rq_of(cfs_rq)->leaf_cfs_rq_list);
302 		}
303 
304 		cfs_rq->on_list = 1;
305 		/* We should have no load, but we need to update last_decay. */
306 		update_cfs_rq_blocked_load(cfs_rq, 0);
307 	}
308 }
309 
310 static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq)
311 {
312 	if (cfs_rq->on_list) {
313 		list_del_rcu(&cfs_rq->leaf_cfs_rq_list);
314 		cfs_rq->on_list = 0;
315 	}
316 }
317 
318 /* Iterate through all leaf cfs_rq's on a runqueue */
319 #define for_each_leaf_cfs_rq(rq, cfs_rq) \
320 	list_for_each_entry_rcu(cfs_rq, &rq->leaf_cfs_rq_list, leaf_cfs_rq_list)
321 
322 /* Do the two (enqueued) entities belong to the same group ? */
323 static inline int
324 is_same_group(struct sched_entity *se, struct sched_entity *pse)
325 {
326 	if (se->cfs_rq == pse->cfs_rq)
327 		return 1;
328 
329 	return 0;
330 }
331 
332 static inline struct sched_entity *parent_entity(struct sched_entity *se)
333 {
334 	return se->parent;
335 }
336 
337 /* return depth at which a sched entity is present in the hierarchy */
338 static inline int depth_se(struct sched_entity *se)
339 {
340 	int depth = 0;
341 
342 	for_each_sched_entity(se)
343 		depth++;
344 
345 	return depth;
346 }
347 
348 static void
349 find_matching_se(struct sched_entity **se, struct sched_entity **pse)
350 {
351 	int se_depth, pse_depth;
352 
353 	/*
354 	 * A preemption test can only be made between sibling entities that are
355 	 * in the same cfs_rq, i.e. that have a common parent. Walk up the
356 	 * hierarchy of both tasks until we find ancestors that are siblings on
357 	 * a common parent.
358 	 */
359 
360 	/* First walk up until both entities are at same depth */
361 	se_depth = depth_se(*se);
362 	pse_depth = depth_se(*pse);
363 
364 	while (se_depth > pse_depth) {
365 		se_depth--;
366 		*se = parent_entity(*se);
367 	}
368 
369 	while (pse_depth > se_depth) {
370 		pse_depth--;
371 		*pse = parent_entity(*pse);
372 	}
373 
374 	while (!is_same_group(*se, *pse)) {
375 		*se = parent_entity(*se);
376 		*pse = parent_entity(*pse);
377 	}
378 }
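/*
 * Illustrative walk-through: if *se is a task nested two groups deep
 * (task -> group B -> group A -> root) and *pse is a task queued directly
 * on the root cfs_rq, depth_se() returns 3 and 1 respectively. *se is
 * walked up twice, to group B's entity and then group A's entity on the
 * root cfs_rq; that entity and *pse then satisfy is_same_group(), so the
 * two entities handed back have directly comparable vruntimes.
 */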
379 
380 #else	/* !CONFIG_FAIR_GROUP_SCHED */
381 
382 static inline struct task_struct *task_of(struct sched_entity *se)
383 {
384 	return container_of(se, struct task_struct, se);
385 }
386 
387 static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
388 {
389 	return container_of(cfs_rq, struct rq, cfs);
390 }
391 
392 #define entity_is_task(se)	1
393 
394 #define for_each_sched_entity(se) \
395 		for (; se; se = NULL)
396 
397 static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
398 {
399 	return &task_rq(p)->cfs;
400 }
401 
402 static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
403 {
404 	struct task_struct *p = task_of(se);
405 	struct rq *rq = task_rq(p);
406 
407 	return &rq->cfs;
408 }
409 
410 /* runqueue "owned" by this group */
411 static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
412 {
413 	return NULL;
414 }
415 
416 static inline void list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq)
417 {
418 }
419 
420 static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq)
421 {
422 }
423 
424 #define for_each_leaf_cfs_rq(rq, cfs_rq) \
425 		for (cfs_rq = &rq->cfs; cfs_rq; cfs_rq = NULL)
426 
427 static inline int
428 is_same_group(struct sched_entity *se, struct sched_entity *pse)
429 {
430 	return 1;
431 }
432 
433 static inline struct sched_entity *parent_entity(struct sched_entity *se)
434 {
435 	return NULL;
436 }
437 
438 static inline void
439 find_matching_se(struct sched_entity **se, struct sched_entity **pse)
440 {
441 }
442 
443 #endif	/* CONFIG_FAIR_GROUP_SCHED */
444 
445 static __always_inline
446 void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, unsigned long delta_exec);
447 
448 /**************************************************************
449  * Scheduling class tree data structure manipulation methods:
450  */
451 
452 static inline u64 max_vruntime(u64 max_vruntime, u64 vruntime)
453 {
454 	s64 delta = (s64)(vruntime - max_vruntime);
455 	if (delta > 0)
456 		max_vruntime = vruntime;
457 
458 	return max_vruntime;
459 }
460 
461 static inline u64 min_vruntime(u64 min_vruntime, u64 vruntime)
462 {
463 	s64 delta = (s64)(vruntime - min_vruntime);
464 	if (delta < 0)
465 		min_vruntime = vruntime;
466 
467 	return min_vruntime;
468 }
469 
470 static inline int entity_before(struct sched_entity *a,
471 				struct sched_entity *b)
472 {
473 	return (s64)(a->vruntime - b->vruntime) < 0;
474 }
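/*
 * Note (illustrative): comparing via a signed difference keeps the test
 * correct across u64 vruntime wraparound, much like time_before(). E.g.
 * with b->vruntime = ULLONG_MAX - 1 and a->vruntime = 2 (just wrapped),
 * the difference is +4, so 'a' is correctly treated as not before 'b',
 * as long as the two values stay within 2^63 of each other.
 */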
475 
476 static void update_min_vruntime(struct cfs_rq *cfs_rq)
477 {
478 	u64 vruntime = cfs_rq->min_vruntime;
479 
480 	if (cfs_rq->curr)
481 		vruntime = cfs_rq->curr->vruntime;
482 
483 	if (cfs_rq->rb_leftmost) {
484 		struct sched_entity *se = rb_entry(cfs_rq->rb_leftmost,
485 						   struct sched_entity,
486 						   run_node);
487 
488 		if (!cfs_rq->curr)
489 			vruntime = se->vruntime;
490 		else
491 			vruntime = min_vruntime(vruntime, se->vruntime);
492 	}
493 
494 	/* ensure we never gain time by being placed backwards. */
495 	cfs_rq->min_vruntime = max_vruntime(cfs_rq->min_vruntime, vruntime);
496 #ifndef CONFIG_64BIT
497 	smp_wmb();
498 	cfs_rq->min_vruntime_copy = cfs_rq->min_vruntime;
499 #endif
500 }
501 
502 /*
503  * Enqueue an entity into the rb-tree:
504  */
505 static void __enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
506 {
507 	struct rb_node **link = &cfs_rq->tasks_timeline.rb_node;
508 	struct rb_node *parent = NULL;
509 	struct sched_entity *entry;
510 	int leftmost = 1;
511 
512 	/*
513 	 * Find the right place in the rbtree:
514 	 */
515 	while (*link) {
516 		parent = *link;
517 		entry = rb_entry(parent, struct sched_entity, run_node);
518 		/*
519 		 * We don't care about collisions. Nodes with
520 		 * the same key stay together.
521 		 */
522 		if (entity_before(se, entry)) {
523 			link = &parent->rb_left;
524 		} else {
525 			link = &parent->rb_right;
526 			leftmost = 0;
527 		}
528 	}
529 
530 	/*
531 	 * Maintain a cache of leftmost tree entries (it is frequently
532 	 * used):
533 	 */
534 	if (leftmost)
535 		cfs_rq->rb_leftmost = &se->run_node;
536 
537 	rb_link_node(&se->run_node, parent, link);
538 	rb_insert_color(&se->run_node, &cfs_rq->tasks_timeline);
539 }
540 
541 static void __dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
542 {
543 	if (cfs_rq->rb_leftmost == &se->run_node) {
544 		struct rb_node *next_node;
545 
546 		next_node = rb_next(&se->run_node);
547 		cfs_rq->rb_leftmost = next_node;
548 	}
549 
550 	rb_erase(&se->run_node, &cfs_rq->tasks_timeline);
551 }
552 
553 struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq)
554 {
555 	struct rb_node *left = cfs_rq->rb_leftmost;
556 
557 	if (!left)
558 		return NULL;
559 
560 	return rb_entry(left, struct sched_entity, run_node);
561 }
562 
563 static struct sched_entity *__pick_next_entity(struct sched_entity *se)
564 {
565 	struct rb_node *next = rb_next(&se->run_node);
566 
567 	if (!next)
568 		return NULL;
569 
570 	return rb_entry(next, struct sched_entity, run_node);
571 }
572 
573 #ifdef CONFIG_SCHED_DEBUG
574 struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq)
575 {
576 	struct rb_node *last = rb_last(&cfs_rq->tasks_timeline);
577 
578 	if (!last)
579 		return NULL;
580 
581 	return rb_entry(last, struct sched_entity, run_node);
582 }
583 
584 /**************************************************************
585  * Scheduling class statistics methods:
586  */
587 
588 int sched_proc_update_handler(struct ctl_table *table, int write,
589 		void __user *buffer, size_t *lenp,
590 		loff_t *ppos)
591 {
592 	int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
593 	int factor = get_update_sysctl_factor();
594 
595 	if (ret || !write)
596 		return ret;
597 
598 	sched_nr_latency = DIV_ROUND_UP(sysctl_sched_latency,
599 					sysctl_sched_min_granularity);
600 
601 #define WRT_SYSCTL(name) \
602 	(normalized_sysctl_##name = sysctl_##name / (factor))
603 	WRT_SYSCTL(sched_min_granularity);
604 	WRT_SYSCTL(sched_latency);
605 	WRT_SYSCTL(sched_wakeup_granularity);
606 #undef WRT_SYSCTL
607 
608 	return 0;
609 }
610 #endif
611 
612 /*
613  * delta /= w
614  */
615 static inline unsigned long
616 calc_delta_fair(unsigned long delta, struct sched_entity *se)
617 {
618 	if (unlikely(se->load.weight != NICE_0_LOAD))
619 		delta = calc_delta_mine(delta, NICE_0_LOAD, &se->load);
620 
621 	return delta;
622 }
623 
624 /*
625  * The idea is to set a period in which each task runs once.
626  *
627  * When there are too many tasks (sched_nr_latency) we have to stretch
628  * this period because otherwise the slices get too small.
629  *
630  * p = (nr <= nl) ? l : l*nr/nl
631  */
632 static u64 __sched_period(unsigned long nr_running)
633 {
634 	u64 period = sysctl_sched_latency;
635 	unsigned long nr_latency = sched_nr_latency;
636 
637 	if (unlikely(nr_running > nr_latency)) {
638 		period = sysctl_sched_min_granularity;
639 		period *= nr_running;
640 	}
641 
642 	return period;
643 }
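/*
 * Worked example with the (unscaled) defaults, for illustration: 3 runnable
 * tasks is <= sched_nr_latency = 8, so the period is sysctl_sched_latency,
 * 6ms; 16 runnable tasks stretch it to 16 * 0.75ms = 12ms so that each
 * slice stays at least one minimum granularity long.
 */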
644 
645 /*
646  * We calculate the wall-time slice from the period by taking a part
647  * proportional to the weight.
648  *
649  * s = p*P[w/rw]
650  */
651 static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
652 {
653 	u64 slice = __sched_period(cfs_rq->nr_running + !se->on_rq);
654 
655 	for_each_sched_entity(se) {
656 		struct load_weight *load;
657 		struct load_weight lw;
658 
659 		cfs_rq = cfs_rq_of(se);
660 		load = &cfs_rq->load;
661 
662 		if (unlikely(!se->on_rq)) {
663 			lw = cfs_rq->load;
664 
665 			update_load_add(&lw, se->load.weight);
666 			load = &lw;
667 		}
668 		slice = calc_delta_mine(slice, se->load.weight, load);
669 	}
670 	return slice;
671 }
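/*
 * Worked example (illustrative): with two runnable entities of weight 1024
 * and 2048 on one cfs_rq, a 6ms period is split in proportion to weight,
 * i.e. roughly 6ms * 1024/3072 = 2ms and 6ms * 2048/3072 = 4ms. The exact
 * values go through calc_delta_mine()'s fixed-point arithmetic, so they can
 * be off by a few nanoseconds.
 */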
672 
673 /*
674  * We calculate the vruntime slice of a to-be-inserted task.
675  *
676  * vs = s/w
677  */
678 static u64 sched_vslice(struct cfs_rq *cfs_rq, struct sched_entity *se)
679 {
680 	return calc_delta_fair(sched_slice(cfs_rq, se), se);
681 }
682 
683 #ifdef CONFIG_SMP
684 static unsigned long task_h_load(struct task_struct *p);
685 
686 static inline void __update_task_entity_contrib(struct sched_entity *se);
687 
688 /* Give a new task initial runnable-average values so it starts out fully loaded */
689 void init_task_runnable_average(struct task_struct *p)
690 {
691 	u32 slice;
692 
693 	p->se.avg.decay_count = 0;
694 	slice = sched_slice(task_cfs_rq(p), &p->se) >> 10;
695 	p->se.avg.runnable_avg_sum = slice;
696 	p->se.avg.runnable_avg_period = slice;
697 	__update_task_entity_contrib(&p->se);
698 }
699 #else
700 void init_task_runnable_average(struct task_struct *p)
701 {
702 }
703 #endif
704 
705 /*
706  * Update the current task's runtime statistics. Skip current tasks that
707  * are not in our scheduling class.
708  */
709 static inline void
710 __update_curr(struct cfs_rq *cfs_rq, struct sched_entity *curr,
711 	      unsigned long delta_exec)
712 {
713 	unsigned long delta_exec_weighted;
714 
715 	schedstat_set(curr->statistics.exec_max,
716 		      max((u64)delta_exec, curr->statistics.exec_max));
717 
718 	curr->sum_exec_runtime += delta_exec;
719 	schedstat_add(cfs_rq, exec_clock, delta_exec);
720 	delta_exec_weighted = calc_delta_fair(delta_exec, curr);
721 
722 	curr->vruntime += delta_exec_weighted;
723 	update_min_vruntime(cfs_rq);
724 }
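/*
 * Illustrative consequence of the weighting above: vruntime advances at a
 * rate of roughly NICE_0_LOAD / weight, so an entity of weight 2048 that
 * runs for 2ms of wall-clock time accrues only ~1ms of vruntime, while a
 * weight-512 entity accrues ~4ms. Heavier entities therefore accumulate
 * vruntime more slowly and end up receiving proportionally more CPU time.
 */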
725 
726 static void update_curr(struct cfs_rq *cfs_rq)
727 {
728 	struct sched_entity *curr = cfs_rq->curr;
729 	u64 now = rq_clock_task(rq_of(cfs_rq));
730 	unsigned long delta_exec;
731 
732 	if (unlikely(!curr))
733 		return;
734 
735 	/*
736 	 * Get the amount of time the current task was running
737 	 * since the last time we changed load (this cannot
738 	 * overflow on 32 bits):
739 	 */
740 	delta_exec = (unsigned long)(now - curr->exec_start);
741 	if (!delta_exec)
742 		return;
743 
744 	__update_curr(cfs_rq, curr, delta_exec);
745 	curr->exec_start = now;
746 
747 	if (entity_is_task(curr)) {
748 		struct task_struct *curtask = task_of(curr);
749 
750 		trace_sched_stat_runtime(curtask, delta_exec, curr->vruntime);
751 		cpuacct_charge(curtask, delta_exec);
752 		account_group_exec_runtime(curtask, delta_exec);
753 	}
754 
755 	account_cfs_rq_runtime(cfs_rq, delta_exec);
756 }
757 
758 static inline void
759 update_stats_wait_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
760 {
761 	schedstat_set(se->statistics.wait_start, rq_clock(rq_of(cfs_rq)));
762 }
763 
764 /*
765  * Task is being enqueued - update stats:
766  */
767 static void update_stats_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
768 {
769 	/*
770 	 * Are we enqueueing a waiting task? (for current tasks
771 	 * a dequeue/enqueue event is a NOP)
772 	 */
773 	if (se != cfs_rq->curr)
774 		update_stats_wait_start(cfs_rq, se);
775 }
776 
777 static void
778 update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se)
779 {
780 	schedstat_set(se->statistics.wait_max, max(se->statistics.wait_max,
781 			rq_clock(rq_of(cfs_rq)) - se->statistics.wait_start));
782 	schedstat_set(se->statistics.wait_count, se->statistics.wait_count + 1);
783 	schedstat_set(se->statistics.wait_sum, se->statistics.wait_sum +
784 			rq_clock(rq_of(cfs_rq)) - se->statistics.wait_start);
785 #ifdef CONFIG_SCHEDSTATS
786 	if (entity_is_task(se)) {
787 		trace_sched_stat_wait(task_of(se),
788 			rq_clock(rq_of(cfs_rq)) - se->statistics.wait_start);
789 	}
790 #endif
791 	schedstat_set(se->statistics.wait_start, 0);
792 }
793 
794 static inline void
795 update_stats_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
796 {
797 	/*
798 	 * Mark the end of the wait period if dequeueing a
799 	 * waiting task:
800 	 */
801 	if (se != cfs_rq->curr)
802 		update_stats_wait_end(cfs_rq, se);
803 }
804 
805 /*
806  * We are picking a new current task - update its stats:
807  */
808 static inline void
809 update_stats_curr_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
810 {
811 	/*
812 	 * We are starting a new run period:
813 	 */
814 	se->exec_start = rq_clock_task(rq_of(cfs_rq));
815 }
816 
817 /**************************************************
818  * Scheduling class queueing methods:
819  */
820 
821 #ifdef CONFIG_NUMA_BALANCING
822 /*
823  * Approximate time, in ms, to scan a task's full address space for NUMA
824  * balancing. The task scan period is calculated based on the task's
825  * virtual memory size and numa_balancing_scan_size.
826  */
827 unsigned int sysctl_numa_balancing_scan_period_min = 1000;
828 unsigned int sysctl_numa_balancing_scan_period_max = 60000;
829 
830 /* Portion of address space to scan in MB */
831 unsigned int sysctl_numa_balancing_scan_size = 256;
832 
833 /* Scan @scan_size MB every @scan_period after an initial @scan_delay in ms */
834 unsigned int sysctl_numa_balancing_scan_delay = 1000;
835 
836 /*
837  * After skipping a page migration on a shared page, skip N more numa page
838  * migrations unconditionally. This reduces the number of NUMA migrations
839  * in shared memory workloads, and has the effect of pulling tasks towards
840  * where their memory lives, over pulling the memory towards the task.
841  */
842 unsigned int sysctl_numa_balancing_migrate_deferred = 16;
843 
844 static unsigned int task_nr_scan_windows(struct task_struct *p)
845 {
846 	unsigned long rss = 0;
847 	unsigned long nr_scan_pages;
848 
849 	/*
850 	 * Calculations are based on RSS as non-present and empty pages are
851 	 * skipped by the PTE scanner and NUMA hinting faults should be trapped
852 	 * based on resident pages.
853 	 */
854 	nr_scan_pages = sysctl_numa_balancing_scan_size << (20 - PAGE_SHIFT);
855 	rss = get_mm_rss(p->mm);
856 	if (!rss)
857 		rss = nr_scan_pages;
858 
859 	rss = round_up(rss, nr_scan_pages);
860 	return rss / nr_scan_pages;
861 }
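/*
 * Worked example (illustrative, assuming 4K pages): with the default 256MB
 * scan size, nr_scan_pages = 256 << (20 - 12) = 65536 pages. A task with a
 * 1GB RSS (262144 pages) therefore needs 262144 / 65536 = 4 scan windows to
 * cover its resident memory.
 */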
862 
863 /* For sanity's sake, never scan more PTEs than MAX_SCAN_WINDOW MB/sec. */
864 #define MAX_SCAN_WINDOW 2560
865 
866 static unsigned int task_scan_min(struct task_struct *p)
867 {
868 	unsigned int scan, floor;
869 	unsigned int windows = 1;
870 
871 	if (sysctl_numa_balancing_scan_size < MAX_SCAN_WINDOW)
872 		windows = MAX_SCAN_WINDOW / sysctl_numa_balancing_scan_size;
873 	floor = 1000 / windows;
874 
875 	scan = sysctl_numa_balancing_scan_period_min / task_nr_scan_windows(p);
876 	return max_t(unsigned int, floor, scan);
877 }
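/*
 * Continuing the example above (illustrative): scan_size = 256MB gives
 * windows = 2560 / 256 = 10 and a floor of 1000 / 10 = 100ms, while a task
 * needing 4 scan windows gets scan = 1000ms / 4 = 250ms, so its minimum
 * scan period is max(100, 250) = 250ms.
 */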
878 
879 static unsigned int task_scan_max(struct task_struct *p)
880 {
881 	unsigned int smin = task_scan_min(p);
882 	unsigned int smax;
883 
884 	/* Watch for min being lower than max due to floor calculations */
885 	smax = sysctl_numa_balancing_scan_period_max / task_nr_scan_windows(p);
886 	return max(smin, smax);
887 }
888 
889 /*
890  * Once a preferred node is selected the scheduler balancer will prefer moving
891  * a task to that node for sysctl_numa_balancing_settle_count number of PTE
892  * scans. This will give the process the chance to accumulate more faults on
893  * the preferred node but still allow the scheduler to move the task again if
894  * the node's CPUs are overloaded.
895  */
896 unsigned int sysctl_numa_balancing_settle_count __read_mostly = 4;
897 
898 static void account_numa_enqueue(struct rq *rq, struct task_struct *p)
899 {
900 	rq->nr_numa_running += (p->numa_preferred_nid != -1);
901 	rq->nr_preferred_running += (p->numa_preferred_nid == task_node(p));
902 }
903 
904 static void account_numa_dequeue(struct rq *rq, struct task_struct *p)
905 {
906 	rq->nr_numa_running -= (p->numa_preferred_nid != -1);
907 	rq->nr_preferred_running -= (p->numa_preferred_nid == task_node(p));
908 }
909 
910 struct numa_group {
911 	atomic_t refcount;
912 
913 	spinlock_t lock; /* nr_tasks, tasks */
914 	int nr_tasks;
915 	pid_t gid;
916 	struct list_head task_list;
917 
918 	struct rcu_head rcu;
919 	unsigned long total_faults;
920 	unsigned long faults[0];
921 };
922 
923 pid_t task_numa_group_id(struct task_struct *p)
924 {
925 	return p->numa_group ? p->numa_group->gid : 0;
926 }
927 
928 static inline int task_faults_idx(int nid, int priv)
929 {
930 	return 2 * nid + priv;
931 }
932 
933 static inline unsigned long task_faults(struct task_struct *p, int nid)
934 {
935 	if (!p->numa_faults)
936 		return 0;
937 
938 	return p->numa_faults[task_faults_idx(nid, 0)] +
939 		p->numa_faults[task_faults_idx(nid, 1)];
940 }
941 
942 static inline unsigned long group_faults(struct task_struct *p, int nid)
943 {
944 	if (!p->numa_group)
945 		return 0;
946 
947 	return p->numa_group->faults[2*nid] + p->numa_group->faults[2*nid+1];
948 }
949 
950 /*
951  * These return the fraction of accesses done by a particular task, or
952  * task group, on a particular numa node.  The group weight is given a
953  * larger multiplier, in order to group tasks together that are almost
954  * evenly spread out between numa nodes.
955  */
956 static inline unsigned long task_weight(struct task_struct *p, int nid)
957 {
958 	unsigned long total_faults;
959 
960 	if (!p->numa_faults)
961 		return 0;
962 
963 	total_faults = p->total_numa_faults;
964 
965 	if (!total_faults)
966 		return 0;
967 
968 	return 1000 * task_faults(p, nid) / total_faults;
969 }
970 
971 static inline unsigned long group_weight(struct task_struct *p, int nid)
972 {
973 	if (!p->numa_group || !p->numa_group->total_faults)
974 		return 0;
975 
976 	return 1000 * group_faults(p, nid) / p->numa_group->total_faults;
977 }
978 
979 static unsigned long weighted_cpuload(const int cpu);
980 static unsigned long source_load(int cpu, int type);
981 static unsigned long target_load(int cpu, int type);
982 static unsigned long power_of(int cpu);
983 static long effective_load(struct task_group *tg, int cpu, long wl, long wg);
984 
985 /* Cached statistics for all CPUs within a node */
986 struct numa_stats {
987 	unsigned long nr_running;
988 	unsigned long load;
989 
990 	/* Total compute capacity of CPUs on a node */
991 	unsigned long power;
992 
993 	/* Approximate capacity in terms of runnable tasks on a node */
994 	unsigned long capacity;
995 	int has_capacity;
996 };
997 
998 /*
999  * XXX borrowed from update_sg_lb_stats
1000  */
1001 static void update_numa_stats(struct numa_stats *ns, int nid)
1002 {
1003 	int cpu, cpus = 0;
1004 
1005 	memset(ns, 0, sizeof(*ns));
1006 	for_each_cpu(cpu, cpumask_of_node(nid)) {
1007 		struct rq *rq = cpu_rq(cpu);
1008 
1009 		ns->nr_running += rq->nr_running;
1010 		ns->load += weighted_cpuload(cpu);
1011 		ns->power += power_of(cpu);
1012 
1013 		cpus++;
1014 	}
1015 
1016 	/*
1017 	 * If we raced with hotplug and there are no CPUs left in our mask
1018 	 * the @ns structure is NULL'ed and task_numa_compare() will
1019 	 * the @ns structure stays zeroed and task_numa_compare() will
1020 	 *
1021 	 * We'll either bail at !has_capacity, or we'll detect a huge imbalance
1022 	 * and bail there.
1023 	 */
1024 	if (!cpus)
1025 		return;
1026 
1027 	ns->load = (ns->load * SCHED_POWER_SCALE) / ns->power;
1028 	ns->capacity = DIV_ROUND_CLOSEST(ns->power, SCHED_POWER_SCALE);
1029 	ns->has_capacity = (ns->nr_running < ns->capacity);
1030 }
1031 
1032 struct task_numa_env {
1033 	struct task_struct *p;
1034 
1035 	int src_cpu, src_nid;
1036 	int dst_cpu, dst_nid;
1037 
1038 	struct numa_stats src_stats, dst_stats;
1039 
1040 	int imbalance_pct, idx;
1041 
1042 	struct task_struct *best_task;
1043 	long best_imp;
1044 	int best_cpu;
1045 };
1046 
1047 static void task_numa_assign(struct task_numa_env *env,
1048 			     struct task_struct *p, long imp)
1049 {
1050 	if (env->best_task)
1051 		put_task_struct(env->best_task);
1052 	if (p)
1053 		get_task_struct(p);
1054 
1055 	env->best_task = p;
1056 	env->best_imp = imp;
1057 	env->best_cpu = env->dst_cpu;
1058 }
1059 
1060 /*
1061  * This checks if the overall compute and NUMA accesses of the system would
1062  * be improved if the source task was migrated to the target dst_cpu, taking
1063  * into account that it might be best if the task running on the dst_cpu is
1064  * exchanged with the source task.
1065  */
1066 static void task_numa_compare(struct task_numa_env *env,
1067 			      long taskimp, long groupimp)
1068 {
1069 	struct rq *src_rq = cpu_rq(env->src_cpu);
1070 	struct rq *dst_rq = cpu_rq(env->dst_cpu);
1071 	struct task_struct *cur;
1072 	long dst_load, src_load;
1073 	long load;
1074 	long imp = (groupimp > 0) ? groupimp : taskimp;
1075 
1076 	rcu_read_lock();
1077 	cur = ACCESS_ONCE(dst_rq->curr);
1078 	if (cur->pid == 0) /* idle */
1079 		cur = NULL;
1080 
1081 	/*
1082 	 * "imp" is the fault differential for the source task between the
1083 	 * source and destination node. Calculate the total differential for
1084 	 * the source task and potential destination task. The more negative
1085 	 * the value is, the more remote accesses would be expected to
1086 	 * be incurred if the tasks were swapped.
1087 	 */
1088 	if (cur) {
1089 		/* Skip this swap candidate if cannot move to the source cpu */
1090 		if (!cpumask_test_cpu(env->src_cpu, tsk_cpus_allowed(cur)))
1091 			goto unlock;
1092 
1093 		/*
1094 		 * If dst and source tasks are in the same NUMA group, or not
1095 		 * in any group then look only at task weights.
1096 		 */
1097 		if (cur->numa_group == env->p->numa_group) {
1098 			imp = taskimp + task_weight(cur, env->src_nid) -
1099 			      task_weight(cur, env->dst_nid);
1100 			/*
1101 			 * Add some hysteresis to prevent swapping the
1102 			 * tasks within a group over tiny differences.
1103 			 */
1104 			if (cur->numa_group)
1105 				imp -= imp/16;
1106 		} else {
1107 			/*
1108 			 * Compare the group weights. If a task is all by
1109 			 * itself (not part of a group), use the task weight
1110 			 * instead.
1111 			 */
1112 			if (env->p->numa_group)
1113 				imp = groupimp;
1114 			else
1115 				imp = taskimp;
1116 
1117 			if (cur->numa_group)
1118 				imp += group_weight(cur, env->src_nid) -
1119 				       group_weight(cur, env->dst_nid);
1120 			else
1121 				imp += task_weight(cur, env->src_nid) -
1122 				       task_weight(cur, env->dst_nid);
1123 		}
1124 	}
1125 
1126 	if (imp < env->best_imp)
1127 		goto unlock;
1128 
1129 	if (!cur) {
1130 		/* Is there capacity at our destination? */
1131 		if (env->src_stats.has_capacity &&
1132 		    !env->dst_stats.has_capacity)
1133 			goto unlock;
1134 
1135 		goto balance;
1136 	}
1137 
1138 	/* Balance doesn't matter much if we're running a task per cpu */
1139 	if (src_rq->nr_running == 1 && dst_rq->nr_running == 1)
1140 		goto assign;
1141 
1142 	/*
1143 	 * In the overloaded case, try and keep the load balanced.
1144 	 */
1145 balance:
1146 	dst_load = env->dst_stats.load;
1147 	src_load = env->src_stats.load;
1148 
1149 	/* XXX missing power terms */
1150 	load = task_h_load(env->p);
1151 	dst_load += load;
1152 	src_load -= load;
1153 
1154 	if (cur) {
1155 		load = task_h_load(cur);
1156 		dst_load -= load;
1157 		src_load += load;
1158 	}
1159 
1160 	/* make src_load the smaller */
1161 	if (dst_load < src_load)
1162 		swap(dst_load, src_load);
1163 
1164 	if (src_load * env->imbalance_pct < dst_load * 100)
1165 		goto unlock;
1166 
1167 assign:
1168 	task_numa_assign(env, cur, imp);
1169 unlock:
1170 	rcu_read_unlock();
1171 }
1172 
1173 static void task_numa_find_cpu(struct task_numa_env *env,
1174 				long taskimp, long groupimp)
1175 {
1176 	int cpu;
1177 
1178 	for_each_cpu(cpu, cpumask_of_node(env->dst_nid)) {
1179 		/* Skip this CPU if the source task cannot migrate */
1180 		if (!cpumask_test_cpu(cpu, tsk_cpus_allowed(env->p)))
1181 			continue;
1182 
1183 		env->dst_cpu = cpu;
1184 		task_numa_compare(env, taskimp, groupimp);
1185 	}
1186 }
1187 
1188 static int task_numa_migrate(struct task_struct *p)
1189 {
1190 	struct task_numa_env env = {
1191 		.p = p,
1192 
1193 		.src_cpu = task_cpu(p),
1194 		.src_nid = task_node(p),
1195 
1196 		.imbalance_pct = 112,
1197 
1198 		.best_task = NULL,
1199 		.best_imp = 0,
1200 		.best_cpu = -1
1201 	};
1202 	struct sched_domain *sd;
1203 	unsigned long taskweight, groupweight;
1204 	int nid, ret;
1205 	long taskimp, groupimp;
1206 
1207 	/*
1208 	 * Pick the lowest SD_NUMA domain, as that would have the smallest
1209 	 * imbalance and would be the first to start moving tasks about.
1210 	 *
1211 	 * And we want to avoid any moving of tasks about, as that would create
1212 	 * random movement of tasks -- countering the numa conditions we're trying
1213 	 * to satisfy here.
1214 	 */
1215 	rcu_read_lock();
1216 	sd = rcu_dereference(per_cpu(sd_numa, env.src_cpu));
1217 	if (sd)
1218 		env.imbalance_pct = 100 + (sd->imbalance_pct - 100) / 2;
1219 	rcu_read_unlock();
1220 
1221 	/*
1222 	 * Cpusets can break the scheduler domain tree into smaller
1223 	 * balance domains, some of which do not cross NUMA boundaries.
1224 	 * Tasks that are "trapped" in such domains cannot be migrated
1225 	 * elsewhere, so there is no point in (re)trying.
1226 	 */
1227 	if (unlikely(!sd)) {
1228 		p->numa_preferred_nid = cpu_to_node(task_cpu(p));
1229 		return -EINVAL;
1230 	}
1231 
1232 	taskweight = task_weight(p, env.src_nid);
1233 	groupweight = group_weight(p, env.src_nid);
1234 	update_numa_stats(&env.src_stats, env.src_nid);
1235 	env.dst_nid = p->numa_preferred_nid;
1236 	taskimp = task_weight(p, env.dst_nid) - taskweight;
1237 	groupimp = group_weight(p, env.dst_nid) - groupweight;
1238 	update_numa_stats(&env.dst_stats, env.dst_nid);
1239 
1240 	/* If the preferred nid has capacity, try to use it. */
1241 	if (env.dst_stats.has_capacity)
1242 		task_numa_find_cpu(&env, taskimp, groupimp);
1243 
1244 	/* No space available on the preferred nid. Look elsewhere. */
1245 	if (env.best_cpu == -1) {
1246 		for_each_online_node(nid) {
1247 			if (nid == env.src_nid || nid == p->numa_preferred_nid)
1248 				continue;
1249 
1250 			/* Only consider nodes where both task and groups benefit */
1251 			taskimp = task_weight(p, nid) - taskweight;
1252 			groupimp = group_weight(p, nid) - groupweight;
1253 			if (taskimp < 0 && groupimp < 0)
1254 				continue;
1255 
1256 			env.dst_nid = nid;
1257 			update_numa_stats(&env.dst_stats, env.dst_nid);
1258 			task_numa_find_cpu(&env, taskimp, groupimp);
1259 		}
1260 	}
1261 
1262 	/* No better CPU than the current one was found. */
1263 	if (env.best_cpu == -1)
1264 		return -EAGAIN;
1265 
1266 	sched_setnuma(p, env.dst_nid);
1267 
1268 	/*
1269 	 * Reset the scan period if the task is being rescheduled on an
1270 	 * alternative node to recheck if the task is now properly placed.
1271 	 */
1272 	p->numa_scan_period = task_scan_min(p);
1273 
1274 	if (env.best_task == NULL) {
1275 		int ret = migrate_task_to(p, env.best_cpu);
1276 		return ret;
1277 	}
1278 
1279 	ret = migrate_swap(p, env.best_task);
1280 	put_task_struct(env.best_task);
1281 	return ret;
1282 }
1283 
1284 /* Attempt to migrate a task to a CPU on the preferred node. */
1285 static void numa_migrate_preferred(struct task_struct *p)
1286 {
1287 	/* This task has no NUMA fault statistics yet */
1288 	if (unlikely(p->numa_preferred_nid == -1 || !p->numa_faults))
1289 		return;
1290 
1291 	/* Periodically retry migrating the task to the preferred node */
1292 	p->numa_migrate_retry = jiffies + HZ;
1293 
1294 	/* Success if task is already running on preferred CPU */
1295 	if (cpu_to_node(task_cpu(p)) == p->numa_preferred_nid)
1296 		return;
1297 
1298 	/* Otherwise, try migrate to a CPU on the preferred node */
1299 	task_numa_migrate(p);
1300 }
1301 
1302 /*
1303  * When adapting the scan rate, the period is divided into NUMA_PERIOD_SLOTS
1304  * increments. The more local the fault statistics are, the higher the scan
1305  * period will be for the next scan window. If the local/remote ratio is below
1306  * NUMA_PERIOD_THRESHOLD (where the range of the ratio is 1..NUMA_PERIOD_SLOTS)
1307  * the scan period will decrease.
1308  */
1309 #define NUMA_PERIOD_SLOTS 10
1310 #define NUMA_PERIOD_THRESHOLD 3
1311 
1312 /*
1313  * Increase the scan period (slow down scanning) if the majority of
1314  * our memory is already on our local node, or if the majority of
1315  * the page accesses are shared with other processes.
1316  * Otherwise, decrease the scan period.
1317  */
1318 static void update_task_scan_period(struct task_struct *p,
1319 			unsigned long shared, unsigned long private)
1320 {
1321 	unsigned int period_slot;
1322 	int ratio;
1323 	int diff;
1324 
1325 	unsigned long remote = p->numa_faults_locality[0];
1326 	unsigned long local = p->numa_faults_locality[1];
1327 
1328 	/*
1329 	 * If there were no recorded hinting faults then either the task is
1330 	 * completely idle or all activity is in areas that are not of interest
1331 	 * to automatic numa balancing. Scan slower.
1332 	 */
1333 	if (local + shared == 0) {
1334 		p->numa_scan_period = min(p->numa_scan_period_max,
1335 			p->numa_scan_period << 1);
1336 
1337 		p->mm->numa_next_scan = jiffies +
1338 			msecs_to_jiffies(p->numa_scan_period);
1339 
1340 		return;
1341 	}
1342 
1343 	/*
1344 	 * Prepare to scale scan period relative to the current period.
1345 	 *	 == NUMA_PERIOD_THRESHOLD scan period stays the same
1346 	 *       <  NUMA_PERIOD_THRESHOLD scan period decreases (scan faster)
1347 	 *	 >= NUMA_PERIOD_THRESHOLD scan period increases (scan slower)
1348 	 */
1349 	period_slot = DIV_ROUND_UP(p->numa_scan_period, NUMA_PERIOD_SLOTS);
1350 	ratio = (local * NUMA_PERIOD_SLOTS) / (local + remote);
1351 	if (ratio >= NUMA_PERIOD_THRESHOLD) {
1352 		int slot = ratio - NUMA_PERIOD_THRESHOLD;
1353 		if (!slot)
1354 			slot = 1;
1355 		diff = slot * period_slot;
1356 	} else {
1357 		diff = -(NUMA_PERIOD_THRESHOLD - ratio) * period_slot;
1358 
1359 		/*
1360 		 * Scale scan rate increases based on sharing. There is an
1361 		 * inverse relationship between the degree of sharing and
1362 		 * the adjustment made to the scanning period. Broadly
1363 		 * speaking the intent is that there is little point
1364 		 * scanning faster if shared accesses dominate as it may
1365 		 * simply bounce migrations uselessly
1366 		 */
1367 		period_slot = DIV_ROUND_UP(diff, NUMA_PERIOD_SLOTS);
1368 		ratio = DIV_ROUND_UP(private * NUMA_PERIOD_SLOTS, (private + shared));
1369 		diff = (diff * ratio) / NUMA_PERIOD_SLOTS;
1370 	}
1371 
1372 	p->numa_scan_period = clamp(p->numa_scan_period + diff,
1373 			task_scan_min(p), task_scan_max(p));
1374 	memset(p->numa_faults_locality, 0, sizeof(p->numa_faults_locality));
1375 }
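/*
 * Worked example (illustrative): with numa_scan_period = 1000ms,
 * period_slot = 100ms. If 70% of the recorded faults were local, ratio = 7
 * >= NUMA_PERIOD_THRESHOLD, slot = 4 and the period grows by 400ms (scan
 * slower). If only 10% were local, ratio = 1 and the raw adjustment is
 * -(3 - 1) * 100ms = -200ms, which is then scaled by the
 * private/(private + shared) ratio, e.g. halved to -100ms when private and
 * shared faults are equal (scan faster, but less so for shared accesses).
 */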
1376 
1377 static void task_numa_placement(struct task_struct *p)
1378 {
1379 	int seq, nid, max_nid = -1, max_group_nid = -1;
1380 	unsigned long max_faults = 0, max_group_faults = 0;
1381 	unsigned long fault_types[2] = { 0, 0 };
1382 	spinlock_t *group_lock = NULL;
1383 
1384 	seq = ACCESS_ONCE(p->mm->numa_scan_seq);
1385 	if (p->numa_scan_seq == seq)
1386 		return;
1387 	p->numa_scan_seq = seq;
1388 	p->numa_scan_period_max = task_scan_max(p);
1389 
1390 	/* If the task is part of a group prevent parallel updates to group stats */
1391 	if (p->numa_group) {
1392 		group_lock = &p->numa_group->lock;
1393 		spin_lock(group_lock);
1394 	}
1395 
1396 	/* Find the node with the highest number of faults */
1397 	for_each_online_node(nid) {
1398 		unsigned long faults = 0, group_faults = 0;
1399 		int priv, i;
1400 
1401 		for (priv = 0; priv < 2; priv++) {
1402 			long diff;
1403 
1404 			i = task_faults_idx(nid, priv);
1405 			diff = -p->numa_faults[i];
1406 
1407 			/* Decay existing window, copy faults since last scan */
1408 			p->numa_faults[i] >>= 1;
1409 			p->numa_faults[i] += p->numa_faults_buffer[i];
1410 			fault_types[priv] += p->numa_faults_buffer[i];
1411 			p->numa_faults_buffer[i] = 0;
1412 
1413 			faults += p->numa_faults[i];
1414 			diff += p->numa_faults[i];
1415 			p->total_numa_faults += diff;
1416 			if (p->numa_group) {
1417 				/* safe because we can only change our own group */
1418 				p->numa_group->faults[i] += diff;
1419 				p->numa_group->total_faults += diff;
1420 				group_faults += p->numa_group->faults[i];
1421 			}
1422 		}
1423 
1424 		if (faults > max_faults) {
1425 			max_faults = faults;
1426 			max_nid = nid;
1427 		}
1428 
1429 		if (group_faults > max_group_faults) {
1430 			max_group_faults = group_faults;
1431 			max_group_nid = nid;
1432 		}
1433 	}
1434 
1435 	update_task_scan_period(p, fault_types[0], fault_types[1]);
1436 
1437 	if (p->numa_group) {
1438 		/*
1439 		 * If the preferred task and group nids are different,
1440 		 * iterate over the nodes again to find the best place.
1441 		 */
1442 		if (max_nid != max_group_nid) {
1443 			unsigned long weight, max_weight = 0;
1444 
1445 			for_each_online_node(nid) {
1446 				weight = task_weight(p, nid) + group_weight(p, nid);
1447 				if (weight > max_weight) {
1448 					max_weight = weight;
1449 					max_nid = nid;
1450 				}
1451 			}
1452 		}
1453 
1454 		spin_unlock(group_lock);
1455 	}
1456 
1457 	/* Preferred node as the node with the most faults */
1458 	if (max_faults && max_nid != p->numa_preferred_nid) {
1459 		/* Update the preferred nid and migrate task if possible */
1460 		sched_setnuma(p, max_nid);
1461 		numa_migrate_preferred(p);
1462 	}
1463 }
1464 
1465 static inline int get_numa_group(struct numa_group *grp)
1466 {
1467 	return atomic_inc_not_zero(&grp->refcount);
1468 }
1469 
1470 static inline void put_numa_group(struct numa_group *grp)
1471 {
1472 	if (atomic_dec_and_test(&grp->refcount))
1473 		kfree_rcu(grp, rcu);
1474 }
1475 
1476 static void task_numa_group(struct task_struct *p, int cpupid, int flags,
1477 			int *priv)
1478 {
1479 	struct numa_group *grp, *my_grp;
1480 	struct task_struct *tsk;
1481 	bool join = false;
1482 	int cpu = cpupid_to_cpu(cpupid);
1483 	int i;
1484 
1485 	if (unlikely(!p->numa_group)) {
1486 		unsigned int size = sizeof(struct numa_group) +
1487 				    2*nr_node_ids*sizeof(unsigned long);
1488 
1489 		grp = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);
1490 		if (!grp)
1491 			return;
1492 
1493 		atomic_set(&grp->refcount, 1);
1494 		spin_lock_init(&grp->lock);
1495 		INIT_LIST_HEAD(&grp->task_list);
1496 		grp->gid = p->pid;
1497 
1498 		for (i = 0; i < 2*nr_node_ids; i++)
1499 			grp->faults[i] = p->numa_faults[i];
1500 
1501 		grp->total_faults = p->total_numa_faults;
1502 
1503 		list_add(&p->numa_entry, &grp->task_list);
1504 		grp->nr_tasks++;
1505 		rcu_assign_pointer(p->numa_group, grp);
1506 	}
1507 
1508 	rcu_read_lock();
1509 	tsk = ACCESS_ONCE(cpu_rq(cpu)->curr);
1510 
1511 	if (!cpupid_match_pid(tsk, cpupid))
1512 		goto no_join;
1513 
1514 	grp = rcu_dereference(tsk->numa_group);
1515 	if (!grp)
1516 		goto no_join;
1517 
1518 	my_grp = p->numa_group;
1519 	if (grp == my_grp)
1520 		goto no_join;
1521 
1522 	/*
1523 	 * Only join the other group if it's bigger; if we're the bigger group,
1524 	 * the other task will join us.
1525 	 */
1526 	if (my_grp->nr_tasks > grp->nr_tasks)
1527 		goto no_join;
1528 
1529 	/*
1530 	 * Tie-break on the grp address.
1531 	 */
1532 	if (my_grp->nr_tasks == grp->nr_tasks && my_grp > grp)
1533 		goto no_join;
1534 
1535 	/* Always join threads in the same process. */
1536 	if (tsk->mm == current->mm)
1537 		join = true;
1538 
1539 	/* Simple filter to avoid false positives due to PID collisions */
1540 	if (flags & TNF_SHARED)
1541 		join = true;
1542 
1543 	/* Update priv based on whether false sharing was detected */
1544 	*priv = !join;
1545 
1546 	if (join && !get_numa_group(grp))
1547 		goto no_join;
1548 
1549 	rcu_read_unlock();
1550 
1551 	if (!join)
1552 		return;
1553 
1554 	double_lock(&my_grp->lock, &grp->lock);
1555 
1556 	for (i = 0; i < 2*nr_node_ids; i++) {
1557 		my_grp->faults[i] -= p->numa_faults[i];
1558 		grp->faults[i] += p->numa_faults[i];
1559 	}
1560 	my_grp->total_faults -= p->total_numa_faults;
1561 	grp->total_faults += p->total_numa_faults;
1562 
1563 	list_move(&p->numa_entry, &grp->task_list);
1564 	my_grp->nr_tasks--;
1565 	grp->nr_tasks++;
1566 
1567 	spin_unlock(&my_grp->lock);
1568 	spin_unlock(&grp->lock);
1569 
1570 	rcu_assign_pointer(p->numa_group, grp);
1571 
1572 	put_numa_group(my_grp);
1573 	return;
1574 
1575 no_join:
1576 	rcu_read_unlock();
1577 	return;
1578 }
1579 
1580 void task_numa_free(struct task_struct *p)
1581 {
1582 	struct numa_group *grp = p->numa_group;
1583 	int i;
1584 	void *numa_faults = p->numa_faults;
1585 
1586 	if (grp) {
1587 		spin_lock(&grp->lock);
1588 		for (i = 0; i < 2*nr_node_ids; i++)
1589 			grp->faults[i] -= p->numa_faults[i];
1590 		grp->total_faults -= p->total_numa_faults;
1591 
1592 		list_del(&p->numa_entry);
1593 		grp->nr_tasks--;
1594 		spin_unlock(&grp->lock);
1595 		rcu_assign_pointer(p->numa_group, NULL);
1596 		put_numa_group(grp);
1597 	}
1598 
1599 	p->numa_faults = NULL;
1600 	p->numa_faults_buffer = NULL;
1601 	kfree(numa_faults);
1602 }
1603 
1604 /*
1605  * Got a PROT_NONE fault for a page on @node.
1606  */
1607 void task_numa_fault(int last_cpupid, int node, int pages, int flags)
1608 {
1609 	struct task_struct *p = current;
1610 	bool migrated = flags & TNF_MIGRATED;
1611 	int priv;
1612 
1613 	if (!numabalancing_enabled)
1614 		return;
1615 
1616 	/* for example, ksmd faulting in a user's mm */
1617 	if (!p->mm)
1618 		return;
1619 
1620 	/* Do not worry about placement if exiting */
1621 	if (p->state == TASK_DEAD)
1622 		return;
1623 
1624 	/* Allocate buffer to track faults on a per-node basis */
1625 	if (unlikely(!p->numa_faults)) {
1626 		int size = sizeof(*p->numa_faults) * 2 * nr_node_ids;
1627 
1628 		/* numa_faults and numa_faults_buffer share the allocation */
1629 		p->numa_faults = kzalloc(size * 2, GFP_KERNEL|__GFP_NOWARN);
1630 		if (!p->numa_faults)
1631 			return;
1632 
1633 		BUG_ON(p->numa_faults_buffer);
1634 		p->numa_faults_buffer = p->numa_faults + (2 * nr_node_ids);
1635 		p->total_numa_faults = 0;
1636 		memset(p->numa_faults_locality, 0, sizeof(p->numa_faults_locality));
1637 	}
1638 
1639 	/*
1640 	 * First accesses are treated as private, otherwise consider accesses
1641 	 * to be private if the accessing pid has not changed
1642 	 */
1643 	if (unlikely(last_cpupid == (-1 & LAST_CPUPID_MASK))) {
1644 		priv = 1;
1645 	} else {
1646 		priv = cpupid_match_pid(p, last_cpupid);
1647 		if (!priv && !(flags & TNF_NO_GROUP))
1648 			task_numa_group(p, last_cpupid, flags, &priv);
1649 	}
1650 
1651 	task_numa_placement(p);
1652 
1653 	/*
1654 	 * Retry task to preferred node migration periodically, in case it
1655 	 * previously failed, or the scheduler moved us.
1656 	 */
1657 	if (time_after(jiffies, p->numa_migrate_retry))
1658 		numa_migrate_preferred(p);
1659 
1660 	if (migrated)
1661 		p->numa_pages_migrated += pages;
1662 
1663 	p->numa_faults_buffer[task_faults_idx(node, priv)] += pages;
1664 	p->numa_faults_locality[!!(flags & TNF_FAULT_LOCAL)] += pages;
1665 }
1666 
1667 static void reset_ptenuma_scan(struct task_struct *p)
1668 {
1669 	ACCESS_ONCE(p->mm->numa_scan_seq)++;
1670 	p->mm->numa_scan_offset = 0;
1671 }
1672 
1673 /*
1674  * The expensive part of numa migration is done from task_work context.
1675  * Triggered from task_tick_numa().
1676  */
1677 void task_numa_work(struct callback_head *work)
1678 {
1679 	unsigned long migrate, next_scan, now = jiffies;
1680 	struct task_struct *p = current;
1681 	struct mm_struct *mm = p->mm;
1682 	struct vm_area_struct *vma;
1683 	unsigned long start, end;
1684 	unsigned long nr_pte_updates = 0;
1685 	long pages;
1686 
1687 	WARN_ON_ONCE(p != container_of(work, struct task_struct, numa_work));
1688 
1689 	work->next = work; /* protect against double add */
1690 	/*
1691 	 * Who cares about NUMA placement when they're dying.
1692 	 *
1693 	 * NOTE: make sure not to dereference p->mm before this check,
1694 	 * exit_task_work() happens _after_ exit_mm() so we could be called
1695 	 * without p->mm even though we still had it when we enqueued this
1696 	 * work.
1697 	 */
1698 	if (p->flags & PF_EXITING)
1699 		return;
1700 
1701 	if (!mm->numa_next_scan) {
1702 		mm->numa_next_scan = now +
1703 			msecs_to_jiffies(sysctl_numa_balancing_scan_delay);
1704 	}
1705 
1706 	/*
1707 	 * Enforce maximal scan/migration frequency..
1708 	 */
1709 	migrate = mm->numa_next_scan;
1710 	if (time_before(now, migrate))
1711 		return;
1712 
1713 	if (p->numa_scan_period == 0) {
1714 		p->numa_scan_period_max = task_scan_max(p);
1715 		p->numa_scan_period = task_scan_min(p);
1716 	}
1717 
1718 	next_scan = now + msecs_to_jiffies(p->numa_scan_period);
1719 	if (cmpxchg(&mm->numa_next_scan, migrate, next_scan) != migrate)
1720 		return;
1721 
1722 	/*
1723 	 * Delay this task enough that another task of this mm will likely win
1724 	 * the next time around.
1725 	 */
1726 	p->node_stamp += 2 * TICK_NSEC;
1727 
1728 	start = mm->numa_scan_offset;
1729 	pages = sysctl_numa_balancing_scan_size;
1730 	pages <<= 20 - PAGE_SHIFT; /* MB in pages */
1731 	if (!pages)
1732 		return;
1733 
1734 	down_read(&mm->mmap_sem);
1735 	vma = find_vma(mm, start);
1736 	if (!vma) {
1737 		reset_ptenuma_scan(p);
1738 		start = 0;
1739 		vma = mm->mmap;
1740 	}
1741 	for (; vma; vma = vma->vm_next) {
1742 		if (!vma_migratable(vma) || !vma_policy_mof(p, vma))
1743 			continue;
1744 
1745 		/*
1746 		 * Shared library pages mapped by multiple processes are not
1747 		 * migrated as it is expected they are cache replicated. Avoid
1748 		 * hinting faults in read-only file-backed mappings or the vdso
1749 		 * as migrating the pages will be of marginal benefit.
1750 		 */
1751 		if (!vma->vm_mm ||
1752 		    (vma->vm_file && (vma->vm_flags & (VM_READ|VM_WRITE)) == (VM_READ)))
1753 			continue;
1754 
1755 		do {
1756 			start = max(start, vma->vm_start);
1757 			end = ALIGN(start + (pages << PAGE_SHIFT), HPAGE_SIZE);
1758 			end = min(end, vma->vm_end);
1759 			nr_pte_updates += change_prot_numa(vma, start, end);
1760 
1761 			/*
1762 			 * Scan sysctl_numa_balancing_scan_size but ensure that
1763 			 * at least one PTE is updated so that unused virtual
1764 			 * address space is quickly skipped.
1765 			 */
1766 			if (nr_pte_updates)
1767 				pages -= (end - start) >> PAGE_SHIFT;
1768 
1769 			start = end;
1770 			if (pages <= 0)
1771 				goto out;
1772 		} while (end != vma->vm_end);
1773 	}
1774 
1775 out:
1776 	/*
1777 	 * It is possible to reach the end of the VMA list but the last few
1778 	 * VMAs are not guaranteed to be vma_migratable. If they are not, we
1779 	 * would find the !migratable VMA on the next scan but not reset the
1780 	 * scanner to the start so check it now.
1781 	 */
1782 	if (vma)
1783 		mm->numa_scan_offset = start;
1784 	else
1785 		reset_ptenuma_scan(p);
1786 	up_read(&mm->mmap_sem);
1787 }
1788 
1789 /*
1790  * Drive the periodic memory faults..
1791  */
1792 void task_tick_numa(struct rq *rq, struct task_struct *curr)
1793 {
1794 	struct callback_head *work = &curr->numa_work;
1795 	u64 period, now;
1796 
1797 	/*
1798 	 * We don't care about NUMA placement if we don't have memory.
1799 	 */
1800 	if (!curr->mm || (curr->flags & PF_EXITING) || work->next != work)
1801 		return;
1802 
1803 	/*
1804 	 * Using runtime rather than walltime has the dual advantage that
1805 	 * we (mostly) drive the selection from busy threads and that the
1806 	 * task needs to have done some actual work before we bother with
1807 	 * NUMA placement.
1808 	 */
1809 	now = curr->se.sum_exec_runtime;
1810 	period = (u64)curr->numa_scan_period * NSEC_PER_MSEC;
1811 
1812 	if (now - curr->node_stamp > period) {
1813 		if (!curr->node_stamp)
1814 			curr->numa_scan_period = task_scan_min(curr);
1815 		curr->node_stamp += period;
1816 
1817 		if (!time_before(jiffies, curr->mm->numa_next_scan)) {
1818 			init_task_work(work, task_numa_work); /* TODO: move this into sched_fork() */
1819 			task_work_add(curr, work, true);
1820 		}
1821 	}
1822 }
1823 #else
1824 static void task_tick_numa(struct rq *rq, struct task_struct *curr)
1825 {
1826 }
1827 
1828 static inline void account_numa_enqueue(struct rq *rq, struct task_struct *p)
1829 {
1830 }
1831 
1832 static inline void account_numa_dequeue(struct rq *rq, struct task_struct *p)
1833 {
1834 }
1835 #endif /* CONFIG_NUMA_BALANCING */
1836 
1837 static void
1838 account_entity_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
1839 {
1840 	update_load_add(&cfs_rq->load, se->load.weight);
1841 	if (!parent_entity(se))
1842 		update_load_add(&rq_of(cfs_rq)->load, se->load.weight);
1843 #ifdef CONFIG_SMP
1844 	if (entity_is_task(se)) {
1845 		struct rq *rq = rq_of(cfs_rq);
1846 
1847 		account_numa_enqueue(rq, task_of(se));
1848 		list_add(&se->group_node, &rq->cfs_tasks);
1849 	}
1850 #endif
1851 	cfs_rq->nr_running++;
1852 }
1853 
1854 static void
1855 account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
1856 {
1857 	update_load_sub(&cfs_rq->load, se->load.weight);
1858 	if (!parent_entity(se))
1859 		update_load_sub(&rq_of(cfs_rq)->load, se->load.weight);
1860 	if (entity_is_task(se)) {
1861 		account_numa_dequeue(rq_of(cfs_rq), task_of(se));
1862 		list_del_init(&se->group_node);
1863 	}
1864 	cfs_rq->nr_running--;
1865 }
1866 
1867 #ifdef CONFIG_FAIR_GROUP_SCHED
1868 # ifdef CONFIG_SMP
1869 static inline long calc_tg_weight(struct task_group *tg, struct cfs_rq *cfs_rq)
1870 {
1871 	long tg_weight;
1872 
1873 	/*
1874 	 * Use this CPU's actual weight instead of the last load_contribution
1875 	 * to gain a more accurate current total weight. See
1876 	 * update_cfs_rq_load_contribution().
1877 	 */
1878 	tg_weight = atomic_long_read(&tg->load_avg);
1879 	tg_weight -= cfs_rq->tg_load_contrib;
1880 	tg_weight += cfs_rq->load.weight;
1881 
1882 	return tg_weight;
1883 }
1884 
1885 static long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg)
1886 {
1887 	long tg_weight, load, shares;
1888 
1889 	tg_weight = calc_tg_weight(tg, cfs_rq);
1890 	load = cfs_rq->load.weight;
1891 
1892 	shares = (tg->shares * load);
1893 	if (tg_weight)
1894 		shares /= tg_weight;
1895 
1896 	if (shares < MIN_SHARES)
1897 		shares = MIN_SHARES;
1898 	if (shares > tg->shares)
1899 		shares = tg->shares;
1900 
1901 	return shares;
1902 }
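
/*
 * Illustrative sketch, not part of the upstream source: the helper and the
 * numbers below only walk through the arithmetic calc_cfs_shares() performs.
 * With tg->shares == 1024, a local cfs_rq weight of 512 and a group-wide
 * weight of 2048, the per-cpu group entity receives 1024 * 512 / 2048 == 256,
 * clamped to [MIN_SHARES, tg->shares].
 */
#if 0	/* example only; the inputs are hypothetical */
static long example_cfs_shares(void)
{
	long tg_shares = 1024;		/* group's configured shares */
	long local_weight = 512;	/* this cfs_rq's load.weight */
	long tg_weight = 2048;		/* approximate group-wide weight */
	long shares = tg_shares * local_weight / tg_weight;

	if (shares < MIN_SHARES)
		shares = MIN_SHARES;
	if (shares > tg_shares)
		shares = tg_shares;

	return shares;			/* 256 for these inputs */
}
#endif
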
1903 # else /* CONFIG_SMP */
1904 static inline long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg)
1905 {
1906 	return tg->shares;
1907 }
1908 # endif /* CONFIG_SMP */
1909 static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
1910 			    unsigned long weight)
1911 {
1912 	if (se->on_rq) {
1913 		/* commit outstanding execution time */
1914 		if (cfs_rq->curr == se)
1915 			update_curr(cfs_rq);
1916 		account_entity_dequeue(cfs_rq, se);
1917 	}
1918 
1919 	update_load_set(&se->load, weight);
1920 
1921 	if (se->on_rq)
1922 		account_entity_enqueue(cfs_rq, se);
1923 }
1924 
1925 static inline int throttled_hierarchy(struct cfs_rq *cfs_rq);
1926 
1927 static void update_cfs_shares(struct cfs_rq *cfs_rq)
1928 {
1929 	struct task_group *tg;
1930 	struct sched_entity *se;
1931 	long shares;
1932 
1933 	tg = cfs_rq->tg;
1934 	se = tg->se[cpu_of(rq_of(cfs_rq))];
1935 	if (!se || throttled_hierarchy(cfs_rq))
1936 		return;
1937 #ifndef CONFIG_SMP
1938 	if (likely(se->load.weight == tg->shares))
1939 		return;
1940 #endif
1941 	shares = calc_cfs_shares(cfs_rq, tg);
1942 
1943 	reweight_entity(cfs_rq_of(se), se, shares);
1944 }
1945 #else /* CONFIG_FAIR_GROUP_SCHED */
1946 static inline void update_cfs_shares(struct cfs_rq *cfs_rq)
1947 {
1948 }
1949 #endif /* CONFIG_FAIR_GROUP_SCHED */
1950 
1951 #ifdef CONFIG_SMP
1952 /*
1953  * We choose a half-life close to 1 scheduling period.
1954  * Note: The tables below are dependent on this value.
1955  */
1956 #define LOAD_AVG_PERIOD 32
1957 #define LOAD_AVG_MAX 47742 /* maximum possible load avg */
1958 #define LOAD_AVG_MAX_N 345 /* number of full periods to produce LOAD_AVG_MAX */
1959 
1960 /* Precomputed fixed inverse multiplies for multiplication by y^n */
1961 static const u32 runnable_avg_yN_inv[] = {
1962 	0xffffffff, 0xfa83b2da, 0xf5257d14, 0xefe4b99a, 0xeac0c6e6, 0xe5b906e6,
1963 	0xe0ccdeeb, 0xdbfbb796, 0xd744fcc9, 0xd2a81d91, 0xce248c14, 0xc9b9bd85,
1964 	0xc5672a10, 0xc12c4cc9, 0xbd08a39e, 0xb8fbaf46, 0xb504f333, 0xb123f581,
1965 	0xad583ee9, 0xa9a15ab4, 0xa5fed6a9, 0xa2704302, 0x9ef5325f, 0x9b8d39b9,
1966 	0x9837f050, 0x94f4efa8, 0x91c3d373, 0x8ea4398a, 0x8b95c1e3, 0x88980e80,
1967 	0x85aac367, 0x82cd8698,
1968 };
1969 
1970 /*
1971  * Precomputed \Sum y^k { 1<=k<=n }.  These are floor(true_value) to prevent
1972  * over-estimates when re-combining.
1973  */
1974 static const u32 runnable_avg_yN_sum[] = {
1975 	    0, 1002, 1982, 2941, 3880, 4798, 5697, 6576, 7437, 8279, 9103,
1976 	 9909,10698,11470,12226,12966,13690,14398,15091,15769,16433,17082,
1977 	17718,18340,18949,19545,20128,20698,21256,21802,22336,22859,23371,
1978 };
1979 
1980 /*
1981  * Approximate:
1982  *   val * y^n,    where y^32 ~= 0.5 (~1 scheduling period)
1983  */
1984 static __always_inline u64 decay_load(u64 val, u64 n)
1985 {
1986 	unsigned int local_n;
1987 
1988 	if (!n)
1989 		return val;
1990 	else if (unlikely(n > LOAD_AVG_PERIOD * 63))
1991 		return 0;
1992 
1993 	/* after bounds checking we can collapse to 32-bit */
1994 	local_n = n;
1995 
1996 	/*
1997 	 * As y^PERIOD = 1/2, we can combine
1998 	 *    y^n = 1/2^(n/PERIOD) * y^(n%PERIOD)
1999 	 * With a look-up table which covers y^n (n<PERIOD)
2000 	 *
2001 	 * To achieve constant time decay_load.
2002 	 */
2003 	if (unlikely(local_n >= LOAD_AVG_PERIOD)) {
2004 		val >>= local_n / LOAD_AVG_PERIOD;
2005 		local_n %= LOAD_AVG_PERIOD;
2006 	}
2007 
2008 	val *= runnable_avg_yN_inv[local_n];
2009 	/* We don't use SRR here since we always want to round down. */
2010 	return val >> 32;
2011 }
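
/*
 * Illustrative sketch, not part of the upstream source: decaying a value
 * across a full period halves it, since y^32 == 1/2.  The helper below
 * mirrors the fixed-point multiply at the end of decay_load() for a
 * hypothetical n == 16 (half a period).
 */
#if 0	/* example only */
static u64 example_decay(void)
{
	u64 val = 1024;
	unsigned int n = 16;

	/* y^16 ~= 0.7071, so 1024 decays to roughly 724 */
	val *= runnable_avg_yN_inv[n];
	return val >> 32;
}
#endif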
2012 
2013 /*
2014  * For updates fully spanning n periods, the contribution to runnable
2015  * average will be: \Sum 1024*y^n
2016  *
2017  * We can compute this reasonably efficiently by combining:
2018  *   y^PERIOD = 1/2 with precomputed \Sum 1024*y^n {for  n <PERIOD}
2019  */
2020 static u32 __compute_runnable_contrib(u64 n)
2021 {
2022 	u32 contrib = 0;
2023 
2024 	if (likely(n <= LOAD_AVG_PERIOD))
2025 		return runnable_avg_yN_sum[n];
2026 	else if (unlikely(n >= LOAD_AVG_MAX_N))
2027 		return LOAD_AVG_MAX;
2028 
2029 	/* Compute \Sum y^n combining precomputed values for y^i, \Sum y^j */
2030 	do {
2031 		contrib /= 2; /* y^LOAD_AVG_PERIOD = 1/2 */
2032 		contrib += runnable_avg_yN_sum[LOAD_AVG_PERIOD];
2033 
2034 		n -= LOAD_AVG_PERIOD;
2035 	} while (n > LOAD_AVG_PERIOD);
2036 
2037 	contrib = decay_load(contrib, n);
2038 	return contrib + runnable_avg_yN_sum[n];
2039 }
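
/*
 * Illustrative sketch, not part of the upstream source: for a hypothetical
 * n == 40 the loop above runs once, leaving 8 periods to handle via the
 * look-up tables.  The helper below unrolls that single iteration.
 */
#if 0	/* example only */
static u32 example_runnable_contrib(void)
{
	u64 n = 40;
	u32 contrib = 0;

	/* one loop iteration: contrib = 0/2 + 23371, n becomes 8 */
	contrib = contrib / 2 + runnable_avg_yN_sum[LOAD_AVG_PERIOD];
	n -= LOAD_AVG_PERIOD;

	/* decay the accumulated sum by y^8 (~0.841) and add the partial sum */
	contrib = decay_load(contrib, n);
	return contrib + runnable_avg_yN_sum[n];	/* roughly 27089 */
}
#endif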
2040 
2041 /*
2042  * We can represent the historical contribution to runnable average as the
2043  * coefficients of a geometric series.  To do this we sub-divide our runnable
2044  * history into segments of approximately 1ms (1024us); label the segment that
2045  * occurred N-ms ago p_N, with p_0 corresponding to the current period, e.g.
2046  *
2047  * [<- 1024us ->|<- 1024us ->|<- 1024us ->| ...
2048  *      p0            p1           p2
2049  *     (now)       (~1ms ago)  (~2ms ago)
2050  *
2051  * Let u_i denote the fraction of p_i that the entity was runnable.
2052  *
2053  * We then designate the fractions u_i as our co-efficients, yielding the
2054  * following representation of historical load:
2055  *   u_0 + u_1*y + u_2*y^2 + u_3*y^3 + ...
2056  *
2057  * We choose y based on the width of a reasonable scheduling period, fixing:
2058  *   y^32 = 0.5
2059  *
2060  * This means that the contribution to load ~32ms ago (u_32) will be weighted
2061  * approximately half as much as the contribution to load within the last ms
2062  * (u_0).
2063  *
2064  * When a period "rolls over" and we have new u_0`, multiplying the previous
2065  * sum again by y is sufficient to update:
2066  *   load_avg = u_0` + y*(u_0 + u_1*y + u_2*y^2 + ... )
2067  *            = u_0 + u_1*y + u_2*y^2 + ... [re-labeling u_i --> u_{i+1}]
2068  */
2069 static __always_inline int __update_entity_runnable_avg(u64 now,
2070 							struct sched_avg *sa,
2071 							int runnable)
2072 {
2073 	u64 delta, periods;
2074 	u32 runnable_contrib;
2075 	int delta_w, decayed = 0;
2076 
2077 	delta = now - sa->last_runnable_update;
2078 	/*
2079 	 * This should only happen when time goes backwards, which it
2080 	 * unfortunately does during sched clock init when we swap over to TSC.
2081 	 */
2082 	if ((s64)delta < 0) {
2083 		sa->last_runnable_update = now;
2084 		return 0;
2085 	}
2086 
2087 	/*
2088 	 * Use 1024ns as the unit of measurement since it's a reasonable
2089 	 * approximation of 1us and fast to compute.
2090 	 */
2091 	delta >>= 10;
2092 	if (!delta)
2093 		return 0;
2094 	sa->last_runnable_update = now;
2095 
2096 	/* delta_w is the amount already accumulated against our next period */
2097 	delta_w = sa->runnable_avg_period % 1024;
2098 	if (delta + delta_w >= 1024) {
2099 		/* period roll-over */
2100 		decayed = 1;
2101 
2102 		/*
2103 		 * Now that we know we're crossing a period boundary, figure
2104 		 * out how much from delta we need to complete the current
2105 		 * period and accrue it.
2106 		 */
2107 		delta_w = 1024 - delta_w;
2108 		if (runnable)
2109 			sa->runnable_avg_sum += delta_w;
2110 		sa->runnable_avg_period += delta_w;
2111 
2112 		delta -= delta_w;
2113 
2114 		/* Figure out how many additional periods this update spans */
2115 		periods = delta / 1024;
2116 		delta %= 1024;
2117 
2118 		sa->runnable_avg_sum = decay_load(sa->runnable_avg_sum,
2119 						  periods + 1);
2120 		sa->runnable_avg_period = decay_load(sa->runnable_avg_period,
2121 						     periods + 1);
2122 
2123 		/* Efficiently calculate \sum (1..n_period) 1024*y^i */
2124 		runnable_contrib = __compute_runnable_contrib(periods);
2125 		if (runnable)
2126 			sa->runnable_avg_sum += runnable_contrib;
2127 		sa->runnable_avg_period += runnable_contrib;
2128 	}
2129 
2130 	/* Remainder of delta accrued against u_0` */
2131 	if (runnable)
2132 		sa->runnable_avg_sum += delta;
2133 	sa->runnable_avg_period += delta;
2134 
2135 	return decayed;
2136 }
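
/*
 * Illustrative sketch, not part of the upstream source: once enough 1024us
 * segments have been accumulated, runnable_avg_sum / runnable_avg_period
 * approaches the fraction of time the entity was runnable; a task that never
 * sleeps saturates both at LOAD_AVG_MAX.  The helper below is hypothetical.
 */
#if 0	/* example only */
static u32 example_runnable_pct(struct sched_avg *sa)
{
	/* an always-runnable entity returns ~100 here */
	return (u32)div_u64((u64)sa->runnable_avg_sum * 100,
			    sa->runnable_avg_period + 1);
}
#endif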
2137 
2138 /* Synchronize an entity's decay with its parenting cfs_rq.*/
2139 static inline u64 __synchronize_entity_decay(struct sched_entity *se)
2140 {
2141 	struct cfs_rq *cfs_rq = cfs_rq_of(se);
2142 	u64 decays = atomic64_read(&cfs_rq->decay_counter);
2143 
2144 	decays -= se->avg.decay_count;
2145 	if (!decays)
2146 		return 0;
2147 
2148 	se->avg.load_avg_contrib = decay_load(se->avg.load_avg_contrib, decays);
2149 	se->avg.decay_count = 0;
2150 
2151 	return decays;
2152 }
2153 
2154 #ifdef CONFIG_FAIR_GROUP_SCHED
2155 static inline void __update_cfs_rq_tg_load_contrib(struct cfs_rq *cfs_rq,
2156 						 int force_update)
2157 {
2158 	struct task_group *tg = cfs_rq->tg;
2159 	long tg_contrib;
2160 
2161 	tg_contrib = cfs_rq->runnable_load_avg + cfs_rq->blocked_load_avg;
2162 	tg_contrib -= cfs_rq->tg_load_contrib;
2163 
2164 	if (force_update || abs(tg_contrib) > cfs_rq->tg_load_contrib / 8) {
2165 		atomic_long_add(tg_contrib, &tg->load_avg);
2166 		cfs_rq->tg_load_contrib += tg_contrib;
2167 	}
2168 }
2169 
2170 /*
2171  * Aggregate cfs_rq runnable averages into an equivalent task_group
2172  * representation for computing load contributions.
2173  */
2174 static inline void __update_tg_runnable_avg(struct sched_avg *sa,
2175 						  struct cfs_rq *cfs_rq)
2176 {
2177 	struct task_group *tg = cfs_rq->tg;
2178 	long contrib;
2179 
2180 	/* The fraction of a cpu used by this cfs_rq */
2181 	contrib = div_u64((u64)sa->runnable_avg_sum << NICE_0_SHIFT,
2182 			  sa->runnable_avg_period + 1);
2183 	contrib -= cfs_rq->tg_runnable_contrib;
2184 
2185 	if (abs(contrib) > cfs_rq->tg_runnable_contrib / 64) {
2186 		atomic_add(contrib, &tg->runnable_avg);
2187 		cfs_rq->tg_runnable_contrib += contrib;
2188 	}
2189 }
2190 
2191 static inline void __update_group_entity_contrib(struct sched_entity *se)
2192 {
2193 	struct cfs_rq *cfs_rq = group_cfs_rq(se);
2194 	struct task_group *tg = cfs_rq->tg;
2195 	int runnable_avg;
2196 
2197 	u64 contrib;
2198 
2199 	contrib = cfs_rq->tg_load_contrib * tg->shares;
2200 	se->avg.load_avg_contrib = div_u64(contrib,
2201 				     atomic_long_read(&tg->load_avg) + 1);
2202 
2203 	/*
2204 	 * For group entities we need to compute a correction term in the case
2205 	 * that they are consuming <1 cpu so that we would contribute the same
2206 	 * load as a task of equal weight.
2207 	 *
2208 	 * Explicitly co-ordinating this measurement would be expensive, but
2209 	 * fortunately the sum of each cpu's contribution forms a usable
2210 	 * lower-bound on the true value.
2211 	 *
2212 	 * Consider the aggregate of 2 contributions.  Either they are disjoint
2213 	 * (and the sum represents the true value) or they overlap and we are
2214 	 * understating by the aggregate of their overlap.
2215 	 *
2216 	 * Extending this to N cpus, for a given overlap, the maximum amount we
2217 	 * understate is then n_i(n_i+1)/2 * w_i where n_i is the number of
2218 	 * cpus that overlap for this interval and w_i is the interval width.
2219 	 *
2220 	 * On a small machine the first term is well-bounded, which bounds the
2221 	 * total error since w_i is a subset of the period.  On a larger
2222 	 * machine, while this first term can be larger, if w_i is of
2223 	 * consequential size we are guaranteed to see n_i*w_i quickly converge
2224 	 * to our upper bound of 1-cpu.
2225 	 */
2226 	runnable_avg = atomic_read(&tg->runnable_avg);
2227 	if (runnable_avg < NICE_0_LOAD) {
2228 		se->avg.load_avg_contrib *= runnable_avg;
2229 		se->avg.load_avg_contrib >>= NICE_0_SHIFT;
2230 	}
2231 }
2232 #else
2233 static inline void __update_cfs_rq_tg_load_contrib(struct cfs_rq *cfs_rq,
2234 						 int force_update) {}
2235 static inline void __update_tg_runnable_avg(struct sched_avg *sa,
2236 						  struct cfs_rq *cfs_rq) {}
2237 static inline void __update_group_entity_contrib(struct sched_entity *se) {}
2238 #endif
2239 
2240 static inline void __update_task_entity_contrib(struct sched_entity *se)
2241 {
2242 	u32 contrib;
2243 
2244 	/* avoid overflowing a 32-bit type w/ SCHED_LOAD_SCALE */
2245 	contrib = se->avg.runnable_avg_sum * scale_load_down(se->load.weight);
2246 	contrib /= (se->avg.runnable_avg_period + 1);
2247 	se->avg.load_avg_contrib = scale_load(contrib);
2248 }
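
/*
 * Illustrative sketch, not part of the upstream source: a nice-0 task
 * (weight 1024) that has been runnable for roughly half of its fully aged
 * history contributes about 512 to its cfs_rq's runnable_load_avg.  The
 * numbers below are hypothetical.
 */
#if 0	/* example only */
static u32 example_task_contrib(void)
{
	u32 runnable_avg_sum = LOAD_AVG_MAX / 2;	/* ~50% runnable */
	u32 runnable_avg_period = LOAD_AVG_MAX;		/* fully aged window */
	u32 weight = 1024;				/* nice-0 weight */

	return runnable_avg_sum * weight / (runnable_avg_period + 1);
}
#endif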
2249 
2250 /* Compute the current contribution to load_avg by se, return any delta */
2251 static long __update_entity_load_avg_contrib(struct sched_entity *se)
2252 {
2253 	long old_contrib = se->avg.load_avg_contrib;
2254 
2255 	if (entity_is_task(se)) {
2256 		__update_task_entity_contrib(se);
2257 	} else {
2258 		__update_tg_runnable_avg(&se->avg, group_cfs_rq(se));
2259 		__update_group_entity_contrib(se);
2260 	}
2261 
2262 	return se->avg.load_avg_contrib - old_contrib;
2263 }
2264 
2265 static inline void subtract_blocked_load_contrib(struct cfs_rq *cfs_rq,
2266 						 long load_contrib)
2267 {
2268 	if (likely(load_contrib < cfs_rq->blocked_load_avg))
2269 		cfs_rq->blocked_load_avg -= load_contrib;
2270 	else
2271 		cfs_rq->blocked_load_avg = 0;
2272 }
2273 
2274 static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq);
2275 
2276 /* Update a sched_entity's runnable average */
2277 static inline void update_entity_load_avg(struct sched_entity *se,
2278 					  int update_cfs_rq)
2279 {
2280 	struct cfs_rq *cfs_rq = cfs_rq_of(se);
2281 	long contrib_delta;
2282 	u64 now;
2283 
2284 	/*
2285 	 * For a group entity we need to use the cfs_rq_clock_task() of its
2286 	 * owned cfs_rq, in case it is the parent of a throttled hierarchy.
2287 	 */
2288 	if (entity_is_task(se))
2289 		now = cfs_rq_clock_task(cfs_rq);
2290 	else
2291 		now = cfs_rq_clock_task(group_cfs_rq(se));
2292 
2293 	if (!__update_entity_runnable_avg(now, &se->avg, se->on_rq))
2294 		return;
2295 
2296 	contrib_delta = __update_entity_load_avg_contrib(se);
2297 
2298 	if (!update_cfs_rq)
2299 		return;
2300 
2301 	if (se->on_rq)
2302 		cfs_rq->runnable_load_avg += contrib_delta;
2303 	else
2304 		subtract_blocked_load_contrib(cfs_rq, -contrib_delta);
2305 }
2306 
2307 /*
2308  * Decay the load contributed by all blocked children and account this so that
2309  * their contribution may be appropriately discounted when they wake up.
2310  */
2311 static void update_cfs_rq_blocked_load(struct cfs_rq *cfs_rq, int force_update)
2312 {
2313 	u64 now = cfs_rq_clock_task(cfs_rq) >> 20;
2314 	u64 decays;
2315 
2316 	decays = now - cfs_rq->last_decay;
2317 	if (!decays && !force_update)
2318 		return;
2319 
2320 	if (atomic_long_read(&cfs_rq->removed_load)) {
2321 		unsigned long removed_load;
2322 		removed_load = atomic_long_xchg(&cfs_rq->removed_load, 0);
2323 		subtract_blocked_load_contrib(cfs_rq, removed_load);
2324 	}
2325 
2326 	if (decays) {
2327 		cfs_rq->blocked_load_avg = decay_load(cfs_rq->blocked_load_avg,
2328 						      decays);
2329 		atomic64_add(decays, &cfs_rq->decay_counter);
2330 		cfs_rq->last_decay = now;
2331 	}
2332 
2333 	__update_cfs_rq_tg_load_contrib(cfs_rq, force_update);
2334 }
2335 
2336 static inline void update_rq_runnable_avg(struct rq *rq, int runnable)
2337 {
2338 	__update_entity_runnable_avg(rq_clock_task(rq), &rq->avg, runnable);
2339 	__update_tg_runnable_avg(&rq->avg, &rq->cfs);
2340 }
2341 
2342 /* Add the load generated by se into cfs_rq's child load-average */
2343 static inline void enqueue_entity_load_avg(struct cfs_rq *cfs_rq,
2344 						  struct sched_entity *se,
2345 						  int wakeup)
2346 {
2347 	/*
2348 	 * We track migrations using entity decay_count <= 0; on a wake-up
2349 	 * migration we use a negative decay count to track the remote decays
2350 	 * accumulated while sleeping.
2351 	 *
2352 	 * Newly forked tasks are enqueued with se->avg.decay_count == 0, they
2353 	 * are seen by enqueue_entity_load_avg() as a migration with an already
2354 	 * constructed load_avg_contrib.
2355 	 */
2356 	if (unlikely(se->avg.decay_count <= 0)) {
2357 		se->avg.last_runnable_update = rq_clock_task(rq_of(cfs_rq));
2358 		if (se->avg.decay_count) {
2359 			/*
2360 			 * In a wake-up migration we have to approximate the
2361 			 * time sleeping.  This is because we can't synchronize
2362 			 * clock_task between the two cpus, and it is not
2363 			 * guaranteed to be read-safe.  Instead, we can
2364 			 * approximate this using our carried decays, which are
2365 			 * explicitly atomically readable.
2366 			 */
2367 			se->avg.last_runnable_update -= (-se->avg.decay_count)
2368 							<< 20;
2369 			update_entity_load_avg(se, 0);
2370 			/* Indicate that we're now synchronized and on-rq */
2371 			se->avg.decay_count = 0;
2372 		}
2373 		wakeup = 0;
2374 	} else {
2375 		/*
2376 		 * Task re-woke on same cpu (or else migrate_task_rq_fair()
2377 		 * would have made count negative); we must be careful to avoid
2378 		 * double-accounting blocked time after synchronizing decays.
2379 		 */
2380 		se->avg.last_runnable_update += __synchronize_entity_decay(se)
2381 							<< 20;
2382 	}
2383 
2384 	/* migrated tasks did not contribute to our blocked load */
2385 	if (wakeup) {
2386 		subtract_blocked_load_contrib(cfs_rq, se->avg.load_avg_contrib);
2387 		update_entity_load_avg(se, 0);
2388 	}
2389 
2390 	cfs_rq->runnable_load_avg += se->avg.load_avg_contrib;
2391 	/* we force update consideration on load-balancer moves */
2392 	update_cfs_rq_blocked_load(cfs_rq, !wakeup);
2393 }
2394 
2395 /*
2396  * Remove se's load from this cfs_rq child load-average; if the entity is
2397  * transitioning to a blocked state we track its projected decay using
2398  * blocked_load_avg.
2399  */
2400 static inline void dequeue_entity_load_avg(struct cfs_rq *cfs_rq,
2401 						  struct sched_entity *se,
2402 						  int sleep)
2403 {
2404 	update_entity_load_avg(se, 1);
2405 	/* we force update consideration on load-balancer moves */
2406 	update_cfs_rq_blocked_load(cfs_rq, !sleep);
2407 
2408 	cfs_rq->runnable_load_avg -= se->avg.load_avg_contrib;
2409 	if (sleep) {
2410 		cfs_rq->blocked_load_avg += se->avg.load_avg_contrib;
2411 		se->avg.decay_count = atomic64_read(&cfs_rq->decay_counter);
2412 	} /* migrations, e.g. sleep=0 leave decay_count == 0 */
2413 }
2414 
2415 /*
2416  * Update the rq's load with the elapsed running time before entering
2417  * idle. If the last scheduled task is not a CFS task, idle_enter will
2418  * be the only way to update the runnable statistic.
2419  */
2420 void idle_enter_fair(struct rq *this_rq)
2421 {
2422 	update_rq_runnable_avg(this_rq, 1);
2423 }
2424 
2425 /*
2426  * Update the rq's load with the elapsed idle time before a task is
2427  * scheduled. If the newly scheduled task is not a CFS task, idle_exit will
2428  * be the only way to update the runnable statistic.
2429  */
2430 void idle_exit_fair(struct rq *this_rq)
2431 {
2432 	update_rq_runnable_avg(this_rq, 0);
2433 }
2434 
2435 #else
2436 static inline void update_entity_load_avg(struct sched_entity *se,
2437 					  int update_cfs_rq) {}
2438 static inline void update_rq_runnable_avg(struct rq *rq, int runnable) {}
2439 static inline void enqueue_entity_load_avg(struct cfs_rq *cfs_rq,
2440 					   struct sched_entity *se,
2441 					   int wakeup) {}
2442 static inline void dequeue_entity_load_avg(struct cfs_rq *cfs_rq,
2443 					   struct sched_entity *se,
2444 					   int sleep) {}
2445 static inline void update_cfs_rq_blocked_load(struct cfs_rq *cfs_rq,
2446 					      int force_update) {}
2447 #endif
2448 
2449 static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
2450 {
2451 #ifdef CONFIG_SCHEDSTATS
2452 	struct task_struct *tsk = NULL;
2453 
2454 	if (entity_is_task(se))
2455 		tsk = task_of(se);
2456 
2457 	if (se->statistics.sleep_start) {
2458 		u64 delta = rq_clock(rq_of(cfs_rq)) - se->statistics.sleep_start;
2459 
2460 		if ((s64)delta < 0)
2461 			delta = 0;
2462 
2463 		if (unlikely(delta > se->statistics.sleep_max))
2464 			se->statistics.sleep_max = delta;
2465 
2466 		se->statistics.sleep_start = 0;
2467 		se->statistics.sum_sleep_runtime += delta;
2468 
2469 		if (tsk) {
2470 			account_scheduler_latency(tsk, delta >> 10, 1);
2471 			trace_sched_stat_sleep(tsk, delta);
2472 		}
2473 	}
2474 	if (se->statistics.block_start) {
2475 		u64 delta = rq_clock(rq_of(cfs_rq)) - se->statistics.block_start;
2476 
2477 		if ((s64)delta < 0)
2478 			delta = 0;
2479 
2480 		if (unlikely(delta > se->statistics.block_max))
2481 			se->statistics.block_max = delta;
2482 
2483 		se->statistics.block_start = 0;
2484 		se->statistics.sum_sleep_runtime += delta;
2485 
2486 		if (tsk) {
2487 			if (tsk->in_iowait) {
2488 				se->statistics.iowait_sum += delta;
2489 				se->statistics.iowait_count++;
2490 				trace_sched_stat_iowait(tsk, delta);
2491 			}
2492 
2493 			trace_sched_stat_blocked(tsk, delta);
2494 
2495 			/*
2496 			 * Blocking time is in units of nanosecs, so shift by
2497 			 * 20 to get a milliseconds-range estimation of the
2498 			 * amount of time that the task spent sleeping:
2499 			 */
2500 			if (unlikely(prof_on == SLEEP_PROFILING)) {
2501 				profile_hits(SLEEP_PROFILING,
2502 						(void *)get_wchan(tsk),
2503 						delta >> 20);
2504 			}
2505 			account_scheduler_latency(tsk, delta >> 10, 0);
2506 		}
2507 	}
2508 #endif
2509 }
2510 
2511 static void check_spread(struct cfs_rq *cfs_rq, struct sched_entity *se)
2512 {
2513 #ifdef CONFIG_SCHED_DEBUG
2514 	s64 d = se->vruntime - cfs_rq->min_vruntime;
2515 
2516 	if (d < 0)
2517 		d = -d;
2518 
2519 	if (d > 3*sysctl_sched_latency)
2520 		schedstat_inc(cfs_rq, nr_spread_over);
2521 #endif
2522 }
2523 
2524 static void
2525 place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
2526 {
2527 	u64 vruntime = cfs_rq->min_vruntime;
2528 
2529 	/*
2530 	 * The 'current' period is already promised to the current tasks,
2531 	 * however the extra weight of the new task will slow them down a
2532 	 * little, place the new task so that it fits in the slot that
2533 	 * stays open at the end.
2534 	 */
2535 	if (initial && sched_feat(START_DEBIT))
2536 		vruntime += sched_vslice(cfs_rq, se);
2537 
2538 	/* sleeps up to a single latency don't count. */
2539 	if (!initial) {
2540 		unsigned long thresh = sysctl_sched_latency;
2541 
2542 		/*
2543 		 * Halve their sleep time's effect, to allow
2544 		 * for a gentler effect of sleepers:
2545 		 */
2546 		if (sched_feat(GENTLE_FAIR_SLEEPERS))
2547 			thresh >>= 1;
2548 
2549 		vruntime -= thresh;
2550 	}
2551 
2552 	/* ensure we never gain time by being placed backwards. */
2553 	se->vruntime = max_vruntime(se->vruntime, vruntime);
2554 }
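
/*
 * Illustrative sketch, not part of the upstream source: with a 6ms latency
 * target and GENTLE_FAIR_SLEEPERS, a waking task is placed at most 3ms of
 * vruntime behind min_vruntime, and a task that slept only briefly keeps its
 * own, larger vruntime.  The helper below ignores the wrap-safe comparison
 * that max_vruntime() performs; the values are hypothetical.
 */
#if 0	/* example only */
static u64 example_wakeup_vruntime(u64 min_vruntime, u64 task_vruntime)
{
	u64 thresh = 6000000ULL >> 1;		/* half of a 6ms latency */
	u64 placed = min_vruntime - thresh;

	return placed > task_vruntime ? placed : task_vruntime;
}
#endif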
2555 
2556 static void check_enqueue_throttle(struct cfs_rq *cfs_rq);
2557 
2558 static void
2559 enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
2560 {
2561 	/*
2562 	 * Update the normalized vruntime before updating min_vruntime
2563 	 * through calling update_curr().
2564 	 */
2565 	if (!(flags & ENQUEUE_WAKEUP) || (flags & ENQUEUE_WAKING))
2566 		se->vruntime += cfs_rq->min_vruntime;
2567 
2568 	/*
2569 	 * Update run-time statistics of the 'current'.
2570 	 */
2571 	update_curr(cfs_rq);
2572 	enqueue_entity_load_avg(cfs_rq, se, flags & ENQUEUE_WAKEUP);
2573 	account_entity_enqueue(cfs_rq, se);
2574 	update_cfs_shares(cfs_rq);
2575 
2576 	if (flags & ENQUEUE_WAKEUP) {
2577 		place_entity(cfs_rq, se, 0);
2578 		enqueue_sleeper(cfs_rq, se);
2579 	}
2580 
2581 	update_stats_enqueue(cfs_rq, se);
2582 	check_spread(cfs_rq, se);
2583 	if (se != cfs_rq->curr)
2584 		__enqueue_entity(cfs_rq, se);
2585 	se->on_rq = 1;
2586 
2587 	if (cfs_rq->nr_running == 1) {
2588 		list_add_leaf_cfs_rq(cfs_rq);
2589 		check_enqueue_throttle(cfs_rq);
2590 	}
2591 }
2592 
2593 static void __clear_buddies_last(struct sched_entity *se)
2594 {
2595 	for_each_sched_entity(se) {
2596 		struct cfs_rq *cfs_rq = cfs_rq_of(se);
2597 		if (cfs_rq->last == se)
2598 			cfs_rq->last = NULL;
2599 		else
2600 			break;
2601 	}
2602 }
2603 
2604 static void __clear_buddies_next(struct sched_entity *se)
2605 {
2606 	for_each_sched_entity(se) {
2607 		struct cfs_rq *cfs_rq = cfs_rq_of(se);
2608 		if (cfs_rq->next == se)
2609 			cfs_rq->next = NULL;
2610 		else
2611 			break;
2612 	}
2613 }
2614 
2615 static void __clear_buddies_skip(struct sched_entity *se)
2616 {
2617 	for_each_sched_entity(se) {
2618 		struct cfs_rq *cfs_rq = cfs_rq_of(se);
2619 		if (cfs_rq->skip == se)
2620 			cfs_rq->skip = NULL;
2621 		else
2622 			break;
2623 	}
2624 }
2625 
2626 static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se)
2627 {
2628 	if (cfs_rq->last == se)
2629 		__clear_buddies_last(se);
2630 
2631 	if (cfs_rq->next == se)
2632 		__clear_buddies_next(se);
2633 
2634 	if (cfs_rq->skip == se)
2635 		__clear_buddies_skip(se);
2636 }
2637 
2638 static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq);
2639 
2640 static void
2641 dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
2642 {
2643 	/*
2644 	 * Update run-time statistics of the 'current'.
2645 	 */
2646 	update_curr(cfs_rq);
2647 	dequeue_entity_load_avg(cfs_rq, se, flags & DEQUEUE_SLEEP);
2648 
2649 	update_stats_dequeue(cfs_rq, se);
2650 	if (flags & DEQUEUE_SLEEP) {
2651 #ifdef CONFIG_SCHEDSTATS
2652 		if (entity_is_task(se)) {
2653 			struct task_struct *tsk = task_of(se);
2654 
2655 			if (tsk->state & TASK_INTERRUPTIBLE)
2656 				se->statistics.sleep_start = rq_clock(rq_of(cfs_rq));
2657 			if (tsk->state & TASK_UNINTERRUPTIBLE)
2658 				se->statistics.block_start = rq_clock(rq_of(cfs_rq));
2659 		}
2660 #endif
2661 	}
2662 
2663 	clear_buddies(cfs_rq, se);
2664 
2665 	if (se != cfs_rq->curr)
2666 		__dequeue_entity(cfs_rq, se);
2667 	se->on_rq = 0;
2668 	account_entity_dequeue(cfs_rq, se);
2669 
2670 	/*
2671 	 * Normalize the entity after updating the min_vruntime because the
2672 	 * update can refer to the ->curr item and we need to reflect this
2673 	 * movement in our normalized position.
2674 	 */
2675 	if (!(flags & DEQUEUE_SLEEP))
2676 		se->vruntime -= cfs_rq->min_vruntime;
2677 
2678 	/* return excess runtime on last dequeue */
2679 	return_cfs_rq_runtime(cfs_rq);
2680 
2681 	update_min_vruntime(cfs_rq);
2682 	update_cfs_shares(cfs_rq);
2683 }
2684 
2685 /*
2686  * Preempt the current task with a newly woken task if needed:
2687  */
2688 static void
2689 check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
2690 {
2691 	unsigned long ideal_runtime, delta_exec;
2692 	struct sched_entity *se;
2693 	s64 delta;
2694 
2695 	ideal_runtime = sched_slice(cfs_rq, curr);
2696 	delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
2697 	if (delta_exec > ideal_runtime) {
2698 		resched_task(rq_of(cfs_rq)->curr);
2699 		/*
2700 		 * The current task ran long enough, ensure it doesn't get
2701 		 * re-elected due to buddy favours.
2702 		 */
2703 		clear_buddies(cfs_rq, curr);
2704 		return;
2705 	}
2706 
2707 	/*
2708 	 * Ensure that a task that missed wakeup preemption by a
2709 	 * narrow margin doesn't have to wait for a full slice.
2710 	 * This also mitigates buddy induced latencies under load.
2711 	 */
2712 	if (delta_exec < sysctl_sched_min_granularity)
2713 		return;
2714 
2715 	se = __pick_first_entity(cfs_rq);
2716 	delta = curr->vruntime - se->vruntime;
2717 
2718 	if (delta < 0)
2719 		return;
2720 
2721 	if (delta > ideal_runtime)
2722 		resched_task(rq_of(cfs_rq)->curr);
2723 }
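
/*
 * Illustrative sketch, not part of the upstream source: with a 6ms latency
 * target and three runnable nice-0 tasks, sched_slice() works out to roughly
 * 2ms, so the check above asks for a resched once the current task has run
 * about 2ms past prev_sum_exec_runtime.  The helper below is hypothetical.
 */
#if 0	/* example only */
static int example_ran_past_slice(u64 sum_exec, u64 prev_sum_exec)
{
	u64 ideal_runtime = 6000000ULL / 3;	/* ~2ms slice for 3 tasks */

	return sum_exec - prev_sum_exec > ideal_runtime;
}
#endif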
2724 
2725 static void
2726 set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
2727 {
2728 	/* 'current' is not kept within the tree. */
2729 	if (se->on_rq) {
2730 		/*
2731 		 * Any task has to be enqueued before it gets to execute on
2732 		 * a CPU. So account for the time it spent waiting on the
2733 		 * runqueue.
2734 		 */
2735 		update_stats_wait_end(cfs_rq, se);
2736 		__dequeue_entity(cfs_rq, se);
2737 	}
2738 
2739 	update_stats_curr_start(cfs_rq, se);
2740 	cfs_rq->curr = se;
2741 #ifdef CONFIG_SCHEDSTATS
2742 	/*
2743 	 * Track our maximum slice length, if the CPU's load is at
2744 	 * least twice that of our own weight (i.e. don't track it
2745 	 * when there are only lesser-weight tasks around):
2746 	 */
2747 	if (rq_of(cfs_rq)->load.weight >= 2*se->load.weight) {
2748 		se->statistics.slice_max = max(se->statistics.slice_max,
2749 			se->sum_exec_runtime - se->prev_sum_exec_runtime);
2750 	}
2751 #endif
2752 	se->prev_sum_exec_runtime = se->sum_exec_runtime;
2753 }
2754 
2755 static int
2756 wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se);
2757 
2758 /*
2759  * Pick the next process, keeping these things in mind, in this order:
2760  * 1) keep things fair between processes/task groups
2761  * 2) pick the "next" process, since someone really wants that to run
2762  * 3) pick the "last" process, for cache locality
2763  * 4) do not run the "skip" process, if something else is available
2764  */
2765 static struct sched_entity *pick_next_entity(struct cfs_rq *cfs_rq)
2766 {
2767 	struct sched_entity *se = __pick_first_entity(cfs_rq);
2768 	struct sched_entity *left = se;
2769 
2770 	/*
2771 	 * Avoid running the skip buddy, if running something else can
2772 	 * be done without getting too unfair.
2773 	 */
2774 	if (cfs_rq->skip == se) {
2775 		struct sched_entity *second = __pick_next_entity(se);
2776 		if (second && wakeup_preempt_entity(second, left) < 1)
2777 			se = second;
2778 	}
2779 
2780 	/*
2781 	 * Prefer last buddy, try to return the CPU to a preempted task.
2782 	 */
2783 	if (cfs_rq->last && wakeup_preempt_entity(cfs_rq->last, left) < 1)
2784 		se = cfs_rq->last;
2785 
2786 	/*
2787 	 * Someone really wants this to run. If it's not unfair, run it.
2788 	 */
2789 	if (cfs_rq->next && wakeup_preempt_entity(cfs_rq->next, left) < 1)
2790 		se = cfs_rq->next;
2791 
2792 	clear_buddies(cfs_rq, se);
2793 
2794 	return se;
2795 }
2796 
2797 static void check_cfs_rq_runtime(struct cfs_rq *cfs_rq);
2798 
2799 static void put_prev_entity(struct cfs_rq *cfs_rq, struct sched_entity *prev)
2800 {
2801 	/*
2802 	 * If still on the runqueue then deactivate_task()
2803 	 * was not called and update_curr() has to be done:
2804 	 */
2805 	if (prev->on_rq)
2806 		update_curr(cfs_rq);
2807 
2808 	/* throttle cfs_rqs exceeding runtime */
2809 	check_cfs_rq_runtime(cfs_rq);
2810 
2811 	check_spread(cfs_rq, prev);
2812 	if (prev->on_rq) {
2813 		update_stats_wait_start(cfs_rq, prev);
2814 		/* Put 'current' back into the tree. */
2815 		__enqueue_entity(cfs_rq, prev);
2816 		/* in !on_rq case, update occurred at dequeue */
2817 		update_entity_load_avg(prev, 1);
2818 	}
2819 	cfs_rq->curr = NULL;
2820 }
2821 
2822 static void
2823 entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued)
2824 {
2825 	/*
2826 	 * Update run-time statistics of the 'current'.
2827 	 */
2828 	update_curr(cfs_rq);
2829 
2830 	/*
2831 	 * Ensure that runnable average is periodically updated.
2832 	 */
2833 	update_entity_load_avg(curr, 1);
2834 	update_cfs_rq_blocked_load(cfs_rq, 1);
2835 	update_cfs_shares(cfs_rq);
2836 
2837 #ifdef CONFIG_SCHED_HRTICK
2838 	/*
2839 	 * queued ticks are scheduled to match the slice, so don't bother
2840 	 * validating it and just reschedule.
2841 	 */
2842 	if (queued) {
2843 		resched_task(rq_of(cfs_rq)->curr);
2844 		return;
2845 	}
2846 	/*
2847 	 * don't let the period tick interfere with the hrtick preemption
2848 	 */
2849 	if (!sched_feat(DOUBLE_TICK) &&
2850 			hrtimer_active(&rq_of(cfs_rq)->hrtick_timer))
2851 		return;
2852 #endif
2853 
2854 	if (cfs_rq->nr_running > 1)
2855 		check_preempt_tick(cfs_rq, curr);
2856 }
2857 
2858 
2859 /**************************************************
2860  * CFS bandwidth control machinery
2861  */
2862 
2863 #ifdef CONFIG_CFS_BANDWIDTH
2864 
2865 #ifdef HAVE_JUMP_LABEL
2866 static struct static_key __cfs_bandwidth_used;
2867 
2868 static inline bool cfs_bandwidth_used(void)
2869 {
2870 	return static_key_false(&__cfs_bandwidth_used);
2871 }
2872 
2873 void cfs_bandwidth_usage_inc(void)
2874 {
2875 	static_key_slow_inc(&__cfs_bandwidth_used);
2876 }
2877 
2878 void cfs_bandwidth_usage_dec(void)
2879 {
2880 	static_key_slow_dec(&__cfs_bandwidth_used);
2881 }
2882 #else /* HAVE_JUMP_LABEL */
2883 static bool cfs_bandwidth_used(void)
2884 {
2885 	return true;
2886 }
2887 
2888 void cfs_bandwidth_usage_inc(void) {}
2889 void cfs_bandwidth_usage_dec(void) {}
2890 #endif /* HAVE_JUMP_LABEL */
2891 
2892 /*
2893  * default period for cfs group bandwidth.
2894  * default: 0.1s, units: nanoseconds
2895  */
2896 static inline u64 default_cfs_period(void)
2897 {
2898 	return 100000000ULL;
2899 }
2900 
2901 static inline u64 sched_cfs_bandwidth_slice(void)
2902 {
2903 	return (u64)sysctl_sched_cfs_bandwidth_slice * NSEC_PER_USEC;
2904 }
2905 
2906 /*
2907  * Replenish runtime according to assigned quota and update expiration time.
2908  * We use sched_clock_cpu directly instead of rq->clock to avoid adding
2909  * additional synchronization around rq->lock.
2910  *
2911  * requires cfs_b->lock
2912  */
2913 void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b)
2914 {
2915 	u64 now;
2916 
2917 	if (cfs_b->quota == RUNTIME_INF)
2918 		return;
2919 
2920 	now = sched_clock_cpu(smp_processor_id());
2921 	cfs_b->runtime = cfs_b->quota;
2922 	cfs_b->runtime_expires = now + ktime_to_ns(cfs_b->period);
2923 }
2924 
2925 static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg)
2926 {
2927 	return &tg->cfs_bandwidth;
2928 }
2929 
2930 /* rq->clock_task normalized against any time this cfs_rq has spent throttled */
2931 static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq)
2932 {
2933 	if (unlikely(cfs_rq->throttle_count))
2934 		return cfs_rq->throttled_clock_task;
2935 
2936 	return rq_clock_task(rq_of(cfs_rq)) - cfs_rq->throttled_clock_task_time;
2937 }
2938 
2939 /* returns 0 on failure to allocate runtime */
2940 static int assign_cfs_rq_runtime(struct cfs_rq *cfs_rq)
2941 {
2942 	struct task_group *tg = cfs_rq->tg;
2943 	struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(tg);
2944 	u64 amount = 0, min_amount, expires;
2945 
2946 	/* note: this is a positive sum as runtime_remaining <= 0 */
2947 	min_amount = sched_cfs_bandwidth_slice() - cfs_rq->runtime_remaining;
2948 
2949 	raw_spin_lock(&cfs_b->lock);
2950 	if (cfs_b->quota == RUNTIME_INF)
2951 		amount = min_amount;
2952 	else {
2953 		/*
2954 		 * If the bandwidth pool has become inactive, then at least one
2955 		 * period must have elapsed since the last consumption.
2956 		 * Refresh the global state and ensure the bandwidth timer becomes
2957 		 * active.
2958 		 */
2959 		if (!cfs_b->timer_active) {
2960 			__refill_cfs_bandwidth_runtime(cfs_b);
2961 			__start_cfs_bandwidth(cfs_b);
2962 		}
2963 
2964 		if (cfs_b->runtime > 0) {
2965 			amount = min(cfs_b->runtime, min_amount);
2966 			cfs_b->runtime -= amount;
2967 			cfs_b->idle = 0;
2968 		}
2969 	}
2970 	expires = cfs_b->runtime_expires;
2971 	raw_spin_unlock(&cfs_b->lock);
2972 
2973 	cfs_rq->runtime_remaining += amount;
2974 	/*
2975 	 * we may have advanced our local expiration to account for allowed
2976 	 * spread between our sched_clock and the one on which runtime was
2977 	 * issued.
2978 	 */
2979 	if ((s64)(expires - cfs_rq->runtime_expires) > 0)
2980 		cfs_rq->runtime_expires = expires;
2981 
2982 	return cfs_rq->runtime_remaining > 0;
2983 }
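
/*
 * Illustrative sketch, not part of the upstream source: with the default 5ms
 * bandwidth slice, a cfs_rq whose runtime_remaining has just dropped to
 * -200us asks the global pool for 5.2ms so that it ends up one full slice in
 * credit, provided the pool still holds that much.  The helper and its
 * numbers are hypothetical.
 */
#if 0	/* example only */
static s64 example_refill(s64 runtime_remaining, u64 pool_runtime)
{
	u64 slice = 5 * NSEC_PER_MSEC;			/* default slice */
	u64 min_amount = slice - runtime_remaining;	/* 5.2ms for -200us */
	u64 amount = min(pool_runtime, min_amount);

	return runtime_remaining + amount;
}
#endif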
2984 
2985 /*
2986  * Note: This depends on the synchronization provided by sched_clock and the
2987  * fact that rq->clock snapshots this value.
2988  */
2989 static void expire_cfs_rq_runtime(struct cfs_rq *cfs_rq)
2990 {
2991 	struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
2992 
2993 	/* if the deadline is ahead of our clock, nothing to do */
2994 	if (likely((s64)(rq_clock(rq_of(cfs_rq)) - cfs_rq->runtime_expires) < 0))
2995 		return;
2996 
2997 	if (cfs_rq->runtime_remaining < 0)
2998 		return;
2999 
3000 	/*
3001 	 * If the local deadline has passed we have to consider the
3002 	 * possibility that our sched_clock is 'fast' and the global deadline
3003 	 * has not truly expired.
3004 	 *
3005 	 * Fortunately we can determine whether this is the case by checking
3006 	 * whether the global deadline has advanced.
3007 	 */
3008 
3009 	if ((s64)(cfs_rq->runtime_expires - cfs_b->runtime_expires) >= 0) {
3010 		/* extend local deadline, drift is bounded above by 2 ticks */
3011 		cfs_rq->runtime_expires += TICK_NSEC;
3012 	} else {
3013 		/* global deadline is ahead, expiration has passed */
3014 		cfs_rq->runtime_remaining = 0;
3015 	}
3016 }
3017 
3018 static void __account_cfs_rq_runtime(struct cfs_rq *cfs_rq,
3019 				     unsigned long delta_exec)
3020 {
3021 	/* dock delta_exec before expiring quota (as it could span periods) */
3022 	cfs_rq->runtime_remaining -= delta_exec;
3023 	expire_cfs_rq_runtime(cfs_rq);
3024 
3025 	if (likely(cfs_rq->runtime_remaining > 0))
3026 		return;
3027 
3028 	/*
3029 	 * if we're unable to extend our runtime we resched so that the active
3030 	 * hierarchy can be throttled
3031 	 */
3032 	if (!assign_cfs_rq_runtime(cfs_rq) && likely(cfs_rq->curr))
3033 		resched_task(rq_of(cfs_rq)->curr);
3034 }
3035 
3036 static __always_inline
3037 void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, unsigned long delta_exec)
3038 {
3039 	if (!cfs_bandwidth_used() || !cfs_rq->runtime_enabled)
3040 		return;
3041 
3042 	__account_cfs_rq_runtime(cfs_rq, delta_exec);
3043 }
3044 
3045 static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq)
3046 {
3047 	return cfs_bandwidth_used() && cfs_rq->throttled;
3048 }
3049 
3050 /* check whether cfs_rq, or any parent, is throttled */
3051 static inline int throttled_hierarchy(struct cfs_rq *cfs_rq)
3052 {
3053 	return cfs_bandwidth_used() && cfs_rq->throttle_count;
3054 }
3055 
3056 /*
3057  * Ensure that neither of the group entities corresponding to src_cpu or
3058  * dest_cpu are members of a throttled hierarchy when performing group
3059  * load-balance operations.
3060  */
3061 static inline int throttled_lb_pair(struct task_group *tg,
3062 				    int src_cpu, int dest_cpu)
3063 {
3064 	struct cfs_rq *src_cfs_rq, *dest_cfs_rq;
3065 
3066 	src_cfs_rq = tg->cfs_rq[src_cpu];
3067 	dest_cfs_rq = tg->cfs_rq[dest_cpu];
3068 
3069 	return throttled_hierarchy(src_cfs_rq) ||
3070 	       throttled_hierarchy(dest_cfs_rq);
3071 }
3072 
3073 /* updated child weight may affect parent so we have to do this bottom up */
3074 static int tg_unthrottle_up(struct task_group *tg, void *data)
3075 {
3076 	struct rq *rq = data;
3077 	struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];
3078 
3079 	cfs_rq->throttle_count--;
3080 #ifdef CONFIG_SMP
3081 	if (!cfs_rq->throttle_count) {
3082 		/* adjust cfs_rq_clock_task() */
3083 		cfs_rq->throttled_clock_task_time += rq_clock_task(rq) -
3084 					     cfs_rq->throttled_clock_task;
3085 	}
3086 #endif
3087 
3088 	return 0;
3089 }
3090 
3091 static int tg_throttle_down(struct task_group *tg, void *data)
3092 {
3093 	struct rq *rq = data;
3094 	struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];
3095 
3096 	/* group is entering throttled state, stop time */
3097 	if (!cfs_rq->throttle_count)
3098 		cfs_rq->throttled_clock_task = rq_clock_task(rq);
3099 	cfs_rq->throttle_count++;
3100 
3101 	return 0;
3102 }
3103 
3104 static void throttle_cfs_rq(struct cfs_rq *cfs_rq)
3105 {
3106 	struct rq *rq = rq_of(cfs_rq);
3107 	struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
3108 	struct sched_entity *se;
3109 	long task_delta, dequeue = 1;
3110 
3111 	se = cfs_rq->tg->se[cpu_of(rq_of(cfs_rq))];
3112 
3113 	/* freeze hierarchy runnable averages while throttled */
3114 	rcu_read_lock();
3115 	walk_tg_tree_from(cfs_rq->tg, tg_throttle_down, tg_nop, (void *)rq);
3116 	rcu_read_unlock();
3117 
3118 	task_delta = cfs_rq->h_nr_running;
3119 	for_each_sched_entity(se) {
3120 		struct cfs_rq *qcfs_rq = cfs_rq_of(se);
3121 		/* throttled entity or throttle-on-deactivate */
3122 		if (!se->on_rq)
3123 			break;
3124 
3125 		if (dequeue)
3126 			dequeue_entity(qcfs_rq, se, DEQUEUE_SLEEP);
3127 		qcfs_rq->h_nr_running -= task_delta;
3128 
3129 		if (qcfs_rq->load.weight)
3130 			dequeue = 0;
3131 	}
3132 
3133 	if (!se)
3134 		rq->nr_running -= task_delta;
3135 
3136 	cfs_rq->throttled = 1;
3137 	cfs_rq->throttled_clock = rq_clock(rq);
3138 	raw_spin_lock(&cfs_b->lock);
3139 	list_add_tail_rcu(&cfs_rq->throttled_list, &cfs_b->throttled_cfs_rq);
3140 	if (!cfs_b->timer_active)
3141 		__start_cfs_bandwidth(cfs_b);
3142 	raw_spin_unlock(&cfs_b->lock);
3143 }
3144 
3145 void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
3146 {
3147 	struct rq *rq = rq_of(cfs_rq);
3148 	struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
3149 	struct sched_entity *se;
3150 	int enqueue = 1;
3151 	long task_delta;
3152 
3153 	se = cfs_rq->tg->se[cpu_of(rq)];
3154 
3155 	cfs_rq->throttled = 0;
3156 
3157 	update_rq_clock(rq);
3158 
3159 	raw_spin_lock(&cfs_b->lock);
3160 	cfs_b->throttled_time += rq_clock(rq) - cfs_rq->throttled_clock;
3161 	list_del_rcu(&cfs_rq->throttled_list);
3162 	raw_spin_unlock(&cfs_b->lock);
3163 
3164 	/* update hierarchical throttle state */
3165 	walk_tg_tree_from(cfs_rq->tg, tg_nop, tg_unthrottle_up, (void *)rq);
3166 
3167 	if (!cfs_rq->load.weight)
3168 		return;
3169 
3170 	task_delta = cfs_rq->h_nr_running;
3171 	for_each_sched_entity(se) {
3172 		if (se->on_rq)
3173 			enqueue = 0;
3174 
3175 		cfs_rq = cfs_rq_of(se);
3176 		if (enqueue)
3177 			enqueue_entity(cfs_rq, se, ENQUEUE_WAKEUP);
3178 		cfs_rq->h_nr_running += task_delta;
3179 
3180 		if (cfs_rq_throttled(cfs_rq))
3181 			break;
3182 	}
3183 
3184 	if (!se)
3185 		rq->nr_running += task_delta;
3186 
3187 	/* determine whether we need to wake up a potentially idle cpu */
3188 	if (rq->curr == rq->idle && rq->cfs.nr_running)
3189 		resched_task(rq->curr);
3190 }
3191 
3192 static u64 distribute_cfs_runtime(struct cfs_bandwidth *cfs_b,
3193 		u64 remaining, u64 expires)
3194 {
3195 	struct cfs_rq *cfs_rq;
3196 	u64 runtime = remaining;
3197 
3198 	rcu_read_lock();
3199 	list_for_each_entry_rcu(cfs_rq, &cfs_b->throttled_cfs_rq,
3200 				throttled_list) {
3201 		struct rq *rq = rq_of(cfs_rq);
3202 
3203 		raw_spin_lock(&rq->lock);
3204 		if (!cfs_rq_throttled(cfs_rq))
3205 			goto next;
3206 
3207 		runtime = -cfs_rq->runtime_remaining + 1;
3208 		if (runtime > remaining)
3209 			runtime = remaining;
3210 		remaining -= runtime;
3211 
3212 		cfs_rq->runtime_remaining += runtime;
3213 		cfs_rq->runtime_expires = expires;
3214 
3215 		/* we check whether we're throttled above */
3216 		if (cfs_rq->runtime_remaining > 0)
3217 			unthrottle_cfs_rq(cfs_rq);
3218 
3219 next:
3220 		raw_spin_unlock(&rq->lock);
3221 
3222 		if (!remaining)
3223 			break;
3224 	}
3225 	rcu_read_unlock();
3226 
3227 	return remaining;
3228 }
3229 
3230 /*
3231  * Responsible for refilling a task_group's bandwidth and unthrottling its
3232  * cfs_rqs as appropriate. If there has been no activity within the last
3233  * period the timer is deactivated until scheduling resumes; cfs_b->idle is
3234  * used to track this state.
3235  */
3236 static int do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b, int overrun)
3237 {
3238 	u64 runtime, runtime_expires;
3239 	int idle = 1, throttled;
3240 
3241 	raw_spin_lock(&cfs_b->lock);
3242 	/* no need to continue the timer with no bandwidth constraint */
3243 	if (cfs_b->quota == RUNTIME_INF)
3244 		goto out_unlock;
3245 
3246 	throttled = !list_empty(&cfs_b->throttled_cfs_rq);
3247 	/* idle depends on !throttled (for the case of a large deficit) */
3248 	idle = cfs_b->idle && !throttled;
3249 	cfs_b->nr_periods += overrun;
3250 
3251 	/* if we're going inactive then everything else can be deferred */
3252 	if (idle)
3253 		goto out_unlock;
3254 
3255 	/*
3256 	 * if we have relooped after returning idle once, we need to update our
3257 	 * status as actually running, so that other cpus doing
3258 	 * __start_cfs_bandwidth will stop trying to cancel us.
3259 	 */
3260 	cfs_b->timer_active = 1;
3261 
3262 	__refill_cfs_bandwidth_runtime(cfs_b);
3263 
3264 	if (!throttled) {
3265 		/* mark as potentially idle for the upcoming period */
3266 		cfs_b->idle = 1;
3267 		goto out_unlock;
3268 	}
3269 
3270 	/* account preceding periods in which throttling occurred */
3271 	cfs_b->nr_throttled += overrun;
3272 
3273 	/*
3274 	 * There are throttled entities so we must first use the new bandwidth
3275 	 * to unthrottle them before making it generally available.  This
3276 	 * ensures that all existing debts will be paid before a new cfs_rq is
3277 	 * allowed to run.
3278 	 */
3279 	runtime = cfs_b->runtime;
3280 	runtime_expires = cfs_b->runtime_expires;
3281 	cfs_b->runtime = 0;
3282 
3283 	/*
3284 	 * This check is repeated as we are holding onto the new bandwidth
3285 	 * while we unthrottle.  This can potentially race with an unthrottled
3286 	 * group trying to acquire new bandwidth from the global pool.
3287 	 */
3288 	while (throttled && runtime > 0) {
3289 		raw_spin_unlock(&cfs_b->lock);
3290 		/* we can't nest cfs_b->lock while distributing bandwidth */
3291 		runtime = distribute_cfs_runtime(cfs_b, runtime,
3292 						 runtime_expires);
3293 		raw_spin_lock(&cfs_b->lock);
3294 
3295 		throttled = !list_empty(&cfs_b->throttled_cfs_rq);
3296 	}
3297 
3298 	/* return (any) remaining runtime */
3299 	cfs_b->runtime = runtime;
3300 	/*
3301 	 * While we are ensured activity in the period following an
3302 	 * unthrottle, this also covers the case in which the new bandwidth is
3303 	 * insufficient to cover the existing bandwidth deficit.  (Forcing the
3304 	 * timer to remain active while there are any throttled entities.)
3305 	 */
3306 	cfs_b->idle = 0;
3307 out_unlock:
3308 	if (idle)
3309 		cfs_b->timer_active = 0;
3310 	raw_spin_unlock(&cfs_b->lock);
3311 
3312 	return idle;
3313 }
3314 
3315 /* a cfs_rq won't donate quota below this amount */
3316 static const u64 min_cfs_rq_runtime = 1 * NSEC_PER_MSEC;
3317 /* minimum remaining period time to redistribute slack quota */
3318 static const u64 min_bandwidth_expiration = 2 * NSEC_PER_MSEC;
3319 /* how long we wait to gather additional slack before distributing */
3320 static const u64 cfs_bandwidth_slack_period = 5 * NSEC_PER_MSEC;
3321 
3322 /*
3323  * Are we near the end of the current quota period?
3324  *
3325  * Requires cfs_b->lock for hrtimer_expires_remaining to be safe against the
3326  * hrtimer base being cleared by __hrtimer_start_range_ns. In the case of
3327  * migrate_hrtimers, base is never cleared, so we are fine.
3328  */
3329 static int runtime_refresh_within(struct cfs_bandwidth *cfs_b, u64 min_expire)
3330 {
3331 	struct hrtimer *refresh_timer = &cfs_b->period_timer;
3332 	u64 remaining;
3333 
3334 	/* if the call-back is running a quota refresh is already occurring */
3335 	/* if the call-back is running, a quota refresh is already occurring */
3336 		return 1;
3337 
3338 	/* is a quota refresh about to occur? */
3339 	remaining = ktime_to_ns(hrtimer_expires_remaining(refresh_timer));
3340 	if (remaining < min_expire)
3341 		return 1;
3342 
3343 	return 0;
3344 }
3345 
3346 static void start_cfs_slack_bandwidth(struct cfs_bandwidth *cfs_b)
3347 {
3348 	u64 min_left = cfs_bandwidth_slack_period + min_bandwidth_expiration;
3349 
3350 	/* if there's a quota refresh soon don't bother with slack */
3351 	if (runtime_refresh_within(cfs_b, min_left))
3352 		return;
3353 
3354 	start_bandwidth_timer(&cfs_b->slack_timer,
3355 				ns_to_ktime(cfs_bandwidth_slack_period));
3356 }
3357 
3358 /* we know any runtime found here is valid as update_curr() precedes return */
3359 static void __return_cfs_rq_runtime(struct cfs_rq *cfs_rq)
3360 {
3361 	struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
3362 	s64 slack_runtime = cfs_rq->runtime_remaining - min_cfs_rq_runtime;
3363 
3364 	if (slack_runtime <= 0)
3365 		return;
3366 
3367 	raw_spin_lock(&cfs_b->lock);
3368 	if (cfs_b->quota != RUNTIME_INF &&
3369 	    cfs_rq->runtime_expires == cfs_b->runtime_expires) {
3370 		cfs_b->runtime += slack_runtime;
3371 
3372 		/* we are under rq->lock, defer unthrottling using a timer */
3373 		if (cfs_b->runtime > sched_cfs_bandwidth_slice() &&
3374 		    !list_empty(&cfs_b->throttled_cfs_rq))
3375 			start_cfs_slack_bandwidth(cfs_b);
3376 	}
3377 	raw_spin_unlock(&cfs_b->lock);
3378 
3379 	/* even if it's not valid for return we don't want to try again */
3380 	cfs_rq->runtime_remaining -= slack_runtime;
3381 }
3382 
3383 static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq)
3384 {
3385 	if (!cfs_bandwidth_used())
3386 		return;
3387 
3388 	if (!cfs_rq->runtime_enabled || cfs_rq->nr_running)
3389 		return;
3390 
3391 	__return_cfs_rq_runtime(cfs_rq);
3392 }
3393 
3394 /*
3395  * This is done with a timer (instead of inline with bandwidth return) since
3396  * it's necessary to juggle rq->locks to unthrottle their respective cfs_rqs.
3397  */
3398 static void do_sched_cfs_slack_timer(struct cfs_bandwidth *cfs_b)
3399 {
3400 	u64 runtime = 0, slice = sched_cfs_bandwidth_slice();
3401 	u64 expires;
3402 
3403 	/* confirm we're still not at a refresh boundary */
3404 	raw_spin_lock(&cfs_b->lock);
3405 	if (runtime_refresh_within(cfs_b, min_bandwidth_expiration)) {
3406 		raw_spin_unlock(&cfs_b->lock);
3407 		return;
3408 	}
3409 
3410 	if (cfs_b->quota != RUNTIME_INF && cfs_b->runtime > slice) {
3411 		runtime = cfs_b->runtime;
3412 		cfs_b->runtime = 0;
3413 	}
3414 	expires = cfs_b->runtime_expires;
3415 	raw_spin_unlock(&cfs_b->lock);
3416 
3417 	if (!runtime)
3418 		return;
3419 
3420 	runtime = distribute_cfs_runtime(cfs_b, runtime, expires);
3421 
3422 	raw_spin_lock(&cfs_b->lock);
3423 	if (expires == cfs_b->runtime_expires)
3424 		cfs_b->runtime = runtime;
3425 	raw_spin_unlock(&cfs_b->lock);
3426 }
3427 
3428 /*
3429  * When a group wakes up we want to make sure that its quota is not already
3430  * expired/exceeded, otherwise it may be allowed to steal additional ticks of
3431  * runtime, as update_curr() throttling can not trigger until it's on-rq.
3432  */
3433 static void check_enqueue_throttle(struct cfs_rq *cfs_rq)
3434 {
3435 	if (!cfs_bandwidth_used())
3436 		return;
3437 
3438 	/* an active group must be handled by the update_curr()->put() path */
3439 	if (!cfs_rq->runtime_enabled || cfs_rq->curr)
3440 		return;
3441 
3442 	/* ensure the group is not already throttled */
3443 	if (cfs_rq_throttled(cfs_rq))
3444 		return;
3445 
3446 	/* update runtime allocation */
3447 	account_cfs_rq_runtime(cfs_rq, 0);
3448 	if (cfs_rq->runtime_remaining <= 0)
3449 		throttle_cfs_rq(cfs_rq);
3450 }
3451 
3452 /* conditionally throttle active cfs_rq's from put_prev_entity() */
3453 static void check_cfs_rq_runtime(struct cfs_rq *cfs_rq)
3454 {
3455 	if (!cfs_bandwidth_used())
3456 		return;
3457 
3458 	if (likely(!cfs_rq->runtime_enabled || cfs_rq->runtime_remaining > 0))
3459 		return;
3460 
3461 	/*
3462 	 * it's possible for a throttled entity to be forced into a running
3463 	 * state (e.g. set_curr_task); in this case we're finished.
3464 	 */
3465 	if (cfs_rq_throttled(cfs_rq))
3466 		return;
3467 
3468 	throttle_cfs_rq(cfs_rq);
3469 }
3470 
3471 static enum hrtimer_restart sched_cfs_slack_timer(struct hrtimer *timer)
3472 {
3473 	struct cfs_bandwidth *cfs_b =
3474 		container_of(timer, struct cfs_bandwidth, slack_timer);
3475 	do_sched_cfs_slack_timer(cfs_b);
3476 
3477 	return HRTIMER_NORESTART;
3478 }
3479 
3480 static enum hrtimer_restart sched_cfs_period_timer(struct hrtimer *timer)
3481 {
3482 	struct cfs_bandwidth *cfs_b =
3483 		container_of(timer, struct cfs_bandwidth, period_timer);
3484 	ktime_t now;
3485 	int overrun;
3486 	int idle = 0;
3487 
3488 	for (;;) {
3489 		now = hrtimer_cb_get_time(timer);
3490 		overrun = hrtimer_forward(timer, now, cfs_b->period);
3491 
3492 		if (!overrun)
3493 			break;
3494 
3495 		idle = do_sched_cfs_period_timer(cfs_b, overrun);
3496 	}
3497 
3498 	return idle ? HRTIMER_NORESTART : HRTIMER_RESTART;
3499 }
3500 
3501 void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
3502 {
3503 	raw_spin_lock_init(&cfs_b->lock);
3504 	cfs_b->runtime = 0;
3505 	cfs_b->quota = RUNTIME_INF;
3506 	cfs_b->period = ns_to_ktime(default_cfs_period());
3507 
3508 	INIT_LIST_HEAD(&cfs_b->throttled_cfs_rq);
3509 	hrtimer_init(&cfs_b->period_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
3510 	cfs_b->period_timer.function = sched_cfs_period_timer;
3511 	hrtimer_init(&cfs_b->slack_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
3512 	cfs_b->slack_timer.function = sched_cfs_slack_timer;
3513 }
3514 
3515 static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq)
3516 {
3517 	cfs_rq->runtime_enabled = 0;
3518 	INIT_LIST_HEAD(&cfs_rq->throttled_list);
3519 }
3520 
3521 /* requires cfs_b->lock, may release to reprogram timer */
3522 void __start_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
3523 {
3524 	/*
3525 	 * The timer may be active because we're trying to set a new bandwidth
3526 	 * period or because we're racing with the tear-down path
3527 	 * (timer_active==0 becomes visible before the hrtimer call-back
3528 	 * terminates).  In either case we ensure that it's re-programmed.
3529 	 */
3530 	while (unlikely(hrtimer_active(&cfs_b->period_timer)) &&
3531 	       hrtimer_try_to_cancel(&cfs_b->period_timer) < 0) {
3532 		/* bounce the lock to allow do_sched_cfs_period_timer to run */
3533 		raw_spin_unlock(&cfs_b->lock);
3534 		cpu_relax();
3535 		raw_spin_lock(&cfs_b->lock);
3536 		/* if someone else restarted the timer then we're done */
3537 		if (cfs_b->timer_active)
3538 			return;
3539 	}
3540 
3541 	cfs_b->timer_active = 1;
3542 	start_bandwidth_timer(&cfs_b->period_timer, cfs_b->period);
3543 }
3544 
3545 static void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
3546 {
3547 	hrtimer_cancel(&cfs_b->period_timer);
3548 	hrtimer_cancel(&cfs_b->slack_timer);
3549 }
3550 
3551 static void __maybe_unused unthrottle_offline_cfs_rqs(struct rq *rq)
3552 {
3553 	struct cfs_rq *cfs_rq;
3554 
3555 	for_each_leaf_cfs_rq(rq, cfs_rq) {
3556 		struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
3557 
3558 		if (!cfs_rq->runtime_enabled)
3559 			continue;
3560 
3561 		/*
3562 		 * clock_task is not advancing so we just need to make sure
3563 		 * there's some valid quota amount
3564 		 */
3565 		cfs_rq->runtime_remaining = cfs_b->quota;
3566 		if (cfs_rq_throttled(cfs_rq))
3567 			unthrottle_cfs_rq(cfs_rq);
3568 	}
3569 }
3570 
3571 #else /* CONFIG_CFS_BANDWIDTH */
3572 static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq)
3573 {
3574 	return rq_clock_task(rq_of(cfs_rq));
3575 }
3576 
3577 static void account_cfs_rq_runtime(struct cfs_rq *cfs_rq,
3578 				     unsigned long delta_exec) {}
3579 static void check_cfs_rq_runtime(struct cfs_rq *cfs_rq) {}
3580 static void check_enqueue_throttle(struct cfs_rq *cfs_rq) {}
3581 static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq) {}
3582 
3583 static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq)
3584 {
3585 	return 0;
3586 }
3587 
3588 static inline int throttled_hierarchy(struct cfs_rq *cfs_rq)
3589 {
3590 	return 0;
3591 }
3592 
3593 static inline int throttled_lb_pair(struct task_group *tg,
3594 				    int src_cpu, int dest_cpu)
3595 {
3596 	return 0;
3597 }
3598 
3599 void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b) {}
3600 
3601 #ifdef CONFIG_FAIR_GROUP_SCHED
3602 static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq) {}
3603 #endif
3604 
3605 static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg)
3606 {
3607 	return NULL;
3608 }
3609 static inline void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b) {}
3610 static inline void unthrottle_offline_cfs_rqs(struct rq *rq) {}
3611 
3612 #endif /* CONFIG_CFS_BANDWIDTH */
3613 
3614 /**************************************************
3615  * CFS operations on tasks:
3616  */
3617 
3618 #ifdef CONFIG_SCHED_HRTICK
3619 static void hrtick_start_fair(struct rq *rq, struct task_struct *p)
3620 {
3621 	struct sched_entity *se = &p->se;
3622 	struct cfs_rq *cfs_rq = cfs_rq_of(se);
3623 
3624 	WARN_ON(task_rq(p) != rq);
3625 
3626 	if (cfs_rq->nr_running > 1) {
3627 		u64 slice = sched_slice(cfs_rq, se);
3628 		u64 ran = se->sum_exec_runtime - se->prev_sum_exec_runtime;
3629 		s64 delta = slice - ran;
3630 
3631 		if (delta < 0) {
3632 			if (rq->curr == p)
3633 				resched_task(p);
3634 			return;
3635 		}
3636 
3637 		/*
3638 		 * Don't schedule slices shorter than 10000ns, that just
3639 		 * doesn't make sense. Rely on vruntime for fairness.
3640 		 */
3641 		if (rq->curr != p)
3642 			delta = max_t(s64, 10000LL, delta);
3643 
3644 		hrtick_start(rq, delta);
3645 	}
3646 }
3647 
3648 /*
3649  * called from enqueue/dequeue and updates the hrtick when the
3650  * current task is from our class and nr_running is low enough
3651  * to matter.
3652  */
3653 static void hrtick_update(struct rq *rq)
3654 {
3655 	struct task_struct *curr = rq->curr;
3656 
3657 	if (!hrtick_enabled(rq) || curr->sched_class != &fair_sched_class)
3658 		return;
3659 
3660 	if (cfs_rq_of(&curr->se)->nr_running < sched_nr_latency)
3661 		hrtick_start_fair(rq, curr);
3662 }
3663 #else /* !CONFIG_SCHED_HRTICK */
3664 static inline void
3665 hrtick_start_fair(struct rq *rq, struct task_struct *p)
3666 {
3667 }
3668 
3669 static inline void hrtick_update(struct rq *rq)
3670 {
3671 }
3672 #endif
3673 
3674 /*
3675  * The enqueue_task method is called before nr_running is
3676  * increased. Here we update the fair scheduling stats and
3677  * then put the task into the rbtree:
3678  */
3679 static void
3680 enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
3681 {
3682 	struct cfs_rq *cfs_rq;
3683 	struct sched_entity *se = &p->se;
3684 
3685 	for_each_sched_entity(se) {
3686 		if (se->on_rq)
3687 			break;
3688 		cfs_rq = cfs_rq_of(se);
3689 		enqueue_entity(cfs_rq, se, flags);
3690 
3691 		/*
3692 		 * end evaluation on encountering a throttled cfs_rq
3693 		 *
3694 		 * note: in the case of encountering a throttled cfs_rq we will
3695 		 * post the final h_nr_running increment below.
3696 		 */
3697 		if (cfs_rq_throttled(cfs_rq))
3698 			break;
3699 		cfs_rq->h_nr_running++;
3700 
3701 		flags = ENQUEUE_WAKEUP;
3702 	}
3703 
3704 	for_each_sched_entity(se) {
3705 		cfs_rq = cfs_rq_of(se);
3706 		cfs_rq->h_nr_running++;
3707 
3708 		if (cfs_rq_throttled(cfs_rq))
3709 			break;
3710 
3711 		update_cfs_shares(cfs_rq);
3712 		update_entity_load_avg(se, 1);
3713 	}
3714 
3715 	if (!se) {
3716 		update_rq_runnable_avg(rq, rq->nr_running);
3717 		inc_nr_running(rq);
3718 	}
3719 	hrtick_update(rq);
3720 }
3721 
3722 static void set_next_buddy(struct sched_entity *se);
3723 
3724 /*
3725  * The dequeue_task method is called before nr_running is
3726  * decreased. We remove the task from the rbtree and
3727  * update the fair scheduling stats:
3728  */
3729 static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
3730 {
3731 	struct cfs_rq *cfs_rq;
3732 	struct sched_entity *se = &p->se;
3733 	int task_sleep = flags & DEQUEUE_SLEEP;
3734 
3735 	for_each_sched_entity(se) {
3736 		cfs_rq = cfs_rq_of(se);
3737 		dequeue_entity(cfs_rq, se, flags);
3738 
3739 		/*
3740 		 * end evaluation on encountering a throttled cfs_rq
3741 		 *
3742 		 * note: in the case of encountering a throttled cfs_rq we will
3743 		 * post the final h_nr_running decrement below.
3744 		 */
3745 		if (cfs_rq_throttled(cfs_rq))
3746 			break;
3747 		cfs_rq->h_nr_running--;
3748 
3749 		/* Don't dequeue parent if it has other entities besides us */
3750 		if (cfs_rq->load.weight) {
3751 			/*
3752 			 * Bias pick_next to pick a task from this cfs_rq, as
3753 			 * p is sleeping when it is within its sched_slice.
3754 			 */
3755 			if (task_sleep && parent_entity(se))
3756 				set_next_buddy(parent_entity(se));
3757 
3758 			/* avoid re-evaluating load for this entity */
3759 			se = parent_entity(se);
3760 			break;
3761 		}
3762 		flags |= DEQUEUE_SLEEP;
3763 	}
3764 
3765 	for_each_sched_entity(se) {
3766 		cfs_rq = cfs_rq_of(se);
3767 		cfs_rq->h_nr_running--;
3768 
3769 		if (cfs_rq_throttled(cfs_rq))
3770 			break;
3771 
3772 		update_cfs_shares(cfs_rq);
3773 		update_entity_load_avg(se, 1);
3774 	}
3775 
3776 	if (!se) {
3777 		dec_nr_running(rq);
3778 		update_rq_runnable_avg(rq, 1);
3779 	}
3780 	hrtick_update(rq);
3781 }
3782 
3783 #ifdef CONFIG_SMP
3784 /* Used instead of source_load when we know the type == 0 */
3785 static unsigned long weighted_cpuload(const int cpu)
3786 {
3787 	return cpu_rq(cpu)->cfs.runnable_load_avg;
3788 }
3789 
3790 /*
3791  * Return a low guess at the load of a migration-source cpu weighted
3792  * according to the scheduling class and "nice" value.
3793  *
3794  * We want to under-estimate the load of migration sources, to
3795  * balance conservatively.
3796  */
3797 static unsigned long source_load(int cpu, int type)
3798 {
3799 	struct rq *rq = cpu_rq(cpu);
3800 	unsigned long total = weighted_cpuload(cpu);
3801 
3802 	if (type == 0 || !sched_feat(LB_BIAS))
3803 		return total;
3804 
3805 	return min(rq->cpu_load[type-1], total);
3806 }
3807 
3808 /*
3809  * Return a high guess at the load of a migration-target cpu weighted
3810  * according to the scheduling class and "nice" value.
3811  */
3812 static unsigned long target_load(int cpu, int type)
3813 {
3814 	struct rq *rq = cpu_rq(cpu);
3815 	unsigned long total = weighted_cpuload(cpu);
3816 
3817 	if (type == 0 || !sched_feat(LB_BIAS))
3818 		return total;
3819 
3820 	return max(rq->cpu_load[type-1], total);
3821 }
3822 
3823 static unsigned long power_of(int cpu)
3824 {
3825 	return cpu_rq(cpu)->cpu_power;
3826 }
3827 
3828 static unsigned long cpu_avg_load_per_task(int cpu)
3829 {
3830 	struct rq *rq = cpu_rq(cpu);
3831 	unsigned long nr_running = ACCESS_ONCE(rq->nr_running);
3832 	unsigned long load_avg = rq->cfs.runnable_load_avg;
3833 
3834 	if (nr_running)
3835 		return load_avg / nr_running;
3836 
3837 	return 0;
3838 }
3839 
3840 static void record_wakee(struct task_struct *p)
3841 {
3842 	/*
3843 	 * Rough decay (wiping) for cost saving; don't worry
3844 	 * about the boundary, a really active task won't care
3845 	 * about the loss.
3846 	 */
3847 	if (jiffies > current->wakee_flip_decay_ts + HZ) {
3848 		current->wakee_flips = 0;
3849 		current->wakee_flip_decay_ts = jiffies;
3850 	}
3851 
3852 	if (current->last_wakee != p) {
3853 		current->last_wakee = p;
3854 		current->wakee_flips++;
3855 	}
3856 }
3857 
3858 static void task_waking_fair(struct task_struct *p)
3859 {
3860 	struct sched_entity *se = &p->se;
3861 	struct cfs_rq *cfs_rq = cfs_rq_of(se);
3862 	u64 min_vruntime;
3863 
3864 #ifndef CONFIG_64BIT
3865 	u64 min_vruntime_copy;
3866 
3867 	do {
3868 		min_vruntime_copy = cfs_rq->min_vruntime_copy;
3869 		smp_rmb();
3870 		min_vruntime = cfs_rq->min_vruntime;
3871 	} while (min_vruntime != min_vruntime_copy);
3872 #else
3873 	min_vruntime = cfs_rq->min_vruntime;
3874 #endif
3875 
3876 	se->vruntime -= min_vruntime;
3877 	record_wakee(p);
3878 }
3879 
3880 #ifdef CONFIG_FAIR_GROUP_SCHED
3881 /*
3882  * effective_load() calculates the load change as seen from the root_task_group
3883  *
3884  * Adding load to a group doesn't make a group heavier, but can cause movement
3885  * of group shares between cpus. Assuming the shares were perfectly aligned one
3886  * can calculate the shift in shares.
3887  *
3888  * Calculate the effective load difference if @wl is added (subtracted) to @tg
3889  * on this @cpu and results in a total addition (subtraction) of @wg to the
3890  * total group weight.
3891  *
3892  * Given a runqueue weight distribution (rw_i) we can compute a shares
3893  * distribution (s_i) using:
3894  *
3895  *   s_i = rw_i / \Sum rw_j						(1)
3896  *
3897  * Suppose we have 4 CPUs and our @tg is a direct child of the root group and
3898  * has 7 equal weight tasks, distributed as below (rw_i), with the resulting
3899  * shares distribution (s_i):
3900  *
3901  *   rw_i = {   2,   4,   1,   0 }
3902  *   s_i  = { 2/7, 4/7, 1/7,   0 }
3903  *
3904  * As per wake_affine() we're interested in the load of two CPUs (the CPU the
3905  * task used to run on and the CPU the waker is running on); we need to
3906  * compute the effect of waking a task on either CPU and, in case of a sync
3907  * wakeup, compute the effect of the current task going to sleep.
3908  *
3909  * So for a change of @wl to the local @cpu with an overall group weight change
3910  * of @wg we can compute the new shares distribution (s'_i) using:
3911  *
3912  *   s'_i = (rw_i + @wl) / (@wg + \Sum rw_j)				(2)
3913  *
3914  * Suppose we're interested in CPUs 0 and 1, and want to compute the load
3915  * differences in waking a task to CPU 0. The additional task changes the
3916  * weight and shares distributions like:
3917  *
3918  *   rw'_i = {   3,   4,   1,   0 }
3919  *   s'_i  = { 3/8, 4/8, 1/8,   0 }
3920  *
3921  * We can then compute the difference in effective weight by using:
3922  *
3923  *   dw_i = S * (s'_i - s_i)						(3)
3924  *
3925  * Where 'S' is the group weight as seen by its parent.
3926  *
3927  * Therefore the effective change in loads on CPU 0 would be 5/56 (3/8 - 2/7)
3928  * times the weight of the group. The effect on CPU 1 would be -4/56 (4/8 -
3929  * 4/7) times the weight of the group.
3930  */
3931 static long effective_load(struct task_group *tg, int cpu, long wl, long wg)
3932 {
3933 	struct sched_entity *se = tg->se[cpu];
3934 
3935 	if (!tg->parent || !wl)	/* the trivial, non-cgroup case */
3936 		return wl;
3937 
3938 	for_each_sched_entity(se) {
3939 		long w, W;
3940 
3941 		tg = se->my_q->tg;
3942 
3943 		/*
3944 		 * W = @wg + \Sum rw_j
3945 		 */
3946 		W = wg + calc_tg_weight(tg, se->my_q);
3947 
3948 		/*
3949 		 * w = rw_i + @wl
3950 		 */
3951 		w = se->my_q->load.weight + wl;
3952 
3953 		/*
3954 		 * wl = S * s'_i; see (2)
3955 		 */
3956 		if (W > 0 && w < W)
3957 			wl = (w * tg->shares) / W;
3958 		else
3959 			wl = tg->shares;
3960 
3961 		/*
3962 		 * Per the above, wl is the new se->load.weight value; since
3963 		 * those are clipped to [MIN_SHARES, ...) do so now. See
3964 		 * calc_cfs_shares().
3965 		 */
3966 		if (wl < MIN_SHARES)
3967 			wl = MIN_SHARES;
3968 
3969 		/*
3970 		 * wl = dw_i = S * (s'_i - s_i); see (3)
3971 		 */
3972 		wl -= se->load.weight;
3973 
3974 		/*
3975 		 * Recursively apply this logic to all parent groups to compute
3976 		 * the final effective load change on the root group. Since
3977 		 * only the @tg group gets extra weight, all parent groups can
3978 		 * only redistribute existing shares. @wl is the shift in shares
3979 		 * resulting from this level per the above.
3980 		 */
3981 		wg = 0;
3982 	}
3983 
3984 	return wl;
3985 }
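
/*
 * Worked example for the scenario above, assuming tg->shares == 1024 and a
 * nice-0 task weight of 1024: waking one task on CPU 0 gives
 * w = 2*1024 + 1024 = 3072 and W = 1024 + 7*1024 = 8192, so the new group
 * weight on CPU 0 is wl = 3072 * 1024 / 8192 = 384 (= S * 3/8).  With the
 * old weight at S * 2/7 ~= 292, the returned delta is ~ +92, i.e. roughly
 * S * 5/56 as stated in the comment above.
 */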
3986 #else
3987 
3988 static long effective_load(struct task_group *tg, int cpu, long wl, long wg)
3989 {
3990 	return wl;
3991 }
3992 
3993 #endif
3994 
3995 static int wake_wide(struct task_struct *p)
3996 {
3997 	int factor = this_cpu_read(sd_llc_size);
3998 
3999 	/*
4000 	 * wakee_flips tracks the switching frequency: a high value can mean many
4001 	 * wakees or rapid switching. Using the LLC size as the factor automatically
4002 	 * adjusts that threshold, so a bigger node leads to more pulling.
4003 	 */
4004 	if (p->wakee_flips > factor) {
4005 		/*
4006 		 * The wakee is somewhat hot and needs a certain amount of cpu
4007 		 * resources, so if the waker is far hotter, prefer to leave
4008 		 * the wakee alone.
4009 		 */
4010 		if (current->wakee_flips > (factor * p->wakee_flips))
4011 			return 1;
4012 	}
4013 
4014 	return 0;
4015 }
4016 
4017 static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync)
4018 {
4019 	s64 this_load, load;
4020 	int idx, this_cpu, prev_cpu;
4021 	unsigned long tl_per_task;
4022 	struct task_group *tg;
4023 	unsigned long weight;
4024 	int balanced;
4025 
4026 	/*
4027 	 * If we wake multiple tasks, be careful not to bounce
4028 	 * ourselves around too much.
4029 	 */
4030 	if (wake_wide(p))
4031 		return 0;
4032 
4033 	idx	  = sd->wake_idx;
4034 	this_cpu  = smp_processor_id();
4035 	prev_cpu  = task_cpu(p);
4036 	load	  = source_load(prev_cpu, idx);
4037 	this_load = target_load(this_cpu, idx);
4038 
4039 	/*
4040 	 * If sync wakeup then subtract the (maximum possible)
4041 	 * effect of the currently running task from the load
4042 	 * of the current CPU:
4043 	 */
4044 	if (sync) {
4045 		tg = task_group(current);
4046 		weight = current->se.load.weight;
4047 
4048 		this_load += effective_load(tg, this_cpu, -weight, -weight);
4049 		load += effective_load(tg, prev_cpu, 0, -weight);
4050 	}
4051 
4052 	tg = task_group(p);
4053 	weight = p->se.load.weight;
4054 
4055 	/*
4056 	 * In low-load situations, where prev_cpu is idle and this_cpu is idle
4057 	 * due to the sync cause above having dropped this_load to 0, we'll
4058 	 * always have an imbalance, but there's really nothing you can do
4059 	 * about that, so that's good too.
4060 	 *
4061 	 * Otherwise check if the two cpus are close enough in load to allow this
4062 	 * task to be woken on this_cpu.
4063 	 */
4064 	if (this_load > 0) {
4065 		s64 this_eff_load, prev_eff_load;
4066 
4067 		this_eff_load = 100;
4068 		this_eff_load *= power_of(prev_cpu);
4069 		this_eff_load *= this_load +
4070 			effective_load(tg, this_cpu, weight, weight);
4071 
4072 		prev_eff_load = 100 + (sd->imbalance_pct - 100) / 2;
4073 		prev_eff_load *= power_of(this_cpu);
4074 		prev_eff_load *= load + effective_load(tg, prev_cpu, 0, weight);
4075 
4076 		balanced = this_eff_load <= prev_eff_load;
4077 	} else
4078 		balanced = true;
4079 
4080 	/*
4081 	 * If the currently running task will sleep within
4082 	 * a reasonable amount of time then attract this newly
4083 	 * woken task:
4084 	 */
4085 	if (sync && balanced)
4086 		return 1;
4087 
4088 	schedstat_inc(p, se.statistics.nr_wakeups_affine_attempts);
4089 	tl_per_task = cpu_avg_load_per_task(this_cpu);
4090 
4091 	if (balanced ||
4092 	    (this_load <= load &&
4093 	     this_load + target_load(prev_cpu, idx) <= tl_per_task)) {
4094 		/*
4095 		 * This domain has SD_WAKE_AFFINE and
4096 		 * p is cache cold in this domain, and
4097 		 * there is no bad imbalance.
4098 		 */
4099 		schedstat_inc(sd, ttwu_move_affine);
4100 		schedstat_inc(p, se.statistics.nr_wakeups_affine);
4101 
4102 		return 1;
4103 	}
4104 	return 0;
4105 }
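
/*
 * Illustrative sketch, with assumed values: for a typical imbalance_pct of
 * 125 and equal cpu_power on both cpus, the comparison above reduces to
 * 100 * this_load' <= 112 * prev_load', i.e. the wakeup counts as
 * "balanced" as long as pulling the task here leaves this cpu no more than
 * roughly 12% busier than leaving the task on prev_cpu would.
 */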
4106 
4107 /*
4108  * find_idlest_group finds and returns the least busy CPU group within the
4109  * domain.
4110  */
4111 static struct sched_group *
4112 find_idlest_group(struct sched_domain *sd, struct task_struct *p,
4113 		  int this_cpu, int load_idx)
4114 {
4115 	struct sched_group *idlest = NULL, *group = sd->groups;
4116 	unsigned long min_load = ULONG_MAX, this_load = 0;
4117 	int imbalance = 100 + (sd->imbalance_pct-100)/2;
4118 
4119 	do {
4120 		unsigned long load, avg_load;
4121 		int local_group;
4122 		int i;
4123 
4124 		/* Skip over this group if it has no CPUs allowed */
4125 		if (!cpumask_intersects(sched_group_cpus(group),
4126 					tsk_cpus_allowed(p)))
4127 			continue;
4128 
4129 		local_group = cpumask_test_cpu(this_cpu,
4130 					       sched_group_cpus(group));
4131 
4132 		/* Tally up the load of all CPUs in the group */
4133 		avg_load = 0;
4134 
4135 		for_each_cpu(i, sched_group_cpus(group)) {
4136 			/* Bias balancing toward cpus of our domain */
4137 			if (local_group)
4138 				load = source_load(i, load_idx);
4139 			else
4140 				load = target_load(i, load_idx);
4141 
4142 			avg_load += load;
4143 		}
4144 
4145 		/* Adjust by relative CPU power of the group */
4146 		avg_load = (avg_load * SCHED_POWER_SCALE) / group->sgp->power;
4147 
4148 		if (local_group) {
4149 			this_load = avg_load;
4150 		} else if (avg_load < min_load) {
4151 			min_load = avg_load;
4152 			idlest = group;
4153 		}
4154 	} while (group = group->next, group != sd->groups);
4155 
4156 	if (!idlest || 100*this_load < imbalance*min_load)
4157 		return NULL;
4158 	return idlest;
4159 }
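
/*
 * Illustrative example, with assumed values: for imbalance_pct == 125 the
 * threshold above becomes imbalance = 112, so a remote group is returned
 * only when 100 * this_load >= 112 * min_load, i.e. when the local group
 * is at least ~12% more loaded than the idlest remote group; otherwise
 * NULL is returned and the caller descends into a child domain instead.
 */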
4160 
4161 /*
4162  * find_idlest_cpu - find the idlest cpu among the cpus in group.
4163  */
4164 static int
4165 find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
4166 {
4167 	unsigned long load, min_load = ULONG_MAX;
4168 	int idlest = -1;
4169 	int i;
4170 
4171 	/* Traverse only the allowed CPUs */
4172 	for_each_cpu_and(i, sched_group_cpus(group), tsk_cpus_allowed(p)) {
4173 		load = weighted_cpuload(i);
4174 
4175 		if (load < min_load || (load == min_load && i == this_cpu)) {
4176 			min_load = load;
4177 			idlest = i;
4178 		}
4179 	}
4180 
4181 	return idlest;
4182 }
4183 
4184 /*
4185  * Try and locate an idle CPU in the sched_domain.
4186  */
4187 static int select_idle_sibling(struct task_struct *p, int target)
4188 {
4189 	struct sched_domain *sd;
4190 	struct sched_group *sg;
4191 	int i = task_cpu(p);
4192 
4193 	if (idle_cpu(target))
4194 		return target;
4195 
4196 	/*
4197 	 * If the previous cpu is cache affine and idle, don't be stupid.
4198 	 */
4199 	if (i != target && cpus_share_cache(i, target) && idle_cpu(i))
4200 		return i;
4201 
4202 	/*
4203 	 * Otherwise, iterate the domains and find an eligible idle cpu.
4204 	 */
4205 	sd = rcu_dereference(per_cpu(sd_llc, target));
4206 	for_each_lower_domain(sd) {
4207 		sg = sd->groups;
4208 		do {
4209 			if (!cpumask_intersects(sched_group_cpus(sg),
4210 						tsk_cpus_allowed(p)))
4211 				goto next;
4212 
4213 			for_each_cpu(i, sched_group_cpus(sg)) {
4214 				if (i == target || !idle_cpu(i))
4215 					goto next;
4216 			}
4217 
4218 			target = cpumask_first_and(sched_group_cpus(sg),
4219 					tsk_cpus_allowed(p));
4220 			goto done;
4221 next:
4222 			sg = sg->next;
4223 		} while (sg != sd->groups);
4224 	}
4225 done:
4226 	return target;
4227 }
4228 
4229 /*
4230  * select_task_rq_fair: balance the current task (running on cpu) in domains
4231  * that have the sd_flag flag set. In practice, this is SD_BALANCE_WAKE,
4232  * SD_BALANCE_FORK, or SD_BALANCE_EXEC.
4233  *
4234  * Balance, i.e. select the least loaded group.
4235  *
4236  * Returns the target CPU number, or the same CPU if no balancing is needed.
4237  *
4238  * preempt must be disabled.
4239  */
4240 static int
4241 select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_flags)
4242 {
4243 	struct sched_domain *tmp, *affine_sd = NULL, *sd = NULL;
4244 	int cpu = smp_processor_id();
4245 	int new_cpu = cpu;
4246 	int want_affine = 0;
4247 	int sync = wake_flags & WF_SYNC;
4248 
4249 	if (p->nr_cpus_allowed == 1)
4250 		return prev_cpu;
4251 
4252 	if (sd_flag & SD_BALANCE_WAKE) {
4253 		if (cpumask_test_cpu(cpu, tsk_cpus_allowed(p)))
4254 			want_affine = 1;
4255 		new_cpu = prev_cpu;
4256 	}
4257 
4258 	rcu_read_lock();
4259 	for_each_domain(cpu, tmp) {
4260 		if (!(tmp->flags & SD_LOAD_BALANCE))
4261 			continue;
4262 
4263 		/*
4264 		 * If both cpu and prev_cpu are part of this domain,
4265 		 * cpu is a valid SD_WAKE_AFFINE target.
4266 		 */
4267 		if (want_affine && (tmp->flags & SD_WAKE_AFFINE) &&
4268 		    cpumask_test_cpu(prev_cpu, sched_domain_span(tmp))) {
4269 			affine_sd = tmp;
4270 			break;
4271 		}
4272 
4273 		if (tmp->flags & sd_flag)
4274 			sd = tmp;
4275 	}
4276 
4277 	if (affine_sd) {
4278 		if (cpu != prev_cpu && wake_affine(affine_sd, p, sync))
4279 			prev_cpu = cpu;
4280 
4281 		new_cpu = select_idle_sibling(p, prev_cpu);
4282 		goto unlock;
4283 	}
4284 
4285 	while (sd) {
4286 		int load_idx = sd->forkexec_idx;
4287 		struct sched_group *group;
4288 		int weight;
4289 
4290 		if (!(sd->flags & sd_flag)) {
4291 			sd = sd->child;
4292 			continue;
4293 		}
4294 
4295 		if (sd_flag & SD_BALANCE_WAKE)
4296 			load_idx = sd->wake_idx;
4297 
4298 		group = find_idlest_group(sd, p, cpu, load_idx);
4299 		if (!group) {
4300 			sd = sd->child;
4301 			continue;
4302 		}
4303 
4304 		new_cpu = find_idlest_cpu(group, p, cpu);
4305 		if (new_cpu == -1 || new_cpu == cpu) {
4306 			/* Now try balancing at a lower domain level of cpu */
4307 			sd = sd->child;
4308 			continue;
4309 		}
4310 
4311 		/* Now try balancing at a lower domain level of new_cpu */
4312 		cpu = new_cpu;
4313 		weight = sd->span_weight;
4314 		sd = NULL;
4315 		for_each_domain(cpu, tmp) {
4316 			if (weight <= tmp->span_weight)
4317 				break;
4318 			if (tmp->flags & sd_flag)
4319 				sd = tmp;
4320 		}
4321 		/* while loop will break here if sd == NULL */
4322 	}
4323 unlock:
4324 	rcu_read_unlock();
4325 
4326 	return new_cpu;
4327 }
4328 
4329 /*
4330  * Called immediately before a task is migrated to a new cpu; task_cpu(p) and
4331  * cfs_rq_of(p) references at time of call are still valid and identify the
4332  * previous cpu.  However, the caller only guarantees p->pi_lock is held; no
4333  * other assumptions, including the state of rq->lock, should be made.
4334  */
4335 static void
4336 migrate_task_rq_fair(struct task_struct *p, int next_cpu)
4337 {
4338 	struct sched_entity *se = &p->se;
4339 	struct cfs_rq *cfs_rq = cfs_rq_of(se);
4340 
4341 	/*
4342 	 * Load tracking: accumulate removed load so that it can be processed
4343 	 * when we next update owning cfs_rq under rq->lock.  Tasks contribute
4344 	 * to blocked load iff they have a positive decay-count.  It can never
4345 	 * be negative here since on-rq tasks have decay-count == 0.
4346 	 */
4347 	if (se->avg.decay_count) {
4348 		se->avg.decay_count = -__synchronize_entity_decay(se);
4349 		atomic_long_add(se->avg.load_avg_contrib,
4350 						&cfs_rq->removed_load);
4351 	}
4352 }
4353 #endif /* CONFIG_SMP */
4354 
4355 static unsigned long
4356 wakeup_gran(struct sched_entity *curr, struct sched_entity *se)
4357 {
4358 	unsigned long gran = sysctl_sched_wakeup_granularity;
4359 
4360 	/*
4361 	 * Since it is curr that is running now, convert the gran from
4362 	 * real-time to virtual-time in its units.
4363 	 *
4364 	 * By using 'se' instead of 'curr' we penalize light tasks, so
4365 	 * they get preempted easier. That is, if 'se' < 'curr' then
4366 	 * the resulting gran will be larger, therefore penalizing the
4367 	 * lighter, if otoh 'se' > 'curr' then the resulting gran will
4368 	 * be smaller, again penalizing the lighter task.
4369 	 *
4370 	 * This is especially important for buddies when the leftmost
4371 	 * task is higher priority than the buddy.
4372 	 */
4373 	return calc_delta_fair(gran, se);
4374 }
4375 
4376 /*
4377  * Should 'se' preempt 'curr'.
4378  *
4379  *             |s1
4380  *        |s2
4381  *   |s3
4382  *         g
4383  *      |<--->|c
4384  *
4385  *  w(c, s1) = -1
4386  *  w(c, s2) =  0
4387  *  w(c, s3) =  1
4388  *
4389  */
4390 static int
4391 wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se)
4392 {
4393 	s64 gran, vdiff = curr->vruntime - se->vruntime;
4394 
4395 	if (vdiff <= 0)
4396 		return -1;
4397 
4398 	gran = wakeup_gran(curr, se);
4399 	if (vdiff > gran)
4400 		return 1;
4401 
4402 	return 0;
4403 }
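
/*
 * Worked example, with assumed values: with the 1ms base wakeup
 * granularity and both entities at nice 0, wakeup_gran() is 1ms of virtual
 * time, so this returns 1 (preempt) only when the woken entity's vruntime
 * trails curr's by more than 1ms; a smaller positive gap returns 0 and a
 * non-positive one returns -1.
 */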
4404 
4405 static void set_last_buddy(struct sched_entity *se)
4406 {
4407 	if (entity_is_task(se) && unlikely(task_of(se)->policy == SCHED_IDLE))
4408 		return;
4409 
4410 	for_each_sched_entity(se)
4411 		cfs_rq_of(se)->last = se;
4412 }
4413 
4414 static void set_next_buddy(struct sched_entity *se)
4415 {
4416 	if (entity_is_task(se) && unlikely(task_of(se)->policy == SCHED_IDLE))
4417 		return;
4418 
4419 	for_each_sched_entity(se)
4420 		cfs_rq_of(se)->next = se;
4421 }
4422 
4423 static void set_skip_buddy(struct sched_entity *se)
4424 {
4425 	for_each_sched_entity(se)
4426 		cfs_rq_of(se)->skip = se;
4427 }
4428 
4429 /*
4430  * Preempt the current task with a newly woken task if needed:
4431  */
4432 static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_flags)
4433 {
4434 	struct task_struct *curr = rq->curr;
4435 	struct sched_entity *se = &curr->se, *pse = &p->se;
4436 	struct cfs_rq *cfs_rq = task_cfs_rq(curr);
4437 	int scale = cfs_rq->nr_running >= sched_nr_latency;
4438 	int next_buddy_marked = 0;
4439 
4440 	if (unlikely(se == pse))
4441 		return;
4442 
4443 	/*
4444 	 * This is possible from callers such as move_task(), in which we
4445 	 * unconditionally check_preempt_curr() after an enqueue (which may have
4446 	 * led to a throttle).  This both saves work and prevents false
4447 	 * next-buddy nomination below.
4448 	 */
4449 	if (unlikely(throttled_hierarchy(cfs_rq_of(pse))))
4450 		return;
4451 
4452 	if (sched_feat(NEXT_BUDDY) && scale && !(wake_flags & WF_FORK)) {
4453 		set_next_buddy(pse);
4454 		next_buddy_marked = 1;
4455 	}
4456 
4457 	/*
4458 	 * We can come here with TIF_NEED_RESCHED already set from new task
4459 	 * wake up path.
4460 	 *
4461 	 * Note: this also catches the edge-case of curr being in a throttled
4462 	 * group (e.g. via set_curr_task), since update_curr() (in the
4463 	 * enqueue of curr) will have resulted in resched being set.  This
4464 	 * prevents us from potentially nominating it as a false LAST_BUDDY
4465 	 * below.
4466 	 */
4467 	if (test_tsk_need_resched(curr))
4468 		return;
4469 
4470 	/* Idle tasks are by definition preempted by non-idle tasks. */
4471 	if (unlikely(curr->policy == SCHED_IDLE) &&
4472 	    likely(p->policy != SCHED_IDLE))
4473 		goto preempt;
4474 
4475 	/*
4476 	 * Batch and idle tasks do not preempt non-idle tasks (their preemption
4477 	 * is driven by the tick):
4478 	 */
4479 	if (unlikely(p->policy != SCHED_NORMAL) || !sched_feat(WAKEUP_PREEMPTION))
4480 		return;
4481 
4482 	find_matching_se(&se, &pse);
4483 	update_curr(cfs_rq_of(se));
4484 	BUG_ON(!pse);
4485 	if (wakeup_preempt_entity(se, pse) == 1) {
4486 		/*
4487 		 * Bias pick_next to pick the sched entity that is
4488 		 * triggering this preemption.
4489 		 */
4490 		if (!next_buddy_marked)
4491 			set_next_buddy(pse);
4492 		goto preempt;
4493 	}
4494 
4495 	return;
4496 
4497 preempt:
4498 	resched_task(curr);
4499 	/*
4500 	 * Only set the backward buddy when the current task is still
4501 	 * on the rq. This can happen when a wakeup gets interleaved
4502 	 * with schedule on the ->pre_schedule() or idle_balance()
4503 	 * point, either of which can drop the rq lock.
4504 	 *
4505 	 * Also, during early boot the idle thread is in the fair class;
4506 	 * for obvious reasons it's a bad idea to schedule back to it.
4507 	 */
4508 	if (unlikely(!se->on_rq || curr == rq->idle))
4509 		return;
4510 
4511 	if (sched_feat(LAST_BUDDY) && scale && entity_is_task(se))
4512 		set_last_buddy(se);
4513 }
4514 
4515 static struct task_struct *pick_next_task_fair(struct rq *rq)
4516 {
4517 	struct task_struct *p;
4518 	struct cfs_rq *cfs_rq = &rq->cfs;
4519 	struct sched_entity *se;
4520 
4521 	if (!cfs_rq->nr_running)
4522 		return NULL;
4523 
4524 	do {
4525 		se = pick_next_entity(cfs_rq);
4526 		set_next_entity(cfs_rq, se);
4527 		cfs_rq = group_cfs_rq(se);
4528 	} while (cfs_rq);
4529 
4530 	p = task_of(se);
4531 	if (hrtick_enabled(rq))
4532 		hrtick_start_fair(rq, p);
4533 
4534 	return p;
4535 }
4536 
4537 /*
4538  * Account for a descheduled task:
4539  */
4540 static void put_prev_task_fair(struct rq *rq, struct task_struct *prev)
4541 {
4542 	struct sched_entity *se = &prev->se;
4543 	struct cfs_rq *cfs_rq;
4544 
4545 	for_each_sched_entity(se) {
4546 		cfs_rq = cfs_rq_of(se);
4547 		put_prev_entity(cfs_rq, se);
4548 	}
4549 }
4550 
4551 /*
4552  * sched_yield() is very simple
4553  *
4554  * The magic of dealing with the ->skip buddy is in pick_next_entity.
4555  */
4556 static void yield_task_fair(struct rq *rq)
4557 {
4558 	struct task_struct *curr = rq->curr;
4559 	struct cfs_rq *cfs_rq = task_cfs_rq(curr);
4560 	struct sched_entity *se = &curr->se;
4561 
4562 	/*
4563 	 * Are we the only task in the tree?
4564 	 */
4565 	if (unlikely(rq->nr_running == 1))
4566 		return;
4567 
4568 	clear_buddies(cfs_rq, se);
4569 
4570 	if (curr->policy != SCHED_BATCH) {
4571 		update_rq_clock(rq);
4572 		/*
4573 		 * Update run-time statistics of the 'current'.
4574 		 */
4575 		update_curr(cfs_rq);
4576 		/*
4577 		 * Tell update_rq_clock() that we've just updated,
4578 		 * so we don't do a microscopic update in schedule()
4579 		 * and double the fastpath cost.
4580 		 */
4581 		rq->skip_clock_update = 1;
4582 	}
4583 
4584 	set_skip_buddy(se);
4585 }
4586 
4587 static bool yield_to_task_fair(struct rq *rq, struct task_struct *p, bool preempt)
4588 {
4589 	struct sched_entity *se = &p->se;
4590 
4591 	/* throttled hierarchies are not runnable */
4592 	if (!se->on_rq || throttled_hierarchy(cfs_rq_of(se)))
4593 		return false;
4594 
4595 	/* Tell the scheduler that we'd really like se to run next. */
4596 	set_next_buddy(se);
4597 
4598 	yield_task_fair(rq);
4599 
4600 	return true;
4601 }
4602 
4603 #ifdef CONFIG_SMP
4604 /**************************************************
4605  * Fair scheduling class load-balancing methods.
4606  *
4607  * BASICS
4608  *
4609  * The purpose of load-balancing is to achieve the same basic fairness the
4610  * per-cpu scheduler provides, namely provide a proportional amount of compute
4611  * time to each task. This is expressed in the following equation:
4612  *
4613  *   W_i,n/P_i == W_j,n/P_j for all i,j                               (1)
4614  *
4615  * Where W_i,n is the n-th weight average for cpu i. The instantaneous weight
4616  * W_i,0 is defined as:
4617  *
4618  *   W_i,0 = \Sum_j w_i,j                                             (2)
4619  *
4620  * Where w_i,j is the weight of the j-th runnable task on cpu i. This weight
4621  * is derived from the nice value as per prio_to_weight[].
4622  *
4623  * The weight average is an exponential decay average of the instantaneous
4624  * weight:
4625  *
4626  *   W'_i,n = (2^n - 1) / 2^n * W_i,n + 1 / 2^n * W_i,0               (3)
4627  *
4628  * P_i is the cpu power (or compute capacity) of cpu i, typically it is the
4629  * fraction of 'recent' time available for SCHED_OTHER task execution. But it
4630  * can also include other factors [XXX].
4631  *
4632  * To achieve this balance we define a measure of imbalance which follows
4633  * directly from (1):
4634  *
4635  *   imb_i,j = max{ avg(W/P), W_i/P_i } - min{ avg(W/P), W_j/P_j }    (4)
4636  *
4637  * We then move tasks around to minimize the imbalance. In the continuous
4638  * function space it is obvious this converges; in the discrete case we get
4639  * a few fun cases generally called infeasible weight scenarios.
4640  *
4641  * [XXX expand on:
4642  *     - infeasible weights;
4643  *     - local vs global optima in the discrete case. ]
4644  *
4645  *
4646  * SCHED DOMAINS
4647  *
4648  * In order to solve the imbalance equation (4), and avoid the obvious O(n^2)
4649  * for all i,j solution, we create a tree of cpus that follows the hardware
4650  * topology where each level pairs two lower groups (or better). This results
4651  * in O(log n) layers. Furthermore we reduce the number of cpus going up the
4652  * tree to only the first of the previous level and we decrease the frequency
4653  * of load-balance at each level inv. proportional to the number of cpus in
4654  * the groups.
4655  *
4656  * This yields:
4657  *
4658  *     log_2 n     1     n
4659  *   \Sum       { --- * --- * 2^i } = O(n)                            (5)
4660  *     i = 0      2^i   2^i
4661  *                               `- size of each group
4662  *         |         |     `- number of cpus doing load-balance
4663  *         |         `- freq
4664  *         `- sum over all levels
4665  *
4666  * Coupled with a limit on how many tasks we can migrate every balance pass,
4667  * this makes (5) the runtime complexity of the balancer.
4668  *
4669  * An important property here is that each CPU is still (indirectly) connected
4670  * to every other cpu in at most O(log n) steps:
4671  *
4672  * The adjacency matrix of the resulting graph is given by:
4673  *
4674  *             log_2 n
4675  *   A_i,j = \Union     (i % 2^k == 0) && i / 2^(k+1) == j / 2^(k+1)  (6)
4676  *             k = 0
4677  *
4678  * And you'll find that:
4679  *
4680  *   A^(log_2 n)_i,j != 0  for all i,j                                (7)
4681  *
4682  * Showing there's indeed a path between every cpu in at most O(log n) steps.
4683  * The task movement gives a factor of O(m), giving a convergence complexity
4684  * of:
4685  *
4686  *   O(nm log n),  n := nr_cpus, m := nr_tasks                        (8)
4687  *
4688  *
4689  * WORK CONSERVING
4690  *
4691  * In order to avoid CPUs going idle while there's still work to do, new idle
4692  * balancing is more aggressive and has the newly idle cpu iterate up the domain
4693  * tree itself instead of relying on other CPUs to bring it work.
4694  *
4695  * This adds some complexity to both (5) and (8) but it reduces the total idle
4696  * time.
4697  *
4698  * [XXX more?]
4699  *
4700  *
4701  * CGROUPS
4702  *
4703  * Cgroups make a horror show out of (2); instead of a simple sum we get:
4704  *
4705  *                                s_k,i
4706  *   W_i,0 = \Sum_j \Prod_k w_k * -----                               (9)
4707  *                                 S_k
4708  *
4709  * Where
4710  *
4711  *   s_k,i = \Sum_j w_i,j,k  and  S_k = \Sum_i s_k,i                 (10)
4712  *
4713  * w_i,j,k is the weight of the j-th runnable task in the k-th cgroup on cpu i.
4714  *
4715  * The big problem is S_k: it's a global sum needed to compute a local (W_i)
4716  * property.
4717  *
4718  * [XXX write more on how we solve this.. _after_ merging pjt's patches that
4719  *      rewrite all of this once again.]
4720  */
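
/*
 * Illustrative check of (5), with an assumed n: for n = 8 cpus the
 * per-level terms are 8 + 4 + 2 + 1 = 15 <= 2n, matching the O(n) bound;
 * every additional level halves both the number of cpus doing the
 * balancing and its frequency while doubling the group size.
 */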
4721 
4722 static unsigned long __read_mostly max_load_balance_interval = HZ/10;
4723 
4724 enum fbq_type { regular, remote, all };
4725 
4726 #define LBF_ALL_PINNED	0x01
4727 #define LBF_NEED_BREAK	0x02
4728 #define LBF_DST_PINNED  0x04
4729 #define LBF_SOME_PINNED	0x08
4730 
4731 struct lb_env {
4732 	struct sched_domain	*sd;
4733 
4734 	struct rq		*src_rq;
4735 	int			src_cpu;
4736 
4737 	int			dst_cpu;
4738 	struct rq		*dst_rq;
4739 
4740 	struct cpumask		*dst_grpmask;
4741 	int			new_dst_cpu;
4742 	enum cpu_idle_type	idle;
4743 	long			imbalance;
4744 	/* The set of CPUs under consideration for load-balancing */
4745 	struct cpumask		*cpus;
4746 
4747 	unsigned int		flags;
4748 
4749 	unsigned int		loop;
4750 	unsigned int		loop_break;
4751 	unsigned int		loop_max;
4752 
4753 	enum fbq_type		fbq_type;
4754 };
4755 
4756 /*
4757  * move_task - move a task from one runqueue to another runqueue.
4758  * Both runqueues must be locked.
4759  */
4760 static void move_task(struct task_struct *p, struct lb_env *env)
4761 {
4762 	deactivate_task(env->src_rq, p, 0);
4763 	set_task_cpu(p, env->dst_cpu);
4764 	activate_task(env->dst_rq, p, 0);
4765 	check_preempt_curr(env->dst_rq, p, 0);
4766 }
4767 
4768 /*
4769  * Is this task likely cache-hot:
4770  */
4771 static int
4772 task_hot(struct task_struct *p, u64 now, struct sched_domain *sd)
4773 {
4774 	s64 delta;
4775 
4776 	if (p->sched_class != &fair_sched_class)
4777 		return 0;
4778 
4779 	if (unlikely(p->policy == SCHED_IDLE))
4780 		return 0;
4781 
4782 	/*
4783 	 * Buddy candidates are cache hot:
4784 	 */
4785 	if (sched_feat(CACHE_HOT_BUDDY) && this_rq()->nr_running &&
4786 			(&p->se == cfs_rq_of(&p->se)->next ||
4787 			 &p->se == cfs_rq_of(&p->se)->last))
4788 		return 1;
4789 
4790 	if (sysctl_sched_migration_cost == -1)
4791 		return 1;
4792 	if (sysctl_sched_migration_cost == 0)
4793 		return 0;
4794 
4795 	delta = now - p->se.exec_start;
4796 
4797 	return delta < (s64)sysctl_sched_migration_cost;
4798 }
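
/*
 * Illustrative example, assuming the default 0.5ms
 * sysctl_sched_migration_cost: a task whose exec_start was updated 0.2ms
 * ago is treated as cache hot, while one that has not run for 2ms is fair
 * game for migration; setting the sysctl to -1 marks every task hot and 0
 * marks none.
 */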
4799 
4800 #ifdef CONFIG_NUMA_BALANCING
4801 /* Returns true if the destination node has incurred more faults */
4802 static bool migrate_improves_locality(struct task_struct *p, struct lb_env *env)
4803 {
4804 	int src_nid, dst_nid;
4805 
4806 	if (!sched_feat(NUMA_FAVOUR_HIGHER) || !p->numa_faults ||
4807 	    !(env->sd->flags & SD_NUMA)) {
4808 		return false;
4809 	}
4810 
4811 	src_nid = cpu_to_node(env->src_cpu);
4812 	dst_nid = cpu_to_node(env->dst_cpu);
4813 
4814 	if (src_nid == dst_nid)
4815 		return false;
4816 
4817 	/* Always encourage migration to the preferred node. */
4818 	if (dst_nid == p->numa_preferred_nid)
4819 		return true;
4820 
4821 	/* If both task and group weight improve, this move is a winner. */
4822 	if (task_weight(p, dst_nid) > task_weight(p, src_nid) &&
4823 	    group_weight(p, dst_nid) > group_weight(p, src_nid))
4824 		return true;
4825 
4826 	return false;
4827 }
4828 
4829 
4830 static bool migrate_degrades_locality(struct task_struct *p, struct lb_env *env)
4831 {
4832 	int src_nid, dst_nid;
4833 
4834 	if (!sched_feat(NUMA) || !sched_feat(NUMA_RESIST_LOWER))
4835 		return false;
4836 
4837 	if (!p->numa_faults || !(env->sd->flags & SD_NUMA))
4838 		return false;
4839 
4840 	src_nid = cpu_to_node(env->src_cpu);
4841 	dst_nid = cpu_to_node(env->dst_cpu);
4842 
4843 	if (src_nid == dst_nid)
4844 		return false;
4845 
4846 	/* Migrating away from the preferred node is always bad. */
4847 	if (src_nid == p->numa_preferred_nid)
4848 		return true;
4849 
4850 	/* If either task or group weight get worse, don't do it. */
4851 	if (task_weight(p, dst_nid) < task_weight(p, src_nid) ||
4852 	    group_weight(p, dst_nid) < group_weight(p, src_nid))
4853 		return true;
4854 
4855 	return false;
4856 }
4857 
4858 #else
4859 static inline bool migrate_improves_locality(struct task_struct *p,
4860 					     struct lb_env *env)
4861 {
4862 	return false;
4863 }
4864 
4865 static inline bool migrate_degrades_locality(struct task_struct *p,
4866 					     struct lb_env *env)
4867 {
4868 	return false;
4869 }
4870 #endif
4871 
4872 /*
4873  * can_migrate_task - can task p be migrated from runqueue rq to this_cpu?
4874  */
4875 static
4876 int can_migrate_task(struct task_struct *p, struct lb_env *env)
4877 {
4878 	int tsk_cache_hot = 0;
4879 	/*
4880 	 * We do not migrate tasks that:
4881 	 * 1) are throttled (see throttled_lb_pair()), or
4882 	 * 2) cannot be migrated to this CPU due to cpus_allowed, or
4883 	 * 3) are running (obviously), or
4884 	 * 4) are cache-hot on their current CPU.
4885 	 */
4886 	if (throttled_lb_pair(task_group(p), env->src_cpu, env->dst_cpu))
4887 		return 0;
4888 
4889 	if (!cpumask_test_cpu(env->dst_cpu, tsk_cpus_allowed(p))) {
4890 		int cpu;
4891 
4892 		schedstat_inc(p, se.statistics.nr_failed_migrations_affine);
4893 
4894 		env->flags |= LBF_SOME_PINNED;
4895 
4896 		/*
4897 		 * Remember if this task can be migrated to any other cpu in
4898 		 * our sched_group. We may want to revisit it if we couldn't
4899 		 * meet load balance goals by pulling other tasks on src_cpu.
4900 		 *
4901 		 * Also avoid computing new_dst_cpu if we have already computed
4902 		 * one in the current iteration.
4903 		 */
4904 		if (!env->dst_grpmask || (env->flags & LBF_DST_PINNED))
4905 			return 0;
4906 
4907 		/* Prevent re-selecting dst_cpu via env's cpus */
4908 		for_each_cpu_and(cpu, env->dst_grpmask, env->cpus) {
4909 			if (cpumask_test_cpu(cpu, tsk_cpus_allowed(p))) {
4910 				env->flags |= LBF_DST_PINNED;
4911 				env->new_dst_cpu = cpu;
4912 				break;
4913 			}
4914 		}
4915 
4916 		return 0;
4917 	}
4918 
4919 	/* Record that we found at least one task that could run on dst_cpu */
4920 	env->flags &= ~LBF_ALL_PINNED;
4921 
4922 	if (task_running(env->src_rq, p)) {
4923 		schedstat_inc(p, se.statistics.nr_failed_migrations_running);
4924 		return 0;
4925 	}
4926 
4927 	/*
4928 	 * Aggressive migration if:
4929 	 * 1) the destination node is the task's preferred node, or
4930 	 * 2) the task is cache cold, or
4931 	 * 3) too many balance attempts have failed.
4932 	 */
4933 	tsk_cache_hot = task_hot(p, rq_clock_task(env->src_rq), env->sd);
4934 	if (!tsk_cache_hot)
4935 		tsk_cache_hot = migrate_degrades_locality(p, env);
4936 
4937 	if (migrate_improves_locality(p, env)) {
4938 #ifdef CONFIG_SCHEDSTATS
4939 		if (tsk_cache_hot) {
4940 			schedstat_inc(env->sd, lb_hot_gained[env->idle]);
4941 			schedstat_inc(p, se.statistics.nr_forced_migrations);
4942 		}
4943 #endif
4944 		return 1;
4945 	}
4946 
4947 	if (!tsk_cache_hot ||
4948 		env->sd->nr_balance_failed > env->sd->cache_nice_tries) {
4949 
4950 		if (tsk_cache_hot) {
4951 			schedstat_inc(env->sd, lb_hot_gained[env->idle]);
4952 			schedstat_inc(p, se.statistics.nr_forced_migrations);
4953 		}
4954 
4955 		return 1;
4956 	}
4957 
4958 	schedstat_inc(p, se.statistics.nr_failed_migrations_hot);
4959 	return 0;
4960 }
4961 
4962 /*
4963  * move_one_task tries to move exactly one task from busiest to this_rq, as
4964  * part of active balancing operations within "domain".
4965  * Returns 1 if successful and 0 otherwise.
4966  *
4967  * Called with both runqueues locked.
4968  */
4969 static int move_one_task(struct lb_env *env)
4970 {
4971 	struct task_struct *p, *n;
4972 
4973 	list_for_each_entry_safe(p, n, &env->src_rq->cfs_tasks, se.group_node) {
4974 		if (!can_migrate_task(p, env))
4975 			continue;
4976 
4977 		move_task(p, env);
4978 		/*
4979 		 * Right now, this is only the second place move_task()
4980 		 * is called, so we can safely collect move_task()
4981 		 * stats here rather than inside move_task().
4982 		 */
4983 		schedstat_inc(env->sd, lb_gained[env->idle]);
4984 		return 1;
4985 	}
4986 	return 0;
4987 }
4988 
4989 static const unsigned int sched_nr_migrate_break = 32;
4990 
4991 /*
4992  * move_tasks tries to move up to imbalance weighted load from busiest to
4993  * this_rq, as part of a balancing operation within domain "sd".
4994  * Returns 1 if successful and 0 otherwise.
4995  *
4996  * Called with both runqueues locked.
4997  */
4998 static int move_tasks(struct lb_env *env)
4999 {
5000 	struct list_head *tasks = &env->src_rq->cfs_tasks;
5001 	struct task_struct *p;
5002 	unsigned long load;
5003 	int pulled = 0;
5004 
5005 	if (env->imbalance <= 0)
5006 		return 0;
5007 
5008 	while (!list_empty(tasks)) {
5009 		p = list_first_entry(tasks, struct task_struct, se.group_node);
5010 
5011 		env->loop++;
5012 		/* We've more or less seen every task there is, call it quits */
5013 		if (env->loop > env->loop_max)
5014 			break;
5015 
5016 		/* take a breather every nr_migrate tasks */
5017 		if (env->loop > env->loop_break) {
5018 			env->loop_break += sched_nr_migrate_break;
5019 			env->flags |= LBF_NEED_BREAK;
5020 			break;
5021 		}
5022 
5023 		if (!can_migrate_task(p, env))
5024 			goto next;
5025 
5026 		load = task_h_load(p);
5027 
5028 		if (sched_feat(LB_MIN) && load < 16 && !env->sd->nr_balance_failed)
5029 			goto next;
5030 
5031 		if ((load / 2) > env->imbalance)
5032 			goto next;
5033 
5034 		move_task(p, env);
5035 		pulled++;
5036 		env->imbalance -= load;
5037 
5038 #ifdef CONFIG_PREEMPT
5039 		/*
5040 		 * NEWIDLE balancing is a source of latency, so preemptible
5041 		 * kernels will stop after the first task is pulled to minimize
5042 		 * the critical section.
5043 		 */
5044 		if (env->idle == CPU_NEWLY_IDLE)
5045 			break;
5046 #endif
5047 
5048 		/*
5049 		 * We only want to steal up to the prescribed amount of
5050 		 * weighted load.
5051 		 */
5052 		if (env->imbalance <= 0)
5053 			break;
5054 
5055 		continue;
5056 next:
5057 		list_move_tail(&p->se.group_node, tasks);
5058 	}
5059 
5060 	/*
5061 	 * Right now, this is one of only two places move_task() is called,
5062 	 * so we can safely collect move_task() stats here rather than
5063 	 * inside move_task().
5064 	 */
5065 	schedstat_add(env->sd, lb_gained[env->idle], pulled);
5066 
5067 	return pulled;
5068 }
5069 
5070 #ifdef CONFIG_FAIR_GROUP_SCHED
5071 /*
5072  * update tg->load_weight by folding this cpu's load_avg
5073  */
5074 static void __update_blocked_averages_cpu(struct task_group *tg, int cpu)
5075 {
5076 	struct sched_entity *se = tg->se[cpu];
5077 	struct cfs_rq *cfs_rq = tg->cfs_rq[cpu];
5078 
5079 	/* throttled entities do not contribute to load */
5080 	if (throttled_hierarchy(cfs_rq))
5081 		return;
5082 
5083 	update_cfs_rq_blocked_load(cfs_rq, 1);
5084 
5085 	if (se) {
5086 		update_entity_load_avg(se, 1);
5087 		/*
5088 		 * We pivot on our runnable average having decayed to zero for
5089 		 * list removal.  This generally implies that all our children
5090 		 * have also been removed (modulo rounding error or bandwidth
5091 		 * control); however, such cases are rare and we can fix these
5092 		 * at enqueue.
5093 		 *
5094 		 * TODO: fix up out-of-order children on enqueue.
5095 		 */
5096 		if (!se->avg.runnable_avg_sum && !cfs_rq->nr_running)
5097 			list_del_leaf_cfs_rq(cfs_rq);
5098 	} else {
5099 		struct rq *rq = rq_of(cfs_rq);
5100 		update_rq_runnable_avg(rq, rq->nr_running);
5101 	}
5102 }
5103 
5104 static void update_blocked_averages(int cpu)
5105 {
5106 	struct rq *rq = cpu_rq(cpu);
5107 	struct cfs_rq *cfs_rq;
5108 	unsigned long flags;
5109 
5110 	raw_spin_lock_irqsave(&rq->lock, flags);
5111 	update_rq_clock(rq);
5112 	/*
5113 	 * Iterates the task_group tree in a bottom up fashion, see
5114 	 * list_add_leaf_cfs_rq() for details.
5115 	 */
5116 	for_each_leaf_cfs_rq(rq, cfs_rq) {
5117 		/*
5118 		 * Note: We may want to consider periodically releasing
5119 		 * rq->lock around these updates so that creating many task
5120 		 * groups does not result in continually extending hold time.
5121 		 */
5122 		__update_blocked_averages_cpu(cfs_rq->tg, rq->cpu);
5123 	}
5124 
5125 	raw_spin_unlock_irqrestore(&rq->lock, flags);
5126 }
5127 
5128 /*
5129  * Compute the hierarchical load factor for cfs_rq and all its ancestors.
5130  * This needs to be done in a top-down fashion because the load of a child
5131  * group is a fraction of its parent's load.
5132  */
5133 static void update_cfs_rq_h_load(struct cfs_rq *cfs_rq)
5134 {
5135 	struct rq *rq = rq_of(cfs_rq);
5136 	struct sched_entity *se = cfs_rq->tg->se[cpu_of(rq)];
5137 	unsigned long now = jiffies;
5138 	unsigned long load;
5139 
5140 	if (cfs_rq->last_h_load_update == now)
5141 		return;
5142 
5143 	cfs_rq->h_load_next = NULL;
5144 	for_each_sched_entity(se) {
5145 		cfs_rq = cfs_rq_of(se);
5146 		cfs_rq->h_load_next = se;
5147 		if (cfs_rq->last_h_load_update == now)
5148 			break;
5149 	}
5150 
5151 	if (!se) {
5152 		cfs_rq->h_load = cfs_rq->runnable_load_avg;
5153 		cfs_rq->last_h_load_update = now;
5154 	}
5155 
5156 	while ((se = cfs_rq->h_load_next) != NULL) {
5157 		load = cfs_rq->h_load;
5158 		load = div64_ul(load * se->avg.load_avg_contrib,
5159 				cfs_rq->runnable_load_avg + 1);
5160 		cfs_rq = group_cfs_rq(se);
5161 		cfs_rq->h_load = load;
5162 		cfs_rq->last_h_load_update = now;
5163 	}
5164 }
5165 
5166 static unsigned long task_h_load(struct task_struct *p)
5167 {
5168 	struct cfs_rq *cfs_rq = task_cfs_rq(p);
5169 
5170 	update_cfs_rq_h_load(cfs_rq);
5171 	return div64_ul(p->se.avg.load_avg_contrib * cfs_rq->h_load,
5172 			cfs_rq->runnable_load_avg + 1);
5173 }
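
/*
 * Worked example, with assumed numbers: a task contributing
 * load_avg_contrib = 512 to a group cfs_rq whose h_load is 300 and whose
 * runnable_load_avg is 1023 gets task_h_load() = 512 * 300 / 1024 = 150;
 * its weight as seen from the root is scaled by the share of the group's
 * runnable load that it represents.
 */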
5174 #else
5175 static inline void update_blocked_averages(int cpu)
5176 {
5177 }
5178 
5179 static unsigned long task_h_load(struct task_struct *p)
5180 {
5181 	return p->se.avg.load_avg_contrib;
5182 }
5183 #endif
5184 
5185 /********** Helpers for find_busiest_group ************************/
5186 /*
5187  * sg_lb_stats - stats of a sched_group required for load_balancing
5188  */
5189 struct sg_lb_stats {
5190 	unsigned long avg_load; /* Avg load across the CPUs of the group */
5191 	unsigned long group_load; /* Total load over the CPUs of the group */
5192 	unsigned long sum_weighted_load; /* Weighted load of group's tasks */
5193 	unsigned long load_per_task;
5194 	unsigned long group_power;
5195 	unsigned int sum_nr_running; /* Nr tasks running in the group */
5196 	unsigned int group_capacity;
5197 	unsigned int idle_cpus;
5198 	unsigned int group_weight;
5199 	int group_imb; /* Is there an imbalance in the group ? */
5200 	int group_has_capacity; /* Is there extra capacity in the group? */
5201 #ifdef CONFIG_NUMA_BALANCING
5202 	unsigned int nr_numa_running;
5203 	unsigned int nr_preferred_running;
5204 #endif
5205 };
5206 
5207 /*
5208  * sd_lb_stats - Structure to store the statistics of a sched_domain
5209  *		 during load balancing.
5210  */
5211 struct sd_lb_stats {
5212 	struct sched_group *busiest;	/* Busiest group in this sd */
5213 	struct sched_group *local;	/* Local group in this sd */
5214 	unsigned long total_load;	/* Total load of all groups in sd */
5215 	unsigned long total_pwr;	/* Total power of all groups in sd */
5216 	unsigned long avg_load;	/* Average load across all groups in sd */
5217 
5218 	struct sg_lb_stats busiest_stat;/* Statistics of the busiest group */
5219 	struct sg_lb_stats local_stat;	/* Statistics of the local group */
5220 };
5221 
5222 static inline void init_sd_lb_stats(struct sd_lb_stats *sds)
5223 {
5224 	/*
5225 	 * Skimp on the clearing to avoid duplicate work. We can avoid clearing
5226 	 * local_stat because update_sg_lb_stats() does a full clear/assignment.
5227 	 * We must however clear busiest_stat::avg_load because
5228 	 * update_sd_pick_busiest() reads this before assignment.
5229 	 */
5230 	*sds = (struct sd_lb_stats){
5231 		.busiest = NULL,
5232 		.local = NULL,
5233 		.total_load = 0UL,
5234 		.total_pwr = 0UL,
5235 		.busiest_stat = {
5236 			.avg_load = 0UL,
5237 		},
5238 	};
5239 }
5240 
5241 /**
5242  * get_sd_load_idx - Obtain the load index for a given sched domain.
5243  * @sd: The sched_domain whose load_idx is to be obtained.
5244  * @idle: The idle status of the CPU whose sd's load_idx is being obtained.
5245  *
5246  * Return: The load index.
5247  */
5248 static inline int get_sd_load_idx(struct sched_domain *sd,
5249 					enum cpu_idle_type idle)
5250 {
5251 	int load_idx;
5252 
5253 	switch (idle) {
5254 	case CPU_NOT_IDLE:
5255 		load_idx = sd->busy_idx;
5256 		break;
5257 
5258 	case CPU_NEWLY_IDLE:
5259 		load_idx = sd->newidle_idx;
5260 		break;
5261 	default:
5262 		load_idx = sd->idle_idx;
5263 		break;
5264 	}
5265 
5266 	return load_idx;
5267 }
5268 
5269 static unsigned long default_scale_freq_power(struct sched_domain *sd, int cpu)
5270 {
5271 	return SCHED_POWER_SCALE;
5272 }
5273 
5274 unsigned long __weak arch_scale_freq_power(struct sched_domain *sd, int cpu)
5275 {
5276 	return default_scale_freq_power(sd, cpu);
5277 }
5278 
5279 static unsigned long default_scale_smt_power(struct sched_domain *sd, int cpu)
5280 {
5281 	unsigned long weight = sd->span_weight;
5282 	unsigned long smt_gain = sd->smt_gain;
5283 
5284 	smt_gain /= weight;
5285 
5286 	return smt_gain;
5287 }
5288 
5289 unsigned long __weak arch_scale_smt_power(struct sched_domain *sd, int cpu)
5290 {
5291 	return default_scale_smt_power(sd, cpu);
5292 }
5293 
5294 static unsigned long scale_rt_power(int cpu)
5295 {
5296 	struct rq *rq = cpu_rq(cpu);
5297 	u64 total, available, age_stamp, avg;
5298 
5299 	/*
5300 	 * Since we're reading these variables without serialization, make sure
5301 	 * we read them once before doing sanity checks on them.
5302 	 */
5303 	age_stamp = ACCESS_ONCE(rq->age_stamp);
5304 	avg = ACCESS_ONCE(rq->rt_avg);
5305 
5306 	total = sched_avg_period() + (rq_clock(rq) - age_stamp);
5307 
5308 	if (unlikely(total < avg)) {
5309 		/* Ensures that power won't end up being negative */
5310 		available = 0;
5311 	} else {
5312 		available = total - avg;
5313 	}
5314 
5315 	if (unlikely((s64)total < SCHED_POWER_SCALE))
5316 		total = SCHED_POWER_SCALE;
5317 
5318 	total >>= SCHED_POWER_SHIFT;
5319 
5320 	return div_u64(available, total);
5321 }
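
/*
 * Illustrative example, with assumed numbers: if rt/irq processing ate 25%
 * of the averaging window, available/total is about 0.75 and the result is
 * roughly 768 out of SCHED_POWER_SCALE (1024); update_cpu_power() below
 * folds this into the cpu's capacity.
 */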
5322 
5323 static void update_cpu_power(struct sched_domain *sd, int cpu)
5324 {
5325 	unsigned long weight = sd->span_weight;
5326 	unsigned long power = SCHED_POWER_SCALE;
5327 	struct sched_group *sdg = sd->groups;
5328 
5329 	if ((sd->flags & SD_SHARE_CPUPOWER) && weight > 1) {
5330 		if (sched_feat(ARCH_POWER))
5331 			power *= arch_scale_smt_power(sd, cpu);
5332 		else
5333 			power *= default_scale_smt_power(sd, cpu);
5334 
5335 		power >>= SCHED_POWER_SHIFT;
5336 	}
5337 
5338 	sdg->sgp->power_orig = power;
5339 
5340 	if (sched_feat(ARCH_POWER))
5341 		power *= arch_scale_freq_power(sd, cpu);
5342 	else
5343 		power *= default_scale_freq_power(sd, cpu);
5344 
5345 	power >>= SCHED_POWER_SHIFT;
5346 
5347 	power *= scale_rt_power(cpu);
5348 	power >>= SCHED_POWER_SHIFT;
5349 
5350 	if (!power)
5351 		power = 1;
5352 
5353 	cpu_rq(cpu)->cpu_power = power;
5354 	sdg->sgp->power = power;
5355 }
5356 
5357 void update_group_power(struct sched_domain *sd, int cpu)
5358 {
5359 	struct sched_domain *child = sd->child;
5360 	struct sched_group *group, *sdg = sd->groups;
5361 	unsigned long power, power_orig;
5362 	unsigned long interval;
5363 
5364 	interval = msecs_to_jiffies(sd->balance_interval);
5365 	interval = clamp(interval, 1UL, max_load_balance_interval);
5366 	sdg->sgp->next_update = jiffies + interval;
5367 
5368 	if (!child) {
5369 		update_cpu_power(sd, cpu);
5370 		return;
5371 	}
5372 
5373 	power_orig = power = 0;
5374 
5375 	if (child->flags & SD_OVERLAP) {
5376 		/*
5377 		 * SD_OVERLAP domains cannot assume that child groups
5378 		 * span the current group.
5379 		 */
5380 
5381 		for_each_cpu(cpu, sched_group_cpus(sdg)) {
5382 			struct sched_group_power *sgp;
5383 			struct rq *rq = cpu_rq(cpu);
5384 
5385 			/*
5386 			 * build_sched_domains() -> init_sched_groups_power()
5387 			 * gets here before we've attached the domains to the
5388 			 * runqueues.
5389 			 *
5390 			 * Use power_of(), which is set irrespective of domains
5391 			 * in update_cpu_power().
5392 			 *
5393 			 * This avoids power/power_orig from being 0 and
5394 			 * causing divide-by-zero issues on boot.
5395 			 *
5396 			 * Runtime updates will correct power_orig.
5397 			 */
5398 			if (unlikely(!rq->sd)) {
5399 				power_orig += power_of(cpu);
5400 				power += power_of(cpu);
5401 				continue;
5402 			}
5403 
5404 			sgp = rq->sd->groups->sgp;
5405 			power_orig += sgp->power_orig;
5406 			power += sgp->power;
5407 		}
5408 	} else  {
5409 		/*
5410 		 * !SD_OVERLAP domains can assume that child groups
5411 		 * span the current group.
5412 		 */
5413 
5414 		group = child->groups;
5415 		do {
5416 			power_orig += group->sgp->power_orig;
5417 			power += group->sgp->power;
5418 			group = group->next;
5419 		} while (group != child->groups);
5420 	}
5421 
5422 	sdg->sgp->power_orig = power_orig;
5423 	sdg->sgp->power = power;
5424 }
5425 
5426 /*
5427  * Try and fix up capacity for tiny siblings; this is needed when
5428  * things like SD_ASYM_PACKING need f_b_g to select another sibling
5429  * which on its own isn't powerful enough.
5430  *
5431  * See update_sd_pick_busiest() and check_asym_packing().
5432  */
5433 static inline int
5434 fix_small_capacity(struct sched_domain *sd, struct sched_group *group)
5435 {
5436 	/*
5437 	 * Only siblings can have significantly less than SCHED_POWER_SCALE
5438 	 */
5439 	if (!(sd->flags & SD_SHARE_CPUPOWER))
5440 		return 0;
5441 
5442 	/*
5443 	 * If ~90% of the cpu_power is still there, we're good.
5444 	 */
5445 	if (group->sgp->power * 32 > group->sgp->power_orig * 29)
5446 		return 1;
5447 
5448 	return 0;
5449 }
5450 
5451 /*
5452  * Group imbalance indicates (and tries to solve) the problem where balancing
5453  * groups is inadequate due to tsk_cpus_allowed() constraints.
5454  *
5455  * Imagine a situation of two groups of 4 cpus each and 4 tasks each with a
5456  * cpumask covering 1 cpu of the first group and 3 cpus of the second group.
5457  * Something like:
5458  *
5459  * 	{ 0 1 2 3 } { 4 5 6 7 }
5460  * 	        *     * * *
5461  *
5462  * If we were to balance group-wise we'd place two tasks in the first group and
5463  * two tasks in the second group. Clearly this is undesired as it will overload
5464  * cpu 3 and leave one of the cpus in the second group unused.
5465  *
5466  * The current solution to this issue is detecting the skew in the first group
5467  * by noticing the lower domain failed to reach balance and had difficulty
5468  * moving tasks due to affinity constraints.
5469  *
5470  * When this is so detected; this group becomes a candidate for busiest; see
5471  * update_sd_pick_busiest(). And calculate_imbalance() and
5472  * find_busiest_group() avoid some of the usual balance conditions to allow it
5473  * to create an effective group imbalance.
5474  *
5475  * This is a somewhat tricky proposition since the next run might not find the
5476  * group imbalance and decide the groups need to be balanced again. A most
5477  * subtle and fragile situation.
5478  */
5479 
5480 static inline int sg_imbalanced(struct sched_group *group)
5481 {
5482 	return group->sgp->imbalance;
5483 }
5484 
5485 /*
5486  * Compute the group capacity.
5487  *
5488  * Avoid the issue where N*frac(smt_power) >= 1 creates 'phantom' cores by
5489  * first dividing out the smt factor and computing the actual number of cores
5490  * and limiting power unit capacity with that.
5491  */
5492 static inline int sg_capacity(struct lb_env *env, struct sched_group *group)
5493 {
5494 	unsigned int capacity, smt, cpus;
5495 	unsigned int power, power_orig;
5496 
5497 	power = group->sgp->power;
5498 	power_orig = group->sgp->power_orig;
5499 	cpus = group->group_weight;
5500 
5501 	/* smt := ceil(cpus / power), assumes: 1 < smt_power < 2 */
5502 	smt = DIV_ROUND_UP(SCHED_POWER_SCALE * cpus, power_orig);
5503 	capacity = cpus / smt; /* cores */
5504 
5505 	capacity = min_t(unsigned, capacity, DIV_ROUND_CLOSEST(power, SCHED_POWER_SCALE));
5506 	if (!capacity)
5507 		capacity = fix_small_capacity(env->sd, group);
5508 
5509 	return capacity;
5510 }
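
/*
 * Worked example, with assumed numbers: an SMT-2 group with
 * group_weight == 2 and group power_orig == 1178 (two threads at roughly
 * half the default smt_gain each) gives smt = DIV_ROUND_UP(2048, 1178) = 2
 * and capacity = 2 / 2 = 1, so the two hardware threads count as one core
 * instead of two 'phantom' cores.
 */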
5511 
5512 /**
5513  * update_sg_lb_stats - Update sched_group's statistics for load balancing.
5514  * @env: The load balancing environment.
5515  * @group: sched_group whose statistics are to be updated.
5516  * @load_idx: Load index of sched_domain of this_cpu for load calc.
5517  * @local_group: Does group contain this_cpu.
5518  * @sgs: variable to hold the statistics for this group.
5519  */
5520 static inline void update_sg_lb_stats(struct lb_env *env,
5521 			struct sched_group *group, int load_idx,
5522 			int local_group, struct sg_lb_stats *sgs)
5523 {
5524 	unsigned long nr_running;
5525 	unsigned long load;
5526 	int i;
5527 
5528 	memset(sgs, 0, sizeof(*sgs));
5529 
5530 	for_each_cpu_and(i, sched_group_cpus(group), env->cpus) {
5531 		struct rq *rq = cpu_rq(i);
5532 
5533 		nr_running = rq->nr_running;
5534 
5535 		/* Bias balancing toward cpus of our domain */
5536 		if (local_group)
5537 			load = target_load(i, load_idx);
5538 		else
5539 			load = source_load(i, load_idx);
5540 
5541 		sgs->group_load += load;
5542 		sgs->sum_nr_running += nr_running;
5543 #ifdef CONFIG_NUMA_BALANCING
5544 		sgs->nr_numa_running += rq->nr_numa_running;
5545 		sgs->nr_preferred_running += rq->nr_preferred_running;
5546 #endif
5547 		sgs->sum_weighted_load += weighted_cpuload(i);
5548 		if (idle_cpu(i))
5549 			sgs->idle_cpus++;
5550 	}
5551 
5552 	/* Adjust by relative CPU power of the group */
5553 	sgs->group_power = group->sgp->power;
5554 	sgs->avg_load = (sgs->group_load*SCHED_POWER_SCALE) / sgs->group_power;
5555 
5556 	if (sgs->sum_nr_running)
5557 		sgs->load_per_task = sgs->sum_weighted_load / sgs->sum_nr_running;
5558 
5559 	sgs->group_weight = group->group_weight;
5560 
5561 	sgs->group_imb = sg_imbalanced(group);
5562 	sgs->group_capacity = sg_capacity(env, group);
5563 
5564 	if (sgs->group_capacity > sgs->sum_nr_running)
5565 		sgs->group_has_capacity = 1;
5566 }
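
/*
 * Illustrative example with assumed numbers (not part of the original
 * source): a group with group_load = 3072 and group_power = 2048 (two cpus
 * at full SCHED_POWER_SCALE) gets avg_load = 3072 * 1024 / 2048 = 1536,
 * i.e. the load is expressed relative to a single unit of cpu power so
 * that groups of different sizes and speeds can be compared.
 */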
5567 
5568 /**
5569  * update_sd_pick_busiest - return 1 on busiest group
5570  * @env: The load balancing environment.
5571  * @sds: sched_domain statistics
5572  * @sg: sched_group candidate to be checked for being the busiest
5573  * @sgs: sched_group statistics
5574  *
5575  * Determine if @sg is a busier group than the previously selected
5576  * busiest group.
5577  *
5578  * Return: %true if @sg is a busier group than the previously selected
5579  * busiest group. %false otherwise.
5580  */
5581 static bool update_sd_pick_busiest(struct lb_env *env,
5582 				   struct sd_lb_stats *sds,
5583 				   struct sched_group *sg,
5584 				   struct sg_lb_stats *sgs)
5585 {
5586 	if (sgs->avg_load <= sds->busiest_stat.avg_load)
5587 		return false;
5588 
5589 	if (sgs->sum_nr_running > sgs->group_capacity)
5590 		return true;
5591 
5592 	if (sgs->group_imb)
5593 		return true;
5594 
5595 	/*
5596 	 * ASYM_PACKING needs to move all the work to the lowest
5597 	 * numbered CPUs in the group, therefore mark all groups
5598 	 * higher than ourself as busy.
5599 	 */
5600 	if ((env->sd->flags & SD_ASYM_PACKING) && sgs->sum_nr_running &&
5601 	    env->dst_cpu < group_first_cpu(sg)) {
5602 		if (!sds->busiest)
5603 			return true;
5604 
5605 		if (group_first_cpu(sds->busiest) > group_first_cpu(sg))
5606 			return true;
5607 	}
5608 
5609 	return false;
5610 }
5611 
5612 #ifdef CONFIG_NUMA_BALANCING
5613 static inline enum fbq_type fbq_classify_group(struct sg_lb_stats *sgs)
5614 {
5615 	if (sgs->sum_nr_running > sgs->nr_numa_running)
5616 		return regular;
5617 	if (sgs->sum_nr_running > sgs->nr_preferred_running)
5618 		return remote;
5619 	return all;
5620 }
5621 
5622 static inline enum fbq_type fbq_classify_rq(struct rq *rq)
5623 {
5624 	if (rq->nr_running > rq->nr_numa_running)
5625 		return regular;
5626 	if (rq->nr_running > rq->nr_preferred_running)
5627 		return remote;
5628 	return all;
5629 }
5630 #else
5631 static inline enum fbq_type fbq_classify_group(struct sg_lb_stats *sgs)
5632 {
5633 	return all;
5634 }
5635 
5636 static inline enum fbq_type fbq_classify_rq(struct rq *rq)
5637 {
5638 	return regular;
5639 }
5640 #endif /* CONFIG_NUMA_BALANCING */
5641 
5642 /**
5643  * update_sd_lb_stats - Update sched_domain's statistics for load balancing.
5644  * @env: The load balancing environment.
5645  * @sds: variable to hold the statistics for this sched_domain.
5646  */
5647 static inline void update_sd_lb_stats(struct lb_env *env, struct sd_lb_stats *sds)
5648 {
5649 	struct sched_domain *child = env->sd->child;
5650 	struct sched_group *sg = env->sd->groups;
5651 	struct sg_lb_stats tmp_sgs;
5652 	int load_idx, prefer_sibling = 0;
5653 
5654 	if (child && child->flags & SD_PREFER_SIBLING)
5655 		prefer_sibling = 1;
5656 
5657 	load_idx = get_sd_load_idx(env->sd, env->idle);
5658 
5659 	do {
5660 		struct sg_lb_stats *sgs = &tmp_sgs;
5661 		int local_group;
5662 
5663 		local_group = cpumask_test_cpu(env->dst_cpu, sched_group_cpus(sg));
5664 		if (local_group) {
5665 			sds->local = sg;
5666 			sgs = &sds->local_stat;
5667 
5668 			if (env->idle != CPU_NEWLY_IDLE ||
5669 			    time_after_eq(jiffies, sg->sgp->next_update))
5670 				update_group_power(env->sd, env->dst_cpu);
5671 		}
5672 
5673 		update_sg_lb_stats(env, sg, load_idx, local_group, sgs);
5674 
5675 		if (local_group)
5676 			goto next_group;
5677 
5678 		/*
5679 		 * In case the child domain prefers tasks go to siblings
5680 		 * first, lower the sg capacity to one so that we'll try
5681 		 * and move all the excess tasks away. We lower the capacity
5682 		 * of a group only if the local group has the capacity to fit
5683 		 * these excess tasks, i.e. nr_running < group_capacity. The
5684 		 * extra check prevents the case where you always pull from the
5685 		 * heaviest group when it is already under-utilized (possible when
5686 		 * a single large-weight task outweighs the other tasks on the system).
5687 		 */
5688 		if (prefer_sibling && sds->local &&
5689 		    sds->local_stat.group_has_capacity)
5690 			sgs->group_capacity = min(sgs->group_capacity, 1U);
5691 
5692 		if (update_sd_pick_busiest(env, sds, sg, sgs)) {
5693 			sds->busiest = sg;
5694 			sds->busiest_stat = *sgs;
5695 		}
5696 
5697 next_group:
5698 		/* Now, start updating sd_lb_stats */
5699 		sds->total_load += sgs->group_load;
5700 		sds->total_pwr += sgs->group_power;
5701 
5702 		sg = sg->next;
5703 	} while (sg != env->sd->groups);
5704 
5705 	if (env->sd->flags & SD_NUMA)
5706 		env->fbq_type = fbq_classify_group(&sds->busiest_stat);
5707 }
5708 
5709 /**
5710  * check_asym_packing - Check to see if the group is packed into the
5711  *			sched domain.
5712  *
5713  * This is primarily intended to be used at the sibling level.  Some
5714  * cores like POWER7 prefer to use lower numbered SMT threads.  In the
5715  * case of POWER7, it can move to lower SMT modes only when higher
5716  * threads are idle.  When in lower SMT modes, the threads will
5717  * perform better since they share less core resources.  Hence when we
5718  * have idle threads, we want them to be the higher ones.
5719  *
5720  * This packing function is run on idle threads.  It checks to see if
5721  * the busiest CPU in this domain (core in the P7 case) has a higher
5722  * CPU number than the packing function is being run on.  Here we are
5723  * assuming a lower CPU number corresponds to a lower SMT thread
5724  * number.
5725  *
5726  * Return: 1 when packing is required and a task should be moved to
5727  * this CPU.  The amount of the imbalance is returned in *imbalance.
5728  *
5729  * @env: The load balancing environment.
5730  * @sds: Statistics of the sched_domain which is to be packed
5731  */
5732 static int check_asym_packing(struct lb_env *env, struct sd_lb_stats *sds)
5733 {
5734 	int busiest_cpu;
5735 
5736 	if (!(env->sd->flags & SD_ASYM_PACKING))
5737 		return 0;
5738 
5739 	if (!sds->busiest)
5740 		return 0;
5741 
5742 	busiest_cpu = group_first_cpu(sds->busiest);
5743 	if (env->dst_cpu > busiest_cpu)
5744 		return 0;
5745 
5746 	env->imbalance = DIV_ROUND_CLOSEST(
5747 		sds->busiest_stat.avg_load * sds->busiest_stat.group_power,
5748 		SCHED_POWER_SCALE);
5749 
5750 	return 1;
5751 }
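
/*
 * Illustrative example with assumed numbers (not part of the original
 * source): if the busiest group has avg_load = 2048 and group_power = 1178,
 * the imbalance above becomes DIV_ROUND_CLOSEST(2048 * 1178, 1024) = 2356,
 * i.e. the power-scaled average load converted back into weighted task
 * load to be moved.
 */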
5752 
5753 /**
5754  * fix_small_imbalance - Calculate the minor imbalance that exists
5755  *			amongst the groups of a sched_domain, during
5756  *			load balancing.
5757  * @env: The load balancing environment.
5758  * @sds: Statistics of the sched_domain whose imbalance is to be calculated.
5759  */
5760 static inline
5761 void fix_small_imbalance(struct lb_env *env, struct sd_lb_stats *sds)
5762 {
5763 	unsigned long tmp, pwr_now = 0, pwr_move = 0;
5764 	unsigned int imbn = 2;
5765 	unsigned long scaled_busy_load_per_task;
5766 	struct sg_lb_stats *local, *busiest;
5767 
5768 	local = &sds->local_stat;
5769 	busiest = &sds->busiest_stat;
5770 
5771 	if (!local->sum_nr_running)
5772 		local->load_per_task = cpu_avg_load_per_task(env->dst_cpu);
5773 	else if (busiest->load_per_task > local->load_per_task)
5774 		imbn = 1;
5775 
5776 	scaled_busy_load_per_task =
5777 		(busiest->load_per_task * SCHED_POWER_SCALE) /
5778 		busiest->group_power;
5779 
5780 	if (busiest->avg_load + scaled_busy_load_per_task >=
5781 	    local->avg_load + (scaled_busy_load_per_task * imbn)) {
5782 		env->imbalance = busiest->load_per_task;
5783 		return;
5784 	}
5785 
5786 	/*
5787 	 * OK, we don't have enough imbalance to justify moving tasks,
5788 	 * however we may be able to increase total CPU power used by
5789 	 * moving them.
5790 	 */
5791 
5792 	pwr_now += busiest->group_power *
5793 			min(busiest->load_per_task, busiest->avg_load);
5794 	pwr_now += local->group_power *
5795 			min(local->load_per_task, local->avg_load);
5796 	pwr_now /= SCHED_POWER_SCALE;
5797 
5798 	/* Amount of load we'd subtract */
5799 	tmp = (busiest->load_per_task * SCHED_POWER_SCALE) /
5800 		busiest->group_power;
5801 	if (busiest->avg_load > tmp) {
5802 		pwr_move += busiest->group_power *
5803 			    min(busiest->load_per_task,
5804 				busiest->avg_load - tmp);
5805 	}
5806 
5807 	/* Amount of load we'd add */
5808 	if (busiest->avg_load * busiest->group_power <
5809 	    busiest->load_per_task * SCHED_POWER_SCALE) {
5810 		tmp = (busiest->avg_load * busiest->group_power) /
5811 		      local->group_power;
5812 	} else {
5813 		tmp = (busiest->load_per_task * SCHED_POWER_SCALE) /
5814 		      local->group_power;
5815 	}
5816 	pwr_move += local->group_power *
5817 		    min(local->load_per_task, local->avg_load + tmp);
5818 	pwr_move /= SCHED_POWER_SCALE;
5819 
5820 	/* Move if we gain throughput */
5821 	if (pwr_move > pwr_now)
5822 		env->imbalance = busiest->load_per_task;
5823 }
5824 
5825 /**
5826  * calculate_imbalance - Calculate the amount of imbalance present within the
5827  *			 groups of a given sched_domain during load balance.
5828  * @env: load balance environment
5829  * @sds: statistics of the sched_domain whose imbalance is to be calculated.
5830  */
5831 static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *sds)
5832 {
5833 	unsigned long max_pull, load_above_capacity = ~0UL;
5834 	struct sg_lb_stats *local, *busiest;
5835 
5836 	local = &sds->local_stat;
5837 	busiest = &sds->busiest_stat;
5838 
5839 	if (busiest->group_imb) {
5840 		/*
5841 		 * In the group_imb case we cannot rely on group-wide averages
5842 		 * to ensure cpu-load equilibrium, look at wider averages. XXX
5843 		 */
5844 		busiest->load_per_task =
5845 			min(busiest->load_per_task, sds->avg_load);
5846 	}
5847 
5848 	/*
5849 	 * In the presence of smp nice balancing, certain scenarios can have
5850 	 * max load less than avg load (as we skip the groups at or below
5851 	 * their cpu_power while calculating max_load).
5852 	 */
5853 	if (busiest->avg_load <= sds->avg_load ||
5854 	    local->avg_load >= sds->avg_load) {
5855 		env->imbalance = 0;
5856 		return fix_small_imbalance(env, sds);
5857 	}
5858 
5859 	if (!busiest->group_imb) {
5860 		/*
5861 		 * Don't want to pull so many tasks that a group would go idle.
5862 		 * Except of course for the group_imb case, since then we might
5863 		 * have to drop below capacity to reach cpu-load equilibrium.
5864 		 */
5865 		load_above_capacity =
5866 			(busiest->sum_nr_running - busiest->group_capacity);
5867 
5868 		load_above_capacity *= (SCHED_LOAD_SCALE * SCHED_POWER_SCALE);
5869 		load_above_capacity /= busiest->group_power;
5870 	}
5871 
5872 	/*
5873 	 * We're trying to get all the cpus to the average_load, so we don't
5874 	 * want to push ourselves above the average load, nor do we wish to
5875 	 * reduce the max loaded cpu below the average load. At the same time,
5876 	 * we also don't want to reduce the group load below the group capacity
5877 	 * (so that we can implement power-savings policies etc). Thus we look
5878 	 * for the minimum possible imbalance.
5879 	 */
5880 	max_pull = min(busiest->avg_load - sds->avg_load, load_above_capacity);
5881 
5882 	/* How much load to actually move to equalise the imbalance */
5883 	env->imbalance = min(
5884 		max_pull * busiest->group_power,
5885 		(sds->avg_load - local->avg_load) * local->group_power
5886 	) / SCHED_POWER_SCALE;
5887 
5888 	/*
5889 	 * If *imbalance is less than the average load per runnable task,
5890 	 * there is no guarantee that any tasks will be moved, so consider
5891 	 * bumping its value to force at least one task to be
5892 	 * moved.
5893 	 */
5894 	if (env->imbalance < busiest->load_per_task)
5895 		return fix_small_imbalance(env, sds);
5896 }
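
/*
 * Illustrative example with assumed numbers (not part of the original
 * source): with sds->avg_load = 1536, busiest->avg_load = 2048,
 * local->avg_load = 1024, both group powers at 1024 and a large
 * load_above_capacity, max_pull = min(2048 - 1536, ...) = 512 and
 * env->imbalance = min(512 * 1024, (1536 - 1024) * 1024) / 1024 = 512,
 * i.e. just enough load to bring both groups toward the domain average.
 */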
5897 
5898 /******* find_busiest_group() helpers end here *********************/
5899 
5900 /**
5901  * find_busiest_group - Returns the busiest group within the sched_domain
5902  * if there is an imbalance. If there isn't an imbalance, and
5903  * the user has opted for power-savings, it returns a group whose
5904  * CPUs can be put to idle by rebalancing those tasks elsewhere, if
5905  * such a group exists.
5906  *
5907  * Also calculates the amount of weighted load which should be moved
5908  * to restore balance.
5909  *
5910  * @env: The load balancing environment.
5911  *
5912  * Return:	- The busiest group if imbalance exists.
5913  *		- If no imbalance and user has opted for power-savings balance,
5914  *		   return the least loaded group whose CPUs can be
5915  *		   put to idle by rebalancing its tasks onto our group.
5916  */
5917 static struct sched_group *find_busiest_group(struct lb_env *env)
5918 {
5919 	struct sg_lb_stats *local, *busiest;
5920 	struct sd_lb_stats sds;
5921 
5922 	init_sd_lb_stats(&sds);
5923 
5924 	/*
5925 	 * Compute the various statistics relevant for load balancing at
5926 	 * this level.
5927 	 */
5928 	update_sd_lb_stats(env, &sds);
5929 	local = &sds.local_stat;
5930 	busiest = &sds.busiest_stat;
5931 
5932 	if ((env->idle == CPU_IDLE || env->idle == CPU_NEWLY_IDLE) &&
5933 	    check_asym_packing(env, &sds))
5934 		return sds.busiest;
5935 
5936 	/* There is no busy sibling group to pull tasks from */
5937 	if (!sds.busiest || busiest->sum_nr_running == 0)
5938 		goto out_balanced;
5939 
5940 	sds.avg_load = (SCHED_POWER_SCALE * sds.total_load) / sds.total_pwr;
5941 
5942 	/*
5943 	 * If the busiest group is imbalanced the below checks don't
5944 	 * work because they assume all things are equal, which typically
5945 	 * isn't true due to cpus_allowed constraints and the like.
5946 	 */
5947 	if (busiest->group_imb)
5948 		goto force_balance;
5949 
5950 	/* SD_BALANCE_NEWIDLE trumps SMP nice when underutilized */
5951 	if (env->idle == CPU_NEWLY_IDLE && local->group_has_capacity &&
5952 	    !busiest->group_has_capacity)
5953 		goto force_balance;
5954 
5955 	/*
5956 	 * If the local group is more busy than the selected busiest group
5957 	 * don't try and pull any tasks.
5958 	 */
5959 	if (local->avg_load >= busiest->avg_load)
5960 		goto out_balanced;
5961 
5962 	/*
5963 	 * Don't pull any tasks if this group is already above the domain
5964 	 * average load.
5965 	 */
5966 	if (local->avg_load >= sds.avg_load)
5967 		goto out_balanced;
5968 
5969 	if (env->idle == CPU_IDLE) {
5970 		/*
5971 		 * This cpu is idle. If the busiest group doesn't have
5972 		 * more tasks than the number of available cpus and there
5973 		 * is no imbalance between this and the busiest group
5974 		 * with regard to idle cpus, it is balanced.
5975 		 */
5976 		if ((local->idle_cpus < busiest->idle_cpus) &&
5977 		    busiest->sum_nr_running <= busiest->group_weight)
5978 			goto out_balanced;
5979 	} else {
5980 		/*
5981 		 * In the CPU_NEWLY_IDLE, CPU_NOT_IDLE cases, use
5982 		 * imbalance_pct to be conservative.
5983 		 */
5984 		if (100 * busiest->avg_load <=
5985 				env->sd->imbalance_pct * local->avg_load)
5986 			goto out_balanced;
5987 	}
5988 
5989 force_balance:
5990 	/* Looks like there is an imbalance. Compute it */
5991 	calculate_imbalance(env, &sds);
5992 	return sds.busiest;
5993 
5994 out_balanced:
5995 	env->imbalance = 0;
5996 	return NULL;
5997 }
5998 
5999 /*
6000  * find_busiest_queue - find the busiest runqueue among the cpus in group.
6001  */
6002 static struct rq *find_busiest_queue(struct lb_env *env,
6003 				     struct sched_group *group)
6004 {
6005 	struct rq *busiest = NULL, *rq;
6006 	unsigned long busiest_load = 0, busiest_power = 1;
6007 	int i;
6008 
6009 	for_each_cpu_and(i, sched_group_cpus(group), env->cpus) {
6010 		unsigned long power, capacity, wl;
6011 		enum fbq_type rt;
6012 
6013 		rq = cpu_rq(i);
6014 		rt = fbq_classify_rq(rq);
6015 
6016 		/*
6017 		 * We classify groups/runqueues into three types:
6018 		 *  - regular: there are !numa tasks
6019 		 *  - remote:  there are numa tasks that run on the 'wrong' node
6020 		 *  - all:     there is no distinction
6021 		 *
6022 		 * In order to avoid migrating ideally placed numa tasks,
6023 		 * ignore them when there are better options.
6024 		 *
6025 		 * If we ignore the actual busiest queue to migrate another
6026 		 * task, the next balance pass can still reduce the busiest
6027 		 * queue by moving tasks around inside the node.
6028 		 *
6029 		 * If we cannot move enough load due to this classification
6030 		 * the next pass will adjust the group classification and
6031 		 * allow migration of more tasks.
6032 		 *
6033 		 * Both cases only affect the total convergence complexity.
6034 		 */
6035 		if (rt > env->fbq_type)
6036 			continue;
6037 
6038 		power = power_of(i);
6039 		capacity = DIV_ROUND_CLOSEST(power, SCHED_POWER_SCALE);
6040 		if (!capacity)
6041 			capacity = fix_small_capacity(env->sd, group);
6042 
6043 		wl = weighted_cpuload(i);
6044 
6045 		/*
6046 		 * When comparing with imbalance, use weighted_cpuload()
6047 		 * which is not scaled with the cpu power.
6048 		 */
6049 		if (capacity && rq->nr_running == 1 && wl > env->imbalance)
6050 			continue;
6051 
6052 		/*
6053 		 * For the load comparisons with the other cpu's, consider
6054 		 * the weighted_cpuload() scaled with the cpu power, so that
6055 		 * the load can be moved away from the cpu that is potentially
6056 		 * running at a lower capacity.
6057 		 *
6058 		 * Thus we're looking for max(wl_i / power_i), crosswise
6059 		 * multiplication to rid ourselves of the division works out
6060 		 * to: wl_i * power_j > wl_j * power_i;  where j is our
6061 		 * previous maximum.
6062 		 */
6063 		if (wl * busiest_power > busiest_load * power) {
6064 			busiest_load = wl;
6065 			busiest_power = power;
6066 			busiest = rq;
6067 		}
6068 	}
6069 
6070 	return busiest;
6071 }
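
/*
 * Illustrative example with assumed numbers (not part of the original
 * source): comparing rq A (wl = 2048, power = 1024, i.e. load/power = 2)
 * against a candidate rq B (wl = 1536, power = 512, i.e. load/power = 3):
 * wl_B * power_A = 1536 * 1024 > wl_A * power_B = 2048 * 512, so B is
 * picked as busiest even though its raw load is lower.
 */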
6072 
6073 /*
6074  * Max backoff if we encounter pinned tasks. The exact value is fairly
6075  * arbitrary, as long as it is large enough.
6076  */
6077 #define MAX_PINNED_INTERVAL	512
6078 
6079 /* Working cpumask for load_balance and load_balance_newidle. */
6080 DEFINE_PER_CPU(cpumask_var_t, load_balance_mask);
6081 
6082 static int need_active_balance(struct lb_env *env)
6083 {
6084 	struct sched_domain *sd = env->sd;
6085 
6086 	if (env->idle == CPU_NEWLY_IDLE) {
6087 
6088 		/*
6089 		 * ASYM_PACKING needs to force migrate tasks from busy but
6090 		 * higher numbered CPUs in order to pack all tasks in the
6091 		 * lowest numbered CPUs.
6092 		 */
6093 		if ((sd->flags & SD_ASYM_PACKING) && env->src_cpu > env->dst_cpu)
6094 			return 1;
6095 	}
6096 
6097 	return unlikely(sd->nr_balance_failed > sd->cache_nice_tries+2);
6098 }
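
/*
 * Illustrative note (assumed value, not part of the original source): with
 * sd->cache_nice_tries == 1, the condition above only triggers active
 * balancing once nr_balance_failed exceeds 3, i.e. not before the fourth
 * consecutive failed periodic balance attempt.
 */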
6099 
6100 static int active_load_balance_cpu_stop(void *data);
6101 
6102 static int should_we_balance(struct lb_env *env)
6103 {
6104 	struct sched_group *sg = env->sd->groups;
6105 	struct cpumask *sg_cpus, *sg_mask;
6106 	int cpu, balance_cpu = -1;
6107 
6108 	/*
6109 	 * In the newly idle case, we will allow all the cpus
6110 	 * to do the newly idle load balance.
6111 	 */
6112 	if (env->idle == CPU_NEWLY_IDLE)
6113 		return 1;
6114 
6115 	sg_cpus = sched_group_cpus(sg);
6116 	sg_mask = sched_group_mask(sg);
6117 	/* Try to find first idle cpu */
6118 	for_each_cpu_and(cpu, sg_cpus, env->cpus) {
6119 		if (!cpumask_test_cpu(cpu, sg_mask) || !idle_cpu(cpu))
6120 			continue;
6121 
6122 		balance_cpu = cpu;
6123 		break;
6124 	}
6125 
6126 	if (balance_cpu == -1)
6127 		balance_cpu = group_balance_cpu(sg);
6128 
6129 	/*
6130 	 * The first idle cpu or the first cpu (busiest) in this sched group
6131 	 * is eligible for doing load balancing at this and higher domains.
6132 	 */
6133 	return balance_cpu == env->dst_cpu;
6134 }
6135 
6136 /*
6137  * Check this_cpu to ensure it is balanced within domain. Attempt to move
6138  * tasks if there is an imbalance.
6139  */
6140 static int load_balance(int this_cpu, struct rq *this_rq,
6141 			struct sched_domain *sd, enum cpu_idle_type idle,
6142 			int *continue_balancing)
6143 {
6144 	int ld_moved, cur_ld_moved, active_balance = 0;
6145 	struct sched_domain *sd_parent = sd->parent;
6146 	struct sched_group *group;
6147 	struct rq *busiest;
6148 	unsigned long flags;
6149 	struct cpumask *cpus = __get_cpu_var(load_balance_mask);
6150 
6151 	struct lb_env env = {
6152 		.sd		= sd,
6153 		.dst_cpu	= this_cpu,
6154 		.dst_rq		= this_rq,
6155 		.dst_grpmask    = sched_group_cpus(sd->groups),
6156 		.idle		= idle,
6157 		.loop_break	= sched_nr_migrate_break,
6158 		.cpus		= cpus,
6159 		.fbq_type	= all,
6160 	};
6161 
6162 	/*
6163 	 * For NEWLY_IDLE load_balancing, we don't need to consider
6164 	 * other cpus in our group
6165 	 */
6166 	if (idle == CPU_NEWLY_IDLE)
6167 		env.dst_grpmask = NULL;
6168 
6169 	cpumask_copy(cpus, cpu_active_mask);
6170 
6171 	schedstat_inc(sd, lb_count[idle]);
6172 
6173 redo:
6174 	if (!should_we_balance(&env)) {
6175 		*continue_balancing = 0;
6176 		goto out_balanced;
6177 	}
6178 
6179 	group = find_busiest_group(&env);
6180 	if (!group) {
6181 		schedstat_inc(sd, lb_nobusyg[idle]);
6182 		goto out_balanced;
6183 	}
6184 
6185 	busiest = find_busiest_queue(&env, group);
6186 	if (!busiest) {
6187 		schedstat_inc(sd, lb_nobusyq[idle]);
6188 		goto out_balanced;
6189 	}
6190 
6191 	BUG_ON(busiest == env.dst_rq);
6192 
6193 	schedstat_add(sd, lb_imbalance[idle], env.imbalance);
6194 
6195 	ld_moved = 0;
6196 	if (busiest->nr_running > 1) {
6197 		/*
6198 		 * Attempt to move tasks. If find_busiest_group has found
6199 		 * an imbalance but busiest->nr_running <= 1, the group is
6200 		 * still unbalanced. ld_moved simply stays zero, so it is
6201 		 * correctly treated as an imbalance.
6202 		 */
6203 		env.flags |= LBF_ALL_PINNED;
6204 		env.src_cpu   = busiest->cpu;
6205 		env.src_rq    = busiest;
6206 		env.loop_max  = min(sysctl_sched_nr_migrate, busiest->nr_running);
6207 
6208 more_balance:
6209 		local_irq_save(flags);
6210 		double_rq_lock(env.dst_rq, busiest);
6211 
6212 		/*
6213 		 * cur_ld_moved - load moved in current iteration
6214 		 * ld_moved     - cumulative load moved across iterations
6215 		 */
6216 		cur_ld_moved = move_tasks(&env);
6217 		ld_moved += cur_ld_moved;
6218 		double_rq_unlock(env.dst_rq, busiest);
6219 		local_irq_restore(flags);
6220 
6221 		/*
6222 		 * some other cpu did the load balance for us.
6223 		 */
6224 		if (cur_ld_moved && env.dst_cpu != smp_processor_id())
6225 			resched_cpu(env.dst_cpu);
6226 
6227 		if (env.flags & LBF_NEED_BREAK) {
6228 			env.flags &= ~LBF_NEED_BREAK;
6229 			goto more_balance;
6230 		}
6231 
6232 		/*
6233 		 * Revisit (affine) tasks on src_cpu that couldn't be moved to
6234 		 * us and move them to an alternate dst_cpu in our sched_group
6235 		 * where they can run. The upper limit on how many times we
6236 		 * iterate on same src_cpu is dependent on number of cpus in our
6237 		 * sched_group.
6238 		 *
6239 		 * This changes load balance semantics a bit on who can move
6240 		 * load to a given_cpu. In addition to the given_cpu itself
6241 		 * (or an ilb_cpu acting on its behalf where given_cpu is
6242 		 * nohz-idle), we now have balance_cpu in a position to move
6243 		 * load to given_cpu. In rare situations, this may cause
6244 		 * conflicts (balance_cpu and given_cpu/ilb_cpu deciding
6245 		 * _independently_ and at _same_ time to move some load to
6246 		 * given_cpu) causing excess load to be moved to given_cpu.
6247 		 * This however should not happen so much in practice and
6248 		 * moreover subsequent load balance cycles should correct the
6249 		 * excess load moved.
6250 		 */
6251 		if ((env.flags & LBF_DST_PINNED) && env.imbalance > 0) {
6252 
6253 			/* Prevent re-selecting dst_cpu via env's cpus */
6254 			cpumask_clear_cpu(env.dst_cpu, env.cpus);
6255 
6256 			env.dst_rq	 = cpu_rq(env.new_dst_cpu);
6257 			env.dst_cpu	 = env.new_dst_cpu;
6258 			env.flags	&= ~LBF_DST_PINNED;
6259 			env.loop	 = 0;
6260 			env.loop_break	 = sched_nr_migrate_break;
6261 
6262 			/*
6263 			 * Go back to "more_balance" rather than "redo" since we
6264 			 * need to continue with same src_cpu.
6265 			 */
6266 			goto more_balance;
6267 		}
6268 
6269 		/*
6270 		 * We failed to reach balance because of affinity.
6271 		 */
6272 		if (sd_parent) {
6273 			int *group_imbalance = &sd_parent->groups->sgp->imbalance;
6274 
6275 			if ((env.flags & LBF_SOME_PINNED) && env.imbalance > 0) {
6276 				*group_imbalance = 1;
6277 			} else if (*group_imbalance)
6278 				*group_imbalance = 0;
6279 		}
6280 
6281 		/* All tasks on this runqueue were pinned by CPU affinity */
6282 		if (unlikely(env.flags & LBF_ALL_PINNED)) {
6283 			cpumask_clear_cpu(cpu_of(busiest), cpus);
6284 			if (!cpumask_empty(cpus)) {
6285 				env.loop = 0;
6286 				env.loop_break = sched_nr_migrate_break;
6287 				goto redo;
6288 			}
6289 			goto out_balanced;
6290 		}
6291 	}
6292 
6293 	if (!ld_moved) {
6294 		schedstat_inc(sd, lb_failed[idle]);
6295 		/*
6296 		 * Increment the failure counter only on periodic balance.
6297 		 * We do not want newidle balance, which can be very
6298 		 * frequent, to pollute the failure counter and cause
6299 		 * excessive cache_hot migrations and active balances.
6300 		 */
6301 		if (idle != CPU_NEWLY_IDLE)
6302 			sd->nr_balance_failed++;
6303 
6304 		if (need_active_balance(&env)) {
6305 			raw_spin_lock_irqsave(&busiest->lock, flags);
6306 
6307 			/* Don't kick active_load_balance_cpu_stop if the
6308 			 * curr task on the busiest cpu can't be
6309 			 * moved to this_cpu.
6310 			 */
6311 			if (!cpumask_test_cpu(this_cpu,
6312 					tsk_cpus_allowed(busiest->curr))) {
6313 				raw_spin_unlock_irqrestore(&busiest->lock,
6314 							    flags);
6315 				env.flags |= LBF_ALL_PINNED;
6316 				goto out_one_pinned;
6317 			}
6318 
6319 			/*
6320 			 * ->active_balance synchronizes accesses to
6321 			 * ->active_balance_work.  Once set, it's cleared
6322 			 * only after active load balance is finished.
6323 			 */
6324 			if (!busiest->active_balance) {
6325 				busiest->active_balance = 1;
6326 				busiest->push_cpu = this_cpu;
6327 				active_balance = 1;
6328 			}
6329 			raw_spin_unlock_irqrestore(&busiest->lock, flags);
6330 
6331 			if (active_balance) {
6332 				stop_one_cpu_nowait(cpu_of(busiest),
6333 					active_load_balance_cpu_stop, busiest,
6334 					&busiest->active_balance_work);
6335 			}
6336 
6337 			/*
6338 			 * We've kicked active balancing, reset the failure
6339 			 * counter.
6340 			 */
6341 			sd->nr_balance_failed = sd->cache_nice_tries+1;
6342 		}
6343 	} else
6344 		sd->nr_balance_failed = 0;
6345 
6346 	if (likely(!active_balance)) {
6347 		/* We were unbalanced, so reset the balancing interval */
6348 		sd->balance_interval = sd->min_interval;
6349 	} else {
6350 		/*
6351 		 * If we've begun active balancing, start to back off. This
6352 		 * case may not be covered by the all_pinned logic if there
6353 		 * is only 1 task on the busy runqueue (because we don't call
6354 		 * move_tasks).
6355 		 */
6356 		if (sd->balance_interval < sd->max_interval)
6357 			sd->balance_interval *= 2;
6358 	}
6359 
6360 	goto out;
6361 
6362 out_balanced:
6363 	schedstat_inc(sd, lb_balanced[idle]);
6364 
6365 	sd->nr_balance_failed = 0;
6366 
6367 out_one_pinned:
6368 	/* tune up the balancing interval */
6369 	if (((env.flags & LBF_ALL_PINNED) &&
6370 			sd->balance_interval < MAX_PINNED_INTERVAL) ||
6371 			(sd->balance_interval < sd->max_interval))
6372 		sd->balance_interval *= 2;
6373 
6374 	ld_moved = 0;
6375 out:
6376 	return ld_moved;
6377 }
6378 
6379 /*
6380  * idle_balance is called by schedule() if this_cpu is about to become
6381  * idle. Attempts to pull tasks from other CPUs.
6382  */
6383 void idle_balance(int this_cpu, struct rq *this_rq)
6384 {
6385 	struct sched_domain *sd;
6386 	int pulled_task = 0;
6387 	unsigned long next_balance = jiffies + HZ;
6388 	u64 curr_cost = 0;
6389 
6390 	this_rq->idle_stamp = rq_clock(this_rq);
6391 
6392 	if (this_rq->avg_idle < sysctl_sched_migration_cost)
6393 		return;
6394 
6395 	/*
6396 	 * Drop the rq->lock, but keep IRQ/preempt disabled.
6397 	 */
6398 	raw_spin_unlock(&this_rq->lock);
6399 
6400 	update_blocked_averages(this_cpu);
6401 	rcu_read_lock();
6402 	for_each_domain(this_cpu, sd) {
6403 		unsigned long interval;
6404 		int continue_balancing = 1;
6405 		u64 t0, domain_cost;
6406 
6407 		if (!(sd->flags & SD_LOAD_BALANCE))
6408 			continue;
6409 
6410 		if (this_rq->avg_idle < curr_cost + sd->max_newidle_lb_cost)
6411 			break;
6412 
6413 		if (sd->flags & SD_BALANCE_NEWIDLE) {
6414 			t0 = sched_clock_cpu(this_cpu);
6415 
6416 			/* If we've pulled tasks over stop searching: */
6417 			pulled_task = load_balance(this_cpu, this_rq,
6418 						   sd, CPU_NEWLY_IDLE,
6419 						   &continue_balancing);
6420 
6421 			domain_cost = sched_clock_cpu(this_cpu) - t0;
6422 			if (domain_cost > sd->max_newidle_lb_cost)
6423 				sd->max_newidle_lb_cost = domain_cost;
6424 
6425 			curr_cost += domain_cost;
6426 		}
6427 
6428 		interval = msecs_to_jiffies(sd->balance_interval);
6429 		if (time_after(next_balance, sd->last_balance + interval))
6430 			next_balance = sd->last_balance + interval;
6431 		if (pulled_task) {
6432 			this_rq->idle_stamp = 0;
6433 			break;
6434 		}
6435 	}
6436 	rcu_read_unlock();
6437 
6438 	raw_spin_lock(&this_rq->lock);
6439 
6440 	if (pulled_task || time_after(jiffies, this_rq->next_balance)) {
6441 		/*
6442 		 * We are going idle. next_balance may be set based on
6443 		 * a busy processor. So reset next_balance.
6444 		 */
6445 		this_rq->next_balance = next_balance;
6446 	}
6447 
6448 	if (curr_cost > this_rq->max_idle_balance_cost)
6449 		this_rq->max_idle_balance_cost = curr_cost;
6450 }
6451 
6452 /*
6453  * active_load_balance_cpu_stop is run by cpu stopper. It pushes
6454  * running tasks off the busiest CPU onto idle CPUs. It requires at
6455  * least 1 task to be running on each physical CPU where possible, and
6456  * avoids physical / logical imbalances.
6457  */
6458 static int active_load_balance_cpu_stop(void *data)
6459 {
6460 	struct rq *busiest_rq = data;
6461 	int busiest_cpu = cpu_of(busiest_rq);
6462 	int target_cpu = busiest_rq->push_cpu;
6463 	struct rq *target_rq = cpu_rq(target_cpu);
6464 	struct sched_domain *sd;
6465 
6466 	raw_spin_lock_irq(&busiest_rq->lock);
6467 
6468 	/* make sure the requested cpu hasn't gone down in the meantime */
6469 	if (unlikely(busiest_cpu != smp_processor_id() ||
6470 		     !busiest_rq->active_balance))
6471 		goto out_unlock;
6472 
6473 	/* Is there any task to move? */
6474 	if (busiest_rq->nr_running <= 1)
6475 		goto out_unlock;
6476 
6477 	/*
6478 	 * This condition is "impossible", if it occurs
6479 	 * we need to fix it. Originally reported by
6480 	 * Bjorn Helgaas on a 128-cpu setup.
6481 	 */
6482 	BUG_ON(busiest_rq == target_rq);
6483 
6484 	/* move a task from busiest_rq to target_rq */
6485 	double_lock_balance(busiest_rq, target_rq);
6486 
6487 	/* Search for an sd spanning us and the target CPU. */
6488 	rcu_read_lock();
6489 	for_each_domain(target_cpu, sd) {
6490 		if ((sd->flags & SD_LOAD_BALANCE) &&
6491 		    cpumask_test_cpu(busiest_cpu, sched_domain_span(sd)))
6492 				break;
6493 	}
6494 
6495 	if (likely(sd)) {
6496 		struct lb_env env = {
6497 			.sd		= sd,
6498 			.dst_cpu	= target_cpu,
6499 			.dst_rq		= target_rq,
6500 			.src_cpu	= busiest_rq->cpu,
6501 			.src_rq		= busiest_rq,
6502 			.idle		= CPU_IDLE,
6503 		};
6504 
6505 		schedstat_inc(sd, alb_count);
6506 
6507 		if (move_one_task(&env))
6508 			schedstat_inc(sd, alb_pushed);
6509 		else
6510 			schedstat_inc(sd, alb_failed);
6511 	}
6512 	rcu_read_unlock();
6513 	double_unlock_balance(busiest_rq, target_rq);
6514 out_unlock:
6515 	busiest_rq->active_balance = 0;
6516 	raw_spin_unlock_irq(&busiest_rq->lock);
6517 	return 0;
6518 }
6519 
6520 #ifdef CONFIG_NO_HZ_COMMON
6521 /*
6522  * idle load balancing details
6523  * - When one of the busy CPUs notices that idle rebalancing may be
6524  *   needed, it kicks the idle load balancer, which then does idle
6525  *   load balancing for all the idle CPUs.
6526  */
6527 static struct {
6528 	cpumask_var_t idle_cpus_mask;
6529 	atomic_t nr_cpus;
6530 	unsigned long next_balance;     /* in jiffy units */
6531 } nohz ____cacheline_aligned;
6532 
6533 static inline int find_new_ilb(int call_cpu)
6534 {
6535 	int ilb = cpumask_first(nohz.idle_cpus_mask);
6536 
6537 	if (ilb < nr_cpu_ids && idle_cpu(ilb))
6538 		return ilb;
6539 
6540 	return nr_cpu_ids;
6541 }
6542 
6543 /*
6544  * Kick a CPU to do the nohz balancing, if it is time for it. We pick the
6545  * nohz_load_balancer CPU (if there is one), otherwise fall back to any idle
6546  * CPU (if there is one).
6547  */
6548 static void nohz_balancer_kick(int cpu)
6549 {
6550 	int ilb_cpu;
6551 
6552 	nohz.next_balance++;
6553 
6554 	ilb_cpu = find_new_ilb(cpu);
6555 
6556 	if (ilb_cpu >= nr_cpu_ids)
6557 		return;
6558 
6559 	if (test_and_set_bit(NOHZ_BALANCE_KICK, nohz_flags(ilb_cpu)))
6560 		return;
6561 	/*
6562 	 * Use smp_send_reschedule() instead of resched_cpu().
6563 	 * This way we generate a sched IPI on the target cpu which
6564 	 * is idle. And the softirq performing nohz idle load balance
6565 	 * will be run before returning from the IPI.
6566 	 */
6567 	smp_send_reschedule(ilb_cpu);
6568 	return;
6569 }
6570 
6571 static inline void nohz_balance_exit_idle(int cpu)
6572 {
6573 	if (unlikely(test_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu)))) {
6574 		cpumask_clear_cpu(cpu, nohz.idle_cpus_mask);
6575 		atomic_dec(&nohz.nr_cpus);
6576 		clear_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu));
6577 	}
6578 }
6579 
6580 static inline void set_cpu_sd_state_busy(void)
6581 {
6582 	struct sched_domain *sd;
6583 	int cpu = smp_processor_id();
6584 
6585 	rcu_read_lock();
6586 	sd = rcu_dereference(per_cpu(sd_busy, cpu));
6587 
6588 	if (!sd || !sd->nohz_idle)
6589 		goto unlock;
6590 	sd->nohz_idle = 0;
6591 
6592 	atomic_inc(&sd->groups->sgp->nr_busy_cpus);
6593 unlock:
6594 	rcu_read_unlock();
6595 }
6596 
6597 void set_cpu_sd_state_idle(void)
6598 {
6599 	struct sched_domain *sd;
6600 	int cpu = smp_processor_id();
6601 
6602 	rcu_read_lock();
6603 	sd = rcu_dereference(per_cpu(sd_busy, cpu));
6604 
6605 	if (!sd || sd->nohz_idle)
6606 		goto unlock;
6607 	sd->nohz_idle = 1;
6608 
6609 	atomic_dec(&sd->groups->sgp->nr_busy_cpus);
6610 unlock:
6611 	rcu_read_unlock();
6612 }
6613 
6614 /*
6615  * This routine will record that the cpu is going idle with tick stopped.
6616  * This info will be used in performing idle load balancing in the future.
6617  */
6618 void nohz_balance_enter_idle(int cpu)
6619 {
6620 	/*
6621 	 * If this cpu is going down, then nothing needs to be done.
6622 	 */
6623 	if (!cpu_active(cpu))
6624 		return;
6625 
6626 	if (test_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu)))
6627 		return;
6628 
6629 	cpumask_set_cpu(cpu, nohz.idle_cpus_mask);
6630 	atomic_inc(&nohz.nr_cpus);
6631 	set_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu));
6632 }
6633 
6634 static int sched_ilb_notifier(struct notifier_block *nfb,
6635 					unsigned long action, void *hcpu)
6636 {
6637 	switch (action & ~CPU_TASKS_FROZEN) {
6638 	case CPU_DYING:
6639 		nohz_balance_exit_idle(smp_processor_id());
6640 		return NOTIFY_OK;
6641 	default:
6642 		return NOTIFY_DONE;
6643 	}
6644 }
6645 #endif
6646 
6647 static DEFINE_SPINLOCK(balancing);
6648 
6649 /*
6650  * Scale the max load_balance interval with the number of CPUs in the system.
6651  * This trades load-balance latency on larger machines for less cross talk.
6652  */
6653 void update_max_interval(void)
6654 {
6655 	max_load_balance_interval = HZ*num_online_cpus()/10;
6656 }
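
/*
 * Illustrative example with assumed numbers (not part of the original
 * source): with HZ == 1000 and 16 online cpus,
 * max_load_balance_interval = 1000 * 16 / 10 = 1600 jiffies, i.e. the
 * balance interval is capped at 1.6 seconds on that machine.
 */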
6657 
6658 /*
6659  * It checks each scheduling domain to see if it is due to be balanced,
6660  * and initiates a balancing operation if so.
6661  *
6662  * Balancing parameters are set up in init_sched_domains.
6663  */
6664 static void rebalance_domains(int cpu, enum cpu_idle_type idle)
6665 {
6666 	int continue_balancing = 1;
6667 	struct rq *rq = cpu_rq(cpu);
6668 	unsigned long interval;
6669 	struct sched_domain *sd;
6670 	/* Earliest time when we have to do rebalance again */
6671 	unsigned long next_balance = jiffies + 60*HZ;
6672 	int update_next_balance = 0;
6673 	int need_serialize, need_decay = 0;
6674 	u64 max_cost = 0;
6675 
6676 	update_blocked_averages(cpu);
6677 
6678 	rcu_read_lock();
6679 	for_each_domain(cpu, sd) {
6680 		/*
6681 		 * Decay the newidle max times here because this is a regular
6682 		 * visit to all the domains. Decay ~1% per second.
6683 		 */
6684 		if (time_after(jiffies, sd->next_decay_max_lb_cost)) {
6685 			sd->max_newidle_lb_cost =
6686 				(sd->max_newidle_lb_cost * 253) / 256;
6687 			sd->next_decay_max_lb_cost = jiffies + HZ;
6688 			need_decay = 1;
6689 		}
6690 		max_cost += sd->max_newidle_lb_cost;
6691 
6692 		if (!(sd->flags & SD_LOAD_BALANCE))
6693 			continue;
6694 
6695 		/*
6696 		 * Stop the load balance at this level. There is another
6697 		 * CPU in our sched group which is doing load balancing more
6698 		 * actively.
6699 		 */
6700 		if (!continue_balancing) {
6701 			if (need_decay)
6702 				continue;
6703 			break;
6704 		}
6705 
6706 		interval = sd->balance_interval;
6707 		if (idle != CPU_IDLE)
6708 			interval *= sd->busy_factor;
6709 
6710 		/* scale ms to jiffies */
6711 		interval = msecs_to_jiffies(interval);
6712 		interval = clamp(interval, 1UL, max_load_balance_interval);
6713 
6714 		need_serialize = sd->flags & SD_SERIALIZE;
6715 
6716 		if (need_serialize) {
6717 			if (!spin_trylock(&balancing))
6718 				goto out;
6719 		}
6720 
6721 		if (time_after_eq(jiffies, sd->last_balance + interval)) {
6722 			if (load_balance(cpu, rq, sd, idle, &continue_balancing)) {
6723 				/*
6724 				 * The LBF_DST_PINNED logic could have changed
6725 				 * env->dst_cpu, so we can't know our idle
6726 				 * state even if we migrated tasks. Update it.
6727 				 */
6728 				idle = idle_cpu(cpu) ? CPU_IDLE : CPU_NOT_IDLE;
6729 			}
6730 			sd->last_balance = jiffies;
6731 		}
6732 		if (need_serialize)
6733 			spin_unlock(&balancing);
6734 out:
6735 		if (time_after(next_balance, sd->last_balance + interval)) {
6736 			next_balance = sd->last_balance + interval;
6737 			update_next_balance = 1;
6738 		}
6739 	}
6740 	if (need_decay) {
6741 		/*
6742 		 * Ensure the rq-wide value also decays but keep it at a
6743 		 * reasonable floor to avoid funnies with rq->avg_idle.
6744 		 */
6745 		rq->max_idle_balance_cost =
6746 			max((u64)sysctl_sched_migration_cost, max_cost);
6747 	}
6748 	rcu_read_unlock();
6749 
6750 	/*
6751 	 * next_balance will be updated only when there is a need.
6752 	 * When the cpu is attached to the null domain, for example, it will not be
6753 	 * updated.
6754 	 */
6755 	if (likely(update_next_balance))
6756 		rq->next_balance = next_balance;
6757 }
6758 
6759 #ifdef CONFIG_NO_HZ_COMMON
6760 /*
6761  * In CONFIG_NO_HZ_COMMON case, the idle balance kickee will do the
6762  * rebalancing for all the cpus for whom scheduler ticks are stopped.
6763  */
6764 static void nohz_idle_balance(int this_cpu, enum cpu_idle_type idle)
6765 {
6766 	struct rq *this_rq = cpu_rq(this_cpu);
6767 	struct rq *rq;
6768 	int balance_cpu;
6769 
6770 	if (idle != CPU_IDLE ||
6771 	    !test_bit(NOHZ_BALANCE_KICK, nohz_flags(this_cpu)))
6772 		goto end;
6773 
6774 	for_each_cpu(balance_cpu, nohz.idle_cpus_mask) {
6775 		if (balance_cpu == this_cpu || !idle_cpu(balance_cpu))
6776 			continue;
6777 
6778 		/*
6779 		 * If this cpu gets work to do, stop the load balancing
6780 		 * work being done for other cpus. Next load
6781 		 * balancing owner will pick it up.
6782 		 */
6783 		if (need_resched())
6784 			break;
6785 
6786 		rq = cpu_rq(balance_cpu);
6787 
6788 		raw_spin_lock_irq(&rq->lock);
6789 		update_rq_clock(rq);
6790 		update_idle_cpu_load(rq);
6791 		raw_spin_unlock_irq(&rq->lock);
6792 
6793 		rebalance_domains(balance_cpu, CPU_IDLE);
6794 
6795 		if (time_after(this_rq->next_balance, rq->next_balance))
6796 			this_rq->next_balance = rq->next_balance;
6797 	}
6798 	nohz.next_balance = this_rq->next_balance;
6799 end:
6800 	clear_bit(NOHZ_BALANCE_KICK, nohz_flags(this_cpu));
6801 }
6802 
6803 /*
6804  * Current heuristic for kicking the idle load balancer in the presence
6805  * of an idle cpu in the system:
6806  *   - This rq has more than one task.
6807  *   - At any scheduler domain level, this cpu's scheduler group has multiple
6808  *     busy cpus exceeding the group's power.
6809  *   - For SD_ASYM_PACKING, if the lower numbered cpus in the scheduler
6810  *     domain span are idle.
6811  */
6812 static inline int nohz_kick_needed(struct rq *rq, int cpu)
6813 {
6814 	unsigned long now = jiffies;
6815 	struct sched_domain *sd;
6816 	struct sched_group_power *sgp;
6817 	int nr_busy;
6818 
6819 	if (unlikely(idle_cpu(cpu)))
6820 		return 0;
6821 
6822 	/*
6823 	 * We may have recently been in ticked or tickless idle mode. At the
6824 	 * first busy tick after returning from idle, we will update the busy stats.
6825 	 */
6826 	set_cpu_sd_state_busy();
6827 	nohz_balance_exit_idle(cpu);
6828 
6829 	/*
6830 	 * None are in tickless mode and hence no need for NOHZ idle load
6831 	 * balancing.
6832 	 */
6833 	if (likely(!atomic_read(&nohz.nr_cpus)))
6834 		return 0;
6835 
6836 	if (time_before(now, nohz.next_balance))
6837 		return 0;
6838 
6839 	if (rq->nr_running >= 2)
6840 		goto need_kick;
6841 
6842 	rcu_read_lock();
6843 	sd = rcu_dereference(per_cpu(sd_busy, cpu));
6844 
6845 	if (sd) {
6846 		sgp = sd->groups->sgp;
6847 		nr_busy = atomic_read(&sgp->nr_busy_cpus);
6848 
6849 		if (nr_busy > 1)
6850 			goto need_kick_unlock;
6851 	}
6852 
6853 	sd = rcu_dereference(per_cpu(sd_asym, cpu));
6854 
6855 	if (sd && (cpumask_first_and(nohz.idle_cpus_mask,
6856 				  sched_domain_span(sd)) < cpu))
6857 		goto need_kick_unlock;
6858 
6859 	rcu_read_unlock();
6860 	return 0;
6861 
6862 need_kick_unlock:
6863 	rcu_read_unlock();
6864 need_kick:
6865 	return 1;
6866 }
6867 #else
6868 static void nohz_idle_balance(int this_cpu, enum cpu_idle_type idle) { }
6869 #endif
6870 
6871 /*
6872  * run_rebalance_domains is triggered when needed from the scheduler tick.
6873  * Also triggered for nohz idle balancing (with nohz_balancing_kick set).
6874  */
6875 static void run_rebalance_domains(struct softirq_action *h)
6876 {
6877 	int this_cpu = smp_processor_id();
6878 	struct rq *this_rq = cpu_rq(this_cpu);
6879 	enum cpu_idle_type idle = this_rq->idle_balance ?
6880 						CPU_IDLE : CPU_NOT_IDLE;
6881 
6882 	rebalance_domains(this_cpu, idle);
6883 
6884 	/*
6885 	 * If this cpu has a pending nohz_balance_kick, then do the
6886 	 * balancing on behalf of the other idle cpus whose ticks are
6887 	 * stopped.
6888 	 */
6889 	nohz_idle_balance(this_cpu, idle);
6890 }
6891 
6892 static inline int on_null_domain(int cpu)
6893 {
6894 	return !rcu_dereference_sched(cpu_rq(cpu)->sd);
6895 }
6896 
6897 /*
6898  * Trigger the SCHED_SOFTIRQ if it is time to do periodic load balancing.
6899  */
6900 void trigger_load_balance(struct rq *rq, int cpu)
6901 {
6902 	/* Don't need to rebalance while attached to NULL domain */
6903 	if (time_after_eq(jiffies, rq->next_balance) &&
6904 	    likely(!on_null_domain(cpu)))
6905 		raise_softirq(SCHED_SOFTIRQ);
6906 #ifdef CONFIG_NO_HZ_COMMON
6907 	if (nohz_kick_needed(rq, cpu) && likely(!on_null_domain(cpu)))
6908 		nohz_balancer_kick(cpu);
6909 #endif
6910 }
6911 
6912 static void rq_online_fair(struct rq *rq)
6913 {
6914 	update_sysctl();
6915 }
6916 
6917 static void rq_offline_fair(struct rq *rq)
6918 {
6919 	update_sysctl();
6920 
6921 	/* Ensure any throttled groups are reachable by pick_next_task */
6922 	unthrottle_offline_cfs_rqs(rq);
6923 }
6924 
6925 #endif /* CONFIG_SMP */
6926 
6927 /*
6928  * scheduler tick hitting a task of our scheduling class:
6929  */
6930 static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued)
6931 {
6932 	struct cfs_rq *cfs_rq;
6933 	struct sched_entity *se = &curr->se;
6934 
6935 	for_each_sched_entity(se) {
6936 		cfs_rq = cfs_rq_of(se);
6937 		entity_tick(cfs_rq, se, queued);
6938 	}
6939 
6940 	if (numabalancing_enabled)
6941 		task_tick_numa(rq, curr);
6942 
6943 	update_rq_runnable_avg(rq, 1);
6944 }
6945 
6946 /*
6947  * called on fork with the child task as argument from the parent's context
6948  *  - child not yet on the tasklist
6949  *  - preemption disabled
6950  */
6951 static void task_fork_fair(struct task_struct *p)
6952 {
6953 	struct cfs_rq *cfs_rq;
6954 	struct sched_entity *se = &p->se, *curr;
6955 	int this_cpu = smp_processor_id();
6956 	struct rq *rq = this_rq();
6957 	unsigned long flags;
6958 
6959 	raw_spin_lock_irqsave(&rq->lock, flags);
6960 
6961 	update_rq_clock(rq);
6962 
6963 	cfs_rq = task_cfs_rq(current);
6964 	curr = cfs_rq->curr;
6965 
6966 	/*
6967 	 * Not only the cpu but also the task_group of the parent might have
6968 	 * been changed after parent->se.parent,cfs_rq were copied to
6969 	 * child->se.parent,cfs_rq. So call __set_task_cpu() to make those
6970 	 * of child point to valid ones.
6971 	 */
6972 	rcu_read_lock();
6973 	__set_task_cpu(p, this_cpu);
6974 	rcu_read_unlock();
6975 
6976 	update_curr(cfs_rq);
6977 
6978 	if (curr)
6979 		se->vruntime = curr->vruntime;
6980 	place_entity(cfs_rq, se, 1);
6981 
6982 	if (sysctl_sched_child_runs_first && curr && entity_before(curr, se)) {
6983 		/*
6984 		 * Upon rescheduling, sched_class::put_prev_task() will place
6985 		 * 'current' within the tree based on its new key value.
6986 		 */
6987 		swap(curr->vruntime, se->vruntime);
6988 		resched_task(rq->curr);
6989 	}
6990 
6991 	se->vruntime -= cfs_rq->min_vruntime;
6992 
6993 	raw_spin_unlock_irqrestore(&rq->lock, flags);
6994 }
6995 
6996 /*
6997  * Priority of the task has changed. Check to see if we preempt
6998  * the current task.
6999  */
7000 static void
7001 prio_changed_fair(struct rq *rq, struct task_struct *p, int oldprio)
7002 {
7003 	if (!p->se.on_rq)
7004 		return;
7005 
7006 	/*
7007 	 * Reschedule if we are currently running on this runqueue and
7008 	 * our priority decreased, or if we are not currently running on
7009 	 * this runqueue and our priority is higher than the current's
7010 	 */
7011 	if (rq->curr == p) {
7012 		if (p->prio > oldprio)
7013 			resched_task(rq->curr);
7014 	} else
7015 		check_preempt_curr(rq, p, 0);
7016 }
7017 
7018 static void switched_from_fair(struct rq *rq, struct task_struct *p)
7019 {
7020 	struct sched_entity *se = &p->se;
7021 	struct cfs_rq *cfs_rq = cfs_rq_of(se);
7022 
7023 	/*
7024 	 * Ensure the task's vruntime is normalized, so that when it's
7025 	 * switched back to the fair class the enqueue_entity(.flags=0) will
7026 	 * do the right thing.
7027 	 *
7028 	 * If it was on_rq, then the dequeue_entity(.flags=0) will already
7029 	 * have normalized the vruntime, if it was !on_rq, then only when
7030 	 * the task is sleeping will it still have non-normalized vruntime.
7031 	 */
7032 	if (!se->on_rq && p->state != TASK_RUNNING) {
7033 		/*
7034 		 * Fix up our vruntime so that the current sleep doesn't
7035 		 * cause 'unlimited' sleep bonus.
7036 		 */
7037 		place_entity(cfs_rq, se, 0);
7038 		se->vruntime -= cfs_rq->min_vruntime;
7039 	}
7040 
7041 #ifdef CONFIG_SMP
7042 	/*
7043 	 * Remove our load from contribution when we leave sched_fair
7044 	 * and ensure we don't carry in an old decay_count if we
7045 	 * switch back.
7046 	 */
7047 	if (se->avg.decay_count) {
7048 		__synchronize_entity_decay(se);
7049 		subtract_blocked_load_contrib(cfs_rq, se->avg.load_avg_contrib);
7050 	}
7051 #endif
7052 }
7053 
7054 /*
7055  * We switched to the sched_fair class.
7056  */
7057 static void switched_to_fair(struct rq *rq, struct task_struct *p)
7058 {
7059 	if (!p->se.on_rq)
7060 		return;
7061 
7062 	/*
7063 	 * We were most likely switched from sched_rt, so
7064 	 * kick off the schedule if running, otherwise just see
7065 	 * if we can still preempt the current task.
7066 	 */
7067 	if (rq->curr == p)
7068 		resched_task(rq->curr);
7069 	else
7070 		check_preempt_curr(rq, p, 0);
7071 }
7072 
7073 /* Account for a task changing its policy or group.
7074  *
7075  * This routine is mostly called to set cfs_rq->curr field when a task
7076  * migrates between groups/classes.
7077  */
7078 static void set_curr_task_fair(struct rq *rq)
7079 {
7080 	struct sched_entity *se = &rq->curr->se;
7081 
7082 	for_each_sched_entity(se) {
7083 		struct cfs_rq *cfs_rq = cfs_rq_of(se);
7084 
7085 		set_next_entity(cfs_rq, se);
7086 		/* ensure bandwidth has been allocated on our new cfs_rq */
7087 		account_cfs_rq_runtime(cfs_rq, 0);
7088 	}
7089 }
7090 
7091 void init_cfs_rq(struct cfs_rq *cfs_rq)
7092 {
7093 	cfs_rq->tasks_timeline = RB_ROOT;
7094 	cfs_rq->min_vruntime = (u64)(-(1LL << 20));
7095 #ifndef CONFIG_64BIT
7096 	cfs_rq->min_vruntime_copy = cfs_rq->min_vruntime;
7097 #endif
7098 #ifdef CONFIG_SMP
7099 	atomic64_set(&cfs_rq->decay_counter, 1);
7100 	atomic_long_set(&cfs_rq->removed_load, 0);
7101 #endif
7102 }
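
/*
 * Note (hedged, not part of the original source): min_vruntime is
 * initialized to (u64)(-(1LL << 20)), i.e. about one millisecond's worth
 * of nanoseconds below the u64 wrap point, so that vruntime wrap-around
 * handling is exercised soon after boot rather than only after a very
 * long uptime.
 */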
7103 
7104 #ifdef CONFIG_FAIR_GROUP_SCHED
7105 static void task_move_group_fair(struct task_struct *p, int on_rq)
7106 {
7107 	struct cfs_rq *cfs_rq;
7108 	/*
7109 	 * If the task was not on the rq at the time of this cgroup movement
7110 	 * it must have been asleep, sleeping tasks keep their ->vruntime
7111 	 * absolute on their old rq until wakeup (needed for the fair sleeper
7112 	 * bonus in place_entity()).
7113 	 *
7114 	 * If it was on the rq, we've just 'preempted' it, which does convert
7115 	 * ->vruntime to a relative base.
7116 	 *
7117 	 * Make sure both cases convert their relative position when migrating
7118 	 * to another cgroup's rq. This does somewhat interfere with the
7119 	 * fair sleeper stuff for the first placement, but who cares.
7120 	 */
7121 	/*
7122 	 * When !on_rq, vruntime of the task has usually NOT been normalized.
7123 	 * But there are some cases where it has already been normalized:
7124 	 *
7125 	 * - Moving a forked child which is waiting for being woken up by
7126 	 *   wake_up_new_task().
7127 	 * - Moving a task which has been woken up by try_to_wake_up() and
7128 	 *   waiting for actually being woken up by sched_ttwu_pending().
7129 	 *
7130 	 * To prevent boost or penalty in the new cfs_rq caused by delta
7131 	 * min_vruntime between the two cfs_rqs, we skip vruntime adjustment.
7132 	 */
7133 	if (!on_rq && (!p->se.sum_exec_runtime || p->state == TASK_WAKING))
7134 		on_rq = 1;
7135 
7136 	if (!on_rq)
7137 		p->se.vruntime -= cfs_rq_of(&p->se)->min_vruntime;
7138 	set_task_rq(p, task_cpu(p));
7139 	if (!on_rq) {
7140 		cfs_rq = cfs_rq_of(&p->se);
7141 		p->se.vruntime += cfs_rq->min_vruntime;
7142 #ifdef CONFIG_SMP
7143 		/*
7144 		 * migrate_task_rq_fair() will have removed our previous
7145 		 * contribution, but we must synchronize for ongoing future
7146 		 * decay.
7147 		 */
7148 		p->se.avg.decay_count = atomic64_read(&cfs_rq->decay_counter);
7149 		cfs_rq->blocked_load_avg += p->se.avg.load_avg_contrib;
7150 #endif
7151 	}
7152 }
7153 
7154 void free_fair_sched_group(struct task_group *tg)
7155 {
7156 	int i;
7157 
7158 	destroy_cfs_bandwidth(tg_cfs_bandwidth(tg));
7159 
7160 	for_each_possible_cpu(i) {
7161 		if (tg->cfs_rq)
7162 			kfree(tg->cfs_rq[i]);
7163 		if (tg->se)
7164 			kfree(tg->se[i]);
7165 	}
7166 
7167 	kfree(tg->cfs_rq);
7168 	kfree(tg->se);
7169 }
7170 
7171 int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
7172 {
7173 	struct cfs_rq *cfs_rq;
7174 	struct sched_entity *se;
7175 	int i;
7176 
7177 	tg->cfs_rq = kzalloc(sizeof(cfs_rq) * nr_cpu_ids, GFP_KERNEL);
7178 	if (!tg->cfs_rq)
7179 		goto err;
7180 	tg->se = kzalloc(sizeof(se) * nr_cpu_ids, GFP_KERNEL);
7181 	if (!tg->se)
7182 		goto err;
7183 
7184 	tg->shares = NICE_0_LOAD;
7185 
7186 	init_cfs_bandwidth(tg_cfs_bandwidth(tg));
7187 
7188 	for_each_possible_cpu(i) {
7189 		cfs_rq = kzalloc_node(sizeof(struct cfs_rq),
7190 				      GFP_KERNEL, cpu_to_node(i));
7191 		if (!cfs_rq)
7192 			goto err;
7193 
7194 		se = kzalloc_node(sizeof(struct sched_entity),
7195 				  GFP_KERNEL, cpu_to_node(i));
7196 		if (!se)
7197 			goto err_free_rq;
7198 
7199 		init_cfs_rq(cfs_rq);
7200 		init_tg_cfs_entry(tg, cfs_rq, se, i, parent->se[i]);
7201 	}
7202 
7203 	return 1;
7204 
7205 err_free_rq:
7206 	kfree(cfs_rq);
7207 err:
7208 	return 0;
7209 }
7210 
7211 void unregister_fair_sched_group(struct task_group *tg, int cpu)
7212 {
7213 	struct rq *rq = cpu_rq(cpu);
7214 	unsigned long flags;
7215 
7216 	/*
7217 	 * Only empty task groups can be destroyed; so we can speculatively
7218 	 * check on_list without danger of it being re-added.
7219 	 */
7220 	if (!tg->cfs_rq[cpu]->on_list)
7221 		return;
7222 
7223 	raw_spin_lock_irqsave(&rq->lock, flags);
7224 	list_del_leaf_cfs_rq(tg->cfs_rq[cpu]);
7225 	raw_spin_unlock_irqrestore(&rq->lock, flags);
7226 }
7227 
7228 void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
7229 			struct sched_entity *se, int cpu,
7230 			struct sched_entity *parent)
7231 {
7232 	struct rq *rq = cpu_rq(cpu);
7233 
7234 	cfs_rq->tg = tg;
7235 	cfs_rq->rq = rq;
7236 	init_cfs_rq_runtime(cfs_rq);
7237 
7238 	tg->cfs_rq[cpu] = cfs_rq;
7239 	tg->se[cpu] = se;
7240 
7241 	/* se could be NULL for root_task_group */
7242 	if (!se)
7243 		return;
7244 
7245 	if (!parent)
7246 		se->cfs_rq = &rq->cfs;
7247 	else
7248 		se->cfs_rq = parent->my_q;
7249 
7250 	se->my_q = cfs_rq;
7251 	/* guarantee group entities always have weight */
7252 	update_load_set(&se->load, NICE_0_LOAD);
7253 	se->parent = parent;
7254 }
7255 
7256 static DEFINE_MUTEX(shares_mutex);
7257 
7258 int sched_group_set_shares(struct task_group *tg, unsigned long shares)
7259 {
7260 	int i;
7261 	unsigned long flags;
7262 
7263 	/*
7264 	 * We can't change the weight of the root cgroup.
7265 	 */
7266 	if (!tg->se[0])
7267 		return -EINVAL;
7268 
7269 	shares = clamp(shares, scale_load(MIN_SHARES), scale_load(MAX_SHARES));
7270 
7271 	mutex_lock(&shares_mutex);
7272 	if (tg->shares == shares)
7273 		goto done;
7274 
7275 	tg->shares = shares;
7276 	for_each_possible_cpu(i) {
7277 		struct rq *rq = cpu_rq(i);
7278 		struct sched_entity *se;
7279 
7280 		se = tg->se[i];
7281 		/* Propagate contribution to hierarchy */
7282 		raw_spin_lock_irqsave(&rq->lock, flags);
7283 
7284 		/* Possible calls to update_curr() need rq clock */
7285 		update_rq_clock(rq);
7286 		for_each_sched_entity(se)
7287 			update_cfs_shares(group_cfs_rq(se));
7288 		raw_spin_unlock_irqrestore(&rq->lock, flags);
7289 	}
7290 
7291 done:
7292 	mutex_unlock(&shares_mutex);
7293 	return 0;
7294 }
7295 #else /* CONFIG_FAIR_GROUP_SCHED */
7296 
7297 void free_fair_sched_group(struct task_group *tg) { }
7298 
7299 int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
7300 {
7301 	return 1;
7302 }
7303 
7304 void unregister_fair_sched_group(struct task_group *tg, int cpu) { }
7305 
7306 #endif /* CONFIG_FAIR_GROUP_SCHED */
7307 
7308 
7309 static unsigned int get_rr_interval_fair(struct rq *rq, struct task_struct *task)
7310 {
7311 	struct sched_entity *se = &task->se;
7312 	unsigned int rr_interval = 0;
7313 
7314 	/*
7315 	 * Time slice is 0 for SCHED_OTHER tasks that are on an otherwise
7316 	 * idle runqueue:
7317 	 */
7318 	if (rq->cfs.load.weight)
7319 		rr_interval = NS_TO_JIFFIES(sched_slice(cfs_rq_of(se), se));
7320 
7321 	return rr_interval;
7322 }
7323 
7324 /*
7325  * All the scheduling class methods:
7326  */
7327 const struct sched_class fair_sched_class = {
7328 	.next			= &idle_sched_class,
7329 	.enqueue_task		= enqueue_task_fair,
7330 	.dequeue_task		= dequeue_task_fair,
7331 	.yield_task		= yield_task_fair,
7332 	.yield_to_task		= yield_to_task_fair,
7333 
7334 	.check_preempt_curr	= check_preempt_wakeup,
7335 
7336 	.pick_next_task		= pick_next_task_fair,
7337 	.put_prev_task		= put_prev_task_fair,
7338 
7339 #ifdef CONFIG_SMP
7340 	.select_task_rq		= select_task_rq_fair,
7341 	.migrate_task_rq	= migrate_task_rq_fair,
7342 
7343 	.rq_online		= rq_online_fair,
7344 	.rq_offline		= rq_offline_fair,
7345 
7346 	.task_waking		= task_waking_fair,
7347 #endif
7348 
7349 	.set_curr_task          = set_curr_task_fair,
7350 	.task_tick		= task_tick_fair,
7351 	.task_fork		= task_fork_fair,
7352 
7353 	.prio_changed		= prio_changed_fair,
7354 	.switched_from		= switched_from_fair,
7355 	.switched_to		= switched_to_fair,
7356 
7357 	.get_rr_interval	= get_rr_interval_fair,
7358 
7359 #ifdef CONFIG_FAIR_GROUP_SCHED
7360 	.task_move_group	= task_move_group_fair,
7361 #endif
7362 };
7363 
7364 #ifdef CONFIG_SCHED_DEBUG
7365 void print_cfs_stats(struct seq_file *m, int cpu)
7366 {
7367 	struct cfs_rq *cfs_rq;
7368 
7369 	rcu_read_lock();
7370 	for_each_leaf_cfs_rq(cpu_rq(cpu), cfs_rq)
7371 		print_cfs_rq(m, cpu, cfs_rq);
7372 	rcu_read_unlock();
7373 }
7374 #endif
7375 
7376 __init void init_sched_fair_class(void)
7377 {
7378 #ifdef CONFIG_SMP
7379 	open_softirq(SCHED_SOFTIRQ, run_rebalance_domains);
7380 
7381 #ifdef CONFIG_NO_HZ_COMMON
7382 	nohz.next_balance = jiffies;
7383 	zalloc_cpumask_var(&nohz.idle_cpus_mask, GFP_NOWAIT);
7384 	cpu_notifier(sched_ilb_notifier, 0);
7385 #endif
7386 #endif /* SMP */
7387 
7388 }
7389