xref: /openbmc/linux/kernel/sched/fair.c (revision f5e4e7fdd57691d5308cf854dd0dbcfd58799e9a)
1 /*
2  * Completely Fair Scheduling (CFS) Class (SCHED_NORMAL/SCHED_BATCH)
3  *
4  *  Copyright (C) 2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
5  *
6  *  Interactivity improvements by Mike Galbraith
7  *  (C) 2007 Mike Galbraith <efault@gmx.de>
8  *
9  *  Various enhancements by Dmitry Adamushko.
10  *  (C) 2007 Dmitry Adamushko <dmitry.adamushko@gmail.com>
11  *
12  *  Group scheduling enhancements by Srivatsa Vaddagiri
13  *  Copyright IBM Corporation, 2007
14  *  Author: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>
15  *
16  *  Scaled math optimizations by Thomas Gleixner
17  *  Copyright (C) 2007, Thomas Gleixner <tglx@linutronix.de>
18  *
19  *  Adaptive scheduling granularity, math enhancements by Peter Zijlstra
20  *  Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
21  */
22 
23 #include <linux/latencytop.h>
24 #include <linux/sched.h>
25 #include <linux/cpumask.h>
26 #include <linux/slab.h>
27 #include <linux/profile.h>
28 #include <linux/interrupt.h>
29 #include <linux/mempolicy.h>
30 #include <linux/migrate.h>
31 #include <linux/task_work.h>
32 
33 #include <trace/events/sched.h>
34 
35 #include "sched.h"
36 
37 /*
38  * Targeted preemption latency for CPU-bound tasks:
39  * (default: 6ms * (1 + ilog(ncpus)), units: nanoseconds)
40  *
41  * NOTE: this latency value is not the same as the concept of
42  * 'timeslice length' - timeslices in CFS are of variable length
43  * and have no persistent notion like in traditional, time-slice
44  * based scheduling concepts.
45  *
46  * (to see the precise effective timeslice length of your workload,
47  *  run vmstat and monitor the context-switches (cs) field)
48  */
49 unsigned int sysctl_sched_latency = 6000000ULL;
50 unsigned int normalized_sysctl_sched_latency = 6000000ULL;
51 
52 /*
53  * The initial- and re-scaling of tunables is configurable
54  * (default SCHED_TUNABLESCALING_LOG = *(1+ilog(ncpus))
55  *
56  * Options are:
57  * SCHED_TUNABLESCALING_NONE - unscaled, always *1
58  * SCHED_TUNABLESCALING_LOG - scaled logarithmical, *1+ilog(ncpus)
59  * SCHED_TUNABLESCALING_LINEAR - scaled linear, *ncpus
60  */
61 enum sched_tunable_scaling sysctl_sched_tunable_scaling
62 	= SCHED_TUNABLESCALING_LOG;
63 
64 /*
65  * Minimal preemption granularity for CPU-bound tasks:
66  * (default: 0.75 msec * (1 + ilog(ncpus)), units: nanoseconds)
67  */
68 unsigned int sysctl_sched_min_granularity = 750000ULL;
69 unsigned int normalized_sysctl_sched_min_granularity = 750000ULL;
70 
71 /*
72  * is kept at sysctl_sched_latency / sysctl_sched_min_granularity
73  */
74 static unsigned int sched_nr_latency = 8;
75 
76 /*
77  * After fork, child runs first. If set to 0 (default) then
78  * parent will (try to) run first.
79  */
80 unsigned int sysctl_sched_child_runs_first __read_mostly;
81 
82 /*
83  * SCHED_OTHER wake-up granularity.
84  * (default: 1 msec * (1 + ilog(ncpus)), units: nanoseconds)
85  *
86  * This option delays the preemption effects of decoupled workloads
87  * and reduces their over-scheduling. Synchronous workloads will still
88  * have immediate wakeup/sleep latencies.
89  */
90 unsigned int sysctl_sched_wakeup_granularity = 1000000UL;
91 unsigned int normalized_sysctl_sched_wakeup_granularity = 1000000UL;
92 
93 const_debug unsigned int sysctl_sched_migration_cost = 500000UL;
94 
95 /*
96  * The exponential sliding  window over which load is averaged for shares
97  * distribution.
98  * (default: 10msec)
99  */
100 unsigned int __read_mostly sysctl_sched_shares_window = 10000000UL;
101 
102 #ifdef CONFIG_CFS_BANDWIDTH
103 /*
104  * Amount of runtime to allocate from global (tg) to local (per-cfs_rq) pool
105  * each time a cfs_rq requests quota.
106  *
107  * Note: in the case that the slice exceeds the runtime remaining (either due
108  * to consumption or the quota being specified to be smaller than the slice)
109  * we will always only issue the remaining available time.
110  *
111  * default: 5 msec, units: microseconds
112   */
113 unsigned int sysctl_sched_cfs_bandwidth_slice = 5000UL;
114 #endif
115 
116 static inline void update_load_add(struct load_weight *lw, unsigned long inc)
117 {
118 	lw->weight += inc;
119 	lw->inv_weight = 0;
120 }
121 
122 static inline void update_load_sub(struct load_weight *lw, unsigned long dec)
123 {
124 	lw->weight -= dec;
125 	lw->inv_weight = 0;
126 }
127 
128 static inline void update_load_set(struct load_weight *lw, unsigned long w)
129 {
130 	lw->weight = w;
131 	lw->inv_weight = 0;
132 }
133 
134 /*
135  * Increase the granularity value when there are more CPUs,
136  * because with more CPUs the 'effective latency' as visible
137  * to users decreases. But the relationship is not linear,
138  * so pick a second-best guess by going with the log2 of the
139  * number of CPUs.
140  *
141  * This idea comes from the SD scheduler of Con Kolivas:
142  */
143 static int get_update_sysctl_factor(void)
144 {
145 	unsigned int cpus = min_t(int, num_online_cpus(), 8);
146 	unsigned int factor;
147 
148 	switch (sysctl_sched_tunable_scaling) {
149 	case SCHED_TUNABLESCALING_NONE:
150 		factor = 1;
151 		break;
152 	case SCHED_TUNABLESCALING_LINEAR:
153 		factor = cpus;
154 		break;
155 	case SCHED_TUNABLESCALING_LOG:
156 	default:
157 		factor = 1 + ilog2(cpus);
158 		break;
159 	}
160 
161 	return factor;
162 }
163 
164 static void update_sysctl(void)
165 {
166 	unsigned int factor = get_update_sysctl_factor();
167 
168 #define SET_SYSCTL(name) \
169 	(sysctl_##name = (factor) * normalized_sysctl_##name)
170 	SET_SYSCTL(sched_min_granularity);
171 	SET_SYSCTL(sched_latency);
172 	SET_SYSCTL(sched_wakeup_granularity);
173 #undef SET_SYSCTL
174 }
175 
176 void sched_init_granularity(void)
177 {
178 	update_sysctl();
179 }
180 
181 #if BITS_PER_LONG == 32
182 # define WMULT_CONST	(~0UL)
183 #else
184 # define WMULT_CONST	(1UL << 32)
185 #endif
186 
187 #define WMULT_SHIFT	32
188 
189 /*
190  * Shift right and round:
191  */
192 #define SRR(x, y) (((x) + (1UL << ((y) - 1))) >> (y))
193 
194 /*
195  * delta *= weight / lw
196  */
197 static unsigned long
198 calc_delta_mine(unsigned long delta_exec, unsigned long weight,
199 		struct load_weight *lw)
200 {
201 	u64 tmp;
202 
203 	/*
204 	 * weight can be less than 2^SCHED_LOAD_RESOLUTION for task group sched
205 	 * entities since MIN_SHARES = 2. Treat weight as 1 if less than
206 	 * 2^SCHED_LOAD_RESOLUTION.
207 	 */
208 	if (likely(weight > (1UL << SCHED_LOAD_RESOLUTION)))
209 		tmp = (u64)delta_exec * scale_load_down(weight);
210 	else
211 		tmp = (u64)delta_exec;
212 
213 	if (!lw->inv_weight) {
214 		unsigned long w = scale_load_down(lw->weight);
215 
216 		if (BITS_PER_LONG > 32 && unlikely(w >= WMULT_CONST))
217 			lw->inv_weight = 1;
218 		else if (unlikely(!w))
219 			lw->inv_weight = WMULT_CONST;
220 		else
221 			lw->inv_weight = WMULT_CONST / w;
222 	}
223 
224 	/*
225 	 * Check whether we'd overflow the 64-bit multiplication:
226 	 */
227 	if (unlikely(tmp > WMULT_CONST))
228 		tmp = SRR(SRR(tmp, WMULT_SHIFT/2) * lw->inv_weight,
229 			WMULT_SHIFT/2);
230 	else
231 		tmp = SRR(tmp * lw->inv_weight, WMULT_SHIFT);
232 
233 	return (unsigned long)min(tmp, (u64)(unsigned long)LONG_MAX);
234 }
235 
236 
237 const struct sched_class fair_sched_class;
238 
239 /**************************************************************
240  * CFS operations on generic schedulable entities:
241  */
242 
243 #ifdef CONFIG_FAIR_GROUP_SCHED
244 
245 /* cpu runqueue to which this cfs_rq is attached */
246 static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
247 {
248 	return cfs_rq->rq;
249 }
250 
251 /* An entity is a task if it doesn't "own" a runqueue */
252 #define entity_is_task(se)	(!se->my_q)
253 
254 static inline struct task_struct *task_of(struct sched_entity *se)
255 {
256 #ifdef CONFIG_SCHED_DEBUG
257 	WARN_ON_ONCE(!entity_is_task(se));
258 #endif
259 	return container_of(se, struct task_struct, se);
260 }
261 
262 /* Walk up scheduling entities hierarchy */
263 #define for_each_sched_entity(se) \
264 		for (; se; se = se->parent)
265 
266 static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
267 {
268 	return p->se.cfs_rq;
269 }
270 
271 /* runqueue on which this entity is (to be) queued */
272 static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
273 {
274 	return se->cfs_rq;
275 }
276 
277 /* runqueue "owned" by this group */
278 static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
279 {
280 	return grp->my_q;
281 }
282 
283 static void update_cfs_rq_blocked_load(struct cfs_rq *cfs_rq,
284 				       int force_update);
285 
286 static inline void list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq)
287 {
288 	if (!cfs_rq->on_list) {
289 		/*
290 		 * Ensure we either appear before our parent (if already
291 		 * enqueued) or force our parent to appear after us when it is
292 		 * enqueued.  The fact that we always enqueue bottom-up
293 		 * reduces this to two cases.
294 		 */
295 		if (cfs_rq->tg->parent &&
296 		    cfs_rq->tg->parent->cfs_rq[cpu_of(rq_of(cfs_rq))]->on_list) {
297 			list_add_rcu(&cfs_rq->leaf_cfs_rq_list,
298 				&rq_of(cfs_rq)->leaf_cfs_rq_list);
299 		} else {
300 			list_add_tail_rcu(&cfs_rq->leaf_cfs_rq_list,
301 				&rq_of(cfs_rq)->leaf_cfs_rq_list);
302 		}
303 
304 		cfs_rq->on_list = 1;
305 		/* We should have no load, but we need to update last_decay. */
306 		update_cfs_rq_blocked_load(cfs_rq, 0);
307 	}
308 }
309 
310 static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq)
311 {
312 	if (cfs_rq->on_list) {
313 		list_del_rcu(&cfs_rq->leaf_cfs_rq_list);
314 		cfs_rq->on_list = 0;
315 	}
316 }
317 
318 /* Iterate thr' all leaf cfs_rq's on a runqueue */
319 #define for_each_leaf_cfs_rq(rq, cfs_rq) \
320 	list_for_each_entry_rcu(cfs_rq, &rq->leaf_cfs_rq_list, leaf_cfs_rq_list)
321 
322 /* Do the two (enqueued) entities belong to the same group ? */
323 static inline int
324 is_same_group(struct sched_entity *se, struct sched_entity *pse)
325 {
326 	if (se->cfs_rq == pse->cfs_rq)
327 		return 1;
328 
329 	return 0;
330 }
331 
332 static inline struct sched_entity *parent_entity(struct sched_entity *se)
333 {
334 	return se->parent;
335 }
336 
337 /* return depth at which a sched entity is present in the hierarchy */
338 static inline int depth_se(struct sched_entity *se)
339 {
340 	int depth = 0;
341 
342 	for_each_sched_entity(se)
343 		depth++;
344 
345 	return depth;
346 }
347 
348 static void
349 find_matching_se(struct sched_entity **se, struct sched_entity **pse)
350 {
351 	int se_depth, pse_depth;
352 
353 	/*
354 	 * preemption test can be made between sibling entities who are in the
355 	 * same cfs_rq i.e who have a common parent. Walk up the hierarchy of
356 	 * both tasks until we find their ancestors who are siblings of common
357 	 * parent.
358 	 */
359 
360 	/* First walk up until both entities are at same depth */
361 	se_depth = depth_se(*se);
362 	pse_depth = depth_se(*pse);
363 
364 	while (se_depth > pse_depth) {
365 		se_depth--;
366 		*se = parent_entity(*se);
367 	}
368 
369 	while (pse_depth > se_depth) {
370 		pse_depth--;
371 		*pse = parent_entity(*pse);
372 	}
373 
374 	while (!is_same_group(*se, *pse)) {
375 		*se = parent_entity(*se);
376 		*pse = parent_entity(*pse);
377 	}
378 }
379 
380 #else	/* !CONFIG_FAIR_GROUP_SCHED */
381 
382 static inline struct task_struct *task_of(struct sched_entity *se)
383 {
384 	return container_of(se, struct task_struct, se);
385 }
386 
387 static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
388 {
389 	return container_of(cfs_rq, struct rq, cfs);
390 }
391 
392 #define entity_is_task(se)	1
393 
394 #define for_each_sched_entity(se) \
395 		for (; se; se = NULL)
396 
397 static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
398 {
399 	return &task_rq(p)->cfs;
400 }
401 
402 static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
403 {
404 	struct task_struct *p = task_of(se);
405 	struct rq *rq = task_rq(p);
406 
407 	return &rq->cfs;
408 }
409 
410 /* runqueue "owned" by this group */
411 static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
412 {
413 	return NULL;
414 }
415 
416 static inline void list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq)
417 {
418 }
419 
420 static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq)
421 {
422 }
423 
424 #define for_each_leaf_cfs_rq(rq, cfs_rq) \
425 		for (cfs_rq = &rq->cfs; cfs_rq; cfs_rq = NULL)
426 
427 static inline int
428 is_same_group(struct sched_entity *se, struct sched_entity *pse)
429 {
430 	return 1;
431 }
432 
433 static inline struct sched_entity *parent_entity(struct sched_entity *se)
434 {
435 	return NULL;
436 }
437 
438 static inline void
439 find_matching_se(struct sched_entity **se, struct sched_entity **pse)
440 {
441 }
442 
443 #endif	/* CONFIG_FAIR_GROUP_SCHED */
444 
445 static __always_inline
446 void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, unsigned long delta_exec);
447 
448 /**************************************************************
449  * Scheduling class tree data structure manipulation methods:
450  */
451 
452 static inline u64 max_vruntime(u64 max_vruntime, u64 vruntime)
453 {
454 	s64 delta = (s64)(vruntime - max_vruntime);
455 	if (delta > 0)
456 		max_vruntime = vruntime;
457 
458 	return max_vruntime;
459 }
460 
461 static inline u64 min_vruntime(u64 min_vruntime, u64 vruntime)
462 {
463 	s64 delta = (s64)(vruntime - min_vruntime);
464 	if (delta < 0)
465 		min_vruntime = vruntime;
466 
467 	return min_vruntime;
468 }
469 
470 static inline int entity_before(struct sched_entity *a,
471 				struct sched_entity *b)
472 {
473 	return (s64)(a->vruntime - b->vruntime) < 0;
474 }
475 
476 static void update_min_vruntime(struct cfs_rq *cfs_rq)
477 {
478 	u64 vruntime = cfs_rq->min_vruntime;
479 
480 	if (cfs_rq->curr)
481 		vruntime = cfs_rq->curr->vruntime;
482 
483 	if (cfs_rq->rb_leftmost) {
484 		struct sched_entity *se = rb_entry(cfs_rq->rb_leftmost,
485 						   struct sched_entity,
486 						   run_node);
487 
488 		if (!cfs_rq->curr)
489 			vruntime = se->vruntime;
490 		else
491 			vruntime = min_vruntime(vruntime, se->vruntime);
492 	}
493 
494 	/* ensure we never gain time by being placed backwards. */
495 	cfs_rq->min_vruntime = max_vruntime(cfs_rq->min_vruntime, vruntime);
496 #ifndef CONFIG_64BIT
497 	smp_wmb();
498 	cfs_rq->min_vruntime_copy = cfs_rq->min_vruntime;
499 #endif
500 }
501 
502 /*
503  * Enqueue an entity into the rb-tree:
504  */
505 static void __enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
506 {
507 	struct rb_node **link = &cfs_rq->tasks_timeline.rb_node;
508 	struct rb_node *parent = NULL;
509 	struct sched_entity *entry;
510 	int leftmost = 1;
511 
512 	/*
513 	 * Find the right place in the rbtree:
514 	 */
515 	while (*link) {
516 		parent = *link;
517 		entry = rb_entry(parent, struct sched_entity, run_node);
518 		/*
519 		 * We dont care about collisions. Nodes with
520 		 * the same key stay together.
521 		 */
522 		if (entity_before(se, entry)) {
523 			link = &parent->rb_left;
524 		} else {
525 			link = &parent->rb_right;
526 			leftmost = 0;
527 		}
528 	}
529 
530 	/*
531 	 * Maintain a cache of leftmost tree entries (it is frequently
532 	 * used):
533 	 */
534 	if (leftmost)
535 		cfs_rq->rb_leftmost = &se->run_node;
536 
537 	rb_link_node(&se->run_node, parent, link);
538 	rb_insert_color(&se->run_node, &cfs_rq->tasks_timeline);
539 }
540 
541 static void __dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
542 {
543 	if (cfs_rq->rb_leftmost == &se->run_node) {
544 		struct rb_node *next_node;
545 
546 		next_node = rb_next(&se->run_node);
547 		cfs_rq->rb_leftmost = next_node;
548 	}
549 
550 	rb_erase(&se->run_node, &cfs_rq->tasks_timeline);
551 }
552 
553 struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq)
554 {
555 	struct rb_node *left = cfs_rq->rb_leftmost;
556 
557 	if (!left)
558 		return NULL;
559 
560 	return rb_entry(left, struct sched_entity, run_node);
561 }
562 
563 static struct sched_entity *__pick_next_entity(struct sched_entity *se)
564 {
565 	struct rb_node *next = rb_next(&se->run_node);
566 
567 	if (!next)
568 		return NULL;
569 
570 	return rb_entry(next, struct sched_entity, run_node);
571 }
572 
573 #ifdef CONFIG_SCHED_DEBUG
574 struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq)
575 {
576 	struct rb_node *last = rb_last(&cfs_rq->tasks_timeline);
577 
578 	if (!last)
579 		return NULL;
580 
581 	return rb_entry(last, struct sched_entity, run_node);
582 }
583 
584 /**************************************************************
585  * Scheduling class statistics methods:
586  */
587 
588 int sched_proc_update_handler(struct ctl_table *table, int write,
589 		void __user *buffer, size_t *lenp,
590 		loff_t *ppos)
591 {
592 	int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
593 	int factor = get_update_sysctl_factor();
594 
595 	if (ret || !write)
596 		return ret;
597 
598 	sched_nr_latency = DIV_ROUND_UP(sysctl_sched_latency,
599 					sysctl_sched_min_granularity);
600 
601 #define WRT_SYSCTL(name) \
602 	(normalized_sysctl_##name = sysctl_##name / (factor))
603 	WRT_SYSCTL(sched_min_granularity);
604 	WRT_SYSCTL(sched_latency);
605 	WRT_SYSCTL(sched_wakeup_granularity);
606 #undef WRT_SYSCTL
607 
608 	return 0;
609 }
610 #endif
611 
612 /*
613  * delta /= w
614  */
615 static inline unsigned long
616 calc_delta_fair(unsigned long delta, struct sched_entity *se)
617 {
618 	if (unlikely(se->load.weight != NICE_0_LOAD))
619 		delta = calc_delta_mine(delta, NICE_0_LOAD, &se->load);
620 
621 	return delta;
622 }
623 
624 /*
625  * The idea is to set a period in which each task runs once.
626  *
627  * When there are too many tasks (sched_nr_latency) we have to stretch
628  * this period because otherwise the slices get too small.
629  *
630  * p = (nr <= nl) ? l : l*nr/nl
631  */
632 static u64 __sched_period(unsigned long nr_running)
633 {
634 	u64 period = sysctl_sched_latency;
635 	unsigned long nr_latency = sched_nr_latency;
636 
637 	if (unlikely(nr_running > nr_latency)) {
638 		period = sysctl_sched_min_granularity;
639 		period *= nr_running;
640 	}
641 
642 	return period;
643 }
644 
645 /*
646  * We calculate the wall-time slice from the period by taking a part
647  * proportional to the weight.
648  *
649  * s = p*P[w/rw]
650  */
651 static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
652 {
653 	u64 slice = __sched_period(cfs_rq->nr_running + !se->on_rq);
654 
655 	for_each_sched_entity(se) {
656 		struct load_weight *load;
657 		struct load_weight lw;
658 
659 		cfs_rq = cfs_rq_of(se);
660 		load = &cfs_rq->load;
661 
662 		if (unlikely(!se->on_rq)) {
663 			lw = cfs_rq->load;
664 
665 			update_load_add(&lw, se->load.weight);
666 			load = &lw;
667 		}
668 		slice = calc_delta_mine(slice, se->load.weight, load);
669 	}
670 	return slice;
671 }
672 
673 /*
674  * We calculate the vruntime slice of a to-be-inserted task.
675  *
676  * vs = s/w
677  */
678 static u64 sched_vslice(struct cfs_rq *cfs_rq, struct sched_entity *se)
679 {
680 	return calc_delta_fair(sched_slice(cfs_rq, se), se);
681 }
682 
683 #ifdef CONFIG_SMP
684 static inline void __update_task_entity_contrib(struct sched_entity *se);
685 
686 /* Give new task start runnable values to heavy its load in infant time */
687 void init_task_runnable_average(struct task_struct *p)
688 {
689 	u32 slice;
690 
691 	p->se.avg.decay_count = 0;
692 	slice = sched_slice(task_cfs_rq(p), &p->se) >> 10;
693 	p->se.avg.runnable_avg_sum = slice;
694 	p->se.avg.runnable_avg_period = slice;
695 	__update_task_entity_contrib(&p->se);
696 }
697 #else
698 void init_task_runnable_average(struct task_struct *p)
699 {
700 }
701 #endif
702 
703 /*
704  * Update the current task's runtime statistics. Skip current tasks that
705  * are not in our scheduling class.
706  */
707 static inline void
708 __update_curr(struct cfs_rq *cfs_rq, struct sched_entity *curr,
709 	      unsigned long delta_exec)
710 {
711 	unsigned long delta_exec_weighted;
712 
713 	schedstat_set(curr->statistics.exec_max,
714 		      max((u64)delta_exec, curr->statistics.exec_max));
715 
716 	curr->sum_exec_runtime += delta_exec;
717 	schedstat_add(cfs_rq, exec_clock, delta_exec);
718 	delta_exec_weighted = calc_delta_fair(delta_exec, curr);
719 
720 	curr->vruntime += delta_exec_weighted;
721 	update_min_vruntime(cfs_rq);
722 }
723 
724 static void update_curr(struct cfs_rq *cfs_rq)
725 {
726 	struct sched_entity *curr = cfs_rq->curr;
727 	u64 now = rq_clock_task(rq_of(cfs_rq));
728 	unsigned long delta_exec;
729 
730 	if (unlikely(!curr))
731 		return;
732 
733 	/*
734 	 * Get the amount of time the current task was running
735 	 * since the last time we changed load (this cannot
736 	 * overflow on 32 bits):
737 	 */
738 	delta_exec = (unsigned long)(now - curr->exec_start);
739 	if (!delta_exec)
740 		return;
741 
742 	__update_curr(cfs_rq, curr, delta_exec);
743 	curr->exec_start = now;
744 
745 	if (entity_is_task(curr)) {
746 		struct task_struct *curtask = task_of(curr);
747 
748 		trace_sched_stat_runtime(curtask, delta_exec, curr->vruntime);
749 		cpuacct_charge(curtask, delta_exec);
750 		account_group_exec_runtime(curtask, delta_exec);
751 	}
752 
753 	account_cfs_rq_runtime(cfs_rq, delta_exec);
754 }
755 
756 static inline void
757 update_stats_wait_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
758 {
759 	schedstat_set(se->statistics.wait_start, rq_clock(rq_of(cfs_rq)));
760 }
761 
762 /*
763  * Task is being enqueued - update stats:
764  */
765 static void update_stats_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
766 {
767 	/*
768 	 * Are we enqueueing a waiting task? (for current tasks
769 	 * a dequeue/enqueue event is a NOP)
770 	 */
771 	if (se != cfs_rq->curr)
772 		update_stats_wait_start(cfs_rq, se);
773 }
774 
775 static void
776 update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se)
777 {
778 	schedstat_set(se->statistics.wait_max, max(se->statistics.wait_max,
779 			rq_clock(rq_of(cfs_rq)) - se->statistics.wait_start));
780 	schedstat_set(se->statistics.wait_count, se->statistics.wait_count + 1);
781 	schedstat_set(se->statistics.wait_sum, se->statistics.wait_sum +
782 			rq_clock(rq_of(cfs_rq)) - se->statistics.wait_start);
783 #ifdef CONFIG_SCHEDSTATS
784 	if (entity_is_task(se)) {
785 		trace_sched_stat_wait(task_of(se),
786 			rq_clock(rq_of(cfs_rq)) - se->statistics.wait_start);
787 	}
788 #endif
789 	schedstat_set(se->statistics.wait_start, 0);
790 }
791 
792 static inline void
793 update_stats_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
794 {
795 	/*
796 	 * Mark the end of the wait period if dequeueing a
797 	 * waiting task:
798 	 */
799 	if (se != cfs_rq->curr)
800 		update_stats_wait_end(cfs_rq, se);
801 }
802 
803 /*
804  * We are picking a new current task - update its stats:
805  */
806 static inline void
807 update_stats_curr_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
808 {
809 	/*
810 	 * We are starting a new run period:
811 	 */
812 	se->exec_start = rq_clock_task(rq_of(cfs_rq));
813 }
814 
815 /**************************************************
816  * Scheduling class queueing methods:
817  */
818 
819 #ifdef CONFIG_NUMA_BALANCING
820 /*
821  * numa task sample period in ms
822  */
823 unsigned int sysctl_numa_balancing_scan_period_min = 100;
824 unsigned int sysctl_numa_balancing_scan_period_max = 100*50;
825 unsigned int sysctl_numa_balancing_scan_period_reset = 100*600;
826 
827 /* Portion of address space to scan in MB */
828 unsigned int sysctl_numa_balancing_scan_size = 256;
829 
830 /* Scan @scan_size MB every @scan_period after an initial @scan_delay in ms */
831 unsigned int sysctl_numa_balancing_scan_delay = 1000;
832 
833 static void task_numa_placement(struct task_struct *p)
834 {
835 	int seq;
836 
837 	if (!p->mm)	/* for example, ksmd faulting in a user's mm */
838 		return;
839 	seq = ACCESS_ONCE(p->mm->numa_scan_seq);
840 	if (p->numa_scan_seq == seq)
841 		return;
842 	p->numa_scan_seq = seq;
843 
844 	/* FIXME: Scheduling placement policy hints go here */
845 }
846 
847 /*
848  * Got a PROT_NONE fault for a page on @node.
849  */
850 void task_numa_fault(int node, int pages, bool migrated)
851 {
852 	struct task_struct *p = current;
853 
854 	if (!numabalancing_enabled)
855 		return;
856 
857 	/* FIXME: Allocate task-specific structure for placement policy here */
858 
859 	/*
860 	 * If pages are properly placed (did not migrate) then scan slower.
861 	 * This is reset periodically in case of phase changes
862 	 */
863         if (!migrated)
864 		p->numa_scan_period = min(sysctl_numa_balancing_scan_period_max,
865 			p->numa_scan_period + jiffies_to_msecs(10));
866 
867 	task_numa_placement(p);
868 }
869 
870 static void reset_ptenuma_scan(struct task_struct *p)
871 {
872 	ACCESS_ONCE(p->mm->numa_scan_seq)++;
873 	p->mm->numa_scan_offset = 0;
874 }
875 
876 /*
877  * The expensive part of numa migration is done from task_work context.
878  * Triggered from task_tick_numa().
879  */
880 void task_numa_work(struct callback_head *work)
881 {
882 	unsigned long migrate, next_scan, now = jiffies;
883 	struct task_struct *p = current;
884 	struct mm_struct *mm = p->mm;
885 	struct vm_area_struct *vma;
886 	unsigned long start, end;
887 	long pages;
888 
889 	WARN_ON_ONCE(p != container_of(work, struct task_struct, numa_work));
890 
891 	work->next = work; /* protect against double add */
892 	/*
893 	 * Who cares about NUMA placement when they're dying.
894 	 *
895 	 * NOTE: make sure not to dereference p->mm before this check,
896 	 * exit_task_work() happens _after_ exit_mm() so we could be called
897 	 * without p->mm even though we still had it when we enqueued this
898 	 * work.
899 	 */
900 	if (p->flags & PF_EXITING)
901 		return;
902 
903 	/*
904 	 * We do not care about task placement until a task runs on a node
905 	 * other than the first one used by the address space. This is
906 	 * largely because migrations are driven by what CPU the task
907 	 * is running on. If it's never scheduled on another node, it'll
908 	 * not migrate so why bother trapping the fault.
909 	 */
910 	if (mm->first_nid == NUMA_PTE_SCAN_INIT)
911 		mm->first_nid = numa_node_id();
912 	if (mm->first_nid != NUMA_PTE_SCAN_ACTIVE) {
913 		/* Are we running on a new node yet? */
914 		if (numa_node_id() == mm->first_nid &&
915 		    !sched_feat_numa(NUMA_FORCE))
916 			return;
917 
918 		mm->first_nid = NUMA_PTE_SCAN_ACTIVE;
919 	}
920 
921 	/*
922 	 * Reset the scan period if enough time has gone by. Objective is that
923 	 * scanning will be reduced if pages are properly placed. As tasks
924 	 * can enter different phases this needs to be re-examined. Lacking
925 	 * proper tracking of reference behaviour, this blunt hammer is used.
926 	 */
927 	migrate = mm->numa_next_reset;
928 	if (time_after(now, migrate)) {
929 		p->numa_scan_period = sysctl_numa_balancing_scan_period_min;
930 		next_scan = now + msecs_to_jiffies(sysctl_numa_balancing_scan_period_reset);
931 		xchg(&mm->numa_next_reset, next_scan);
932 	}
933 
934 	/*
935 	 * Enforce maximal scan/migration frequency..
936 	 */
937 	migrate = mm->numa_next_scan;
938 	if (time_before(now, migrate))
939 		return;
940 
941 	if (p->numa_scan_period == 0)
942 		p->numa_scan_period = sysctl_numa_balancing_scan_period_min;
943 
944 	next_scan = now + msecs_to_jiffies(p->numa_scan_period);
945 	if (cmpxchg(&mm->numa_next_scan, migrate, next_scan) != migrate)
946 		return;
947 
948 	/*
949 	 * Do not set pte_numa if the current running node is rate-limited.
950 	 * This loses statistics on the fault but if we are unwilling to
951 	 * migrate to this node, it is less likely we can do useful work
952 	 */
953 	if (migrate_ratelimited(numa_node_id()))
954 		return;
955 
956 	start = mm->numa_scan_offset;
957 	pages = sysctl_numa_balancing_scan_size;
958 	pages <<= 20 - PAGE_SHIFT; /* MB in pages */
959 	if (!pages)
960 		return;
961 
962 	down_read(&mm->mmap_sem);
963 	vma = find_vma(mm, start);
964 	if (!vma) {
965 		reset_ptenuma_scan(p);
966 		start = 0;
967 		vma = mm->mmap;
968 	}
969 	for (; vma; vma = vma->vm_next) {
970 		if (!vma_migratable(vma))
971 			continue;
972 
973 		/* Skip small VMAs. They are not likely to be of relevance */
974 		if (vma->vm_end - vma->vm_start < HPAGE_SIZE)
975 			continue;
976 
977 		do {
978 			start = max(start, vma->vm_start);
979 			end = ALIGN(start + (pages << PAGE_SHIFT), HPAGE_SIZE);
980 			end = min(end, vma->vm_end);
981 			pages -= change_prot_numa(vma, start, end);
982 
983 			start = end;
984 			if (pages <= 0)
985 				goto out;
986 		} while (end != vma->vm_end);
987 	}
988 
989 out:
990 	/*
991 	 * It is possible to reach the end of the VMA list but the last few VMAs are
992 	 * not guaranteed to the vma_migratable. If they are not, we would find the
993 	 * !migratable VMA on the next scan but not reset the scanner to the start
994 	 * so check it now.
995 	 */
996 	if (vma)
997 		mm->numa_scan_offset = start;
998 	else
999 		reset_ptenuma_scan(p);
1000 	up_read(&mm->mmap_sem);
1001 }
1002 
1003 /*
1004  * Drive the periodic memory faults..
1005  */
1006 void task_tick_numa(struct rq *rq, struct task_struct *curr)
1007 {
1008 	struct callback_head *work = &curr->numa_work;
1009 	u64 period, now;
1010 
1011 	/*
1012 	 * We don't care about NUMA placement if we don't have memory.
1013 	 */
1014 	if (!curr->mm || (curr->flags & PF_EXITING) || work->next != work)
1015 		return;
1016 
1017 	/*
1018 	 * Using runtime rather than walltime has the dual advantage that
1019 	 * we (mostly) drive the selection from busy threads and that the
1020 	 * task needs to have done some actual work before we bother with
1021 	 * NUMA placement.
1022 	 */
1023 	now = curr->se.sum_exec_runtime;
1024 	period = (u64)curr->numa_scan_period * NSEC_PER_MSEC;
1025 
1026 	if (now - curr->node_stamp > period) {
1027 		if (!curr->node_stamp)
1028 			curr->numa_scan_period = sysctl_numa_balancing_scan_period_min;
1029 		curr->node_stamp = now;
1030 
1031 		if (!time_before(jiffies, curr->mm->numa_next_scan)) {
1032 			init_task_work(work, task_numa_work); /* TODO: move this into sched_fork() */
1033 			task_work_add(curr, work, true);
1034 		}
1035 	}
1036 }
1037 #else
1038 static void task_tick_numa(struct rq *rq, struct task_struct *curr)
1039 {
1040 }
1041 #endif /* CONFIG_NUMA_BALANCING */
1042 
1043 static void
1044 account_entity_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
1045 {
1046 	update_load_add(&cfs_rq->load, se->load.weight);
1047 	if (!parent_entity(se))
1048 		update_load_add(&rq_of(cfs_rq)->load, se->load.weight);
1049 #ifdef CONFIG_SMP
1050 	if (entity_is_task(se))
1051 		list_add(&se->group_node, &rq_of(cfs_rq)->cfs_tasks);
1052 #endif
1053 	cfs_rq->nr_running++;
1054 }
1055 
1056 static void
1057 account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
1058 {
1059 	update_load_sub(&cfs_rq->load, se->load.weight);
1060 	if (!parent_entity(se))
1061 		update_load_sub(&rq_of(cfs_rq)->load, se->load.weight);
1062 	if (entity_is_task(se))
1063 		list_del_init(&se->group_node);
1064 	cfs_rq->nr_running--;
1065 }
1066 
1067 #ifdef CONFIG_FAIR_GROUP_SCHED
1068 # ifdef CONFIG_SMP
1069 static inline long calc_tg_weight(struct task_group *tg, struct cfs_rq *cfs_rq)
1070 {
1071 	long tg_weight;
1072 
1073 	/*
1074 	 * Use this CPU's actual weight instead of the last load_contribution
1075 	 * to gain a more accurate current total weight. See
1076 	 * update_cfs_rq_load_contribution().
1077 	 */
1078 	tg_weight = atomic_long_read(&tg->load_avg);
1079 	tg_weight -= cfs_rq->tg_load_contrib;
1080 	tg_weight += cfs_rq->load.weight;
1081 
1082 	return tg_weight;
1083 }
1084 
1085 static long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg)
1086 {
1087 	long tg_weight, load, shares;
1088 
1089 	tg_weight = calc_tg_weight(tg, cfs_rq);
1090 	load = cfs_rq->load.weight;
1091 
1092 	shares = (tg->shares * load);
1093 	if (tg_weight)
1094 		shares /= tg_weight;
1095 
1096 	if (shares < MIN_SHARES)
1097 		shares = MIN_SHARES;
1098 	if (shares > tg->shares)
1099 		shares = tg->shares;
1100 
1101 	return shares;
1102 }
1103 # else /* CONFIG_SMP */
1104 static inline long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg)
1105 {
1106 	return tg->shares;
1107 }
1108 # endif /* CONFIG_SMP */
1109 static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
1110 			    unsigned long weight)
1111 {
1112 	if (se->on_rq) {
1113 		/* commit outstanding execution time */
1114 		if (cfs_rq->curr == se)
1115 			update_curr(cfs_rq);
1116 		account_entity_dequeue(cfs_rq, se);
1117 	}
1118 
1119 	update_load_set(&se->load, weight);
1120 
1121 	if (se->on_rq)
1122 		account_entity_enqueue(cfs_rq, se);
1123 }
1124 
1125 static inline int throttled_hierarchy(struct cfs_rq *cfs_rq);
1126 
1127 static void update_cfs_shares(struct cfs_rq *cfs_rq)
1128 {
1129 	struct task_group *tg;
1130 	struct sched_entity *se;
1131 	long shares;
1132 
1133 	tg = cfs_rq->tg;
1134 	se = tg->se[cpu_of(rq_of(cfs_rq))];
1135 	if (!se || throttled_hierarchy(cfs_rq))
1136 		return;
1137 #ifndef CONFIG_SMP
1138 	if (likely(se->load.weight == tg->shares))
1139 		return;
1140 #endif
1141 	shares = calc_cfs_shares(cfs_rq, tg);
1142 
1143 	reweight_entity(cfs_rq_of(se), se, shares);
1144 }
1145 #else /* CONFIG_FAIR_GROUP_SCHED */
1146 static inline void update_cfs_shares(struct cfs_rq *cfs_rq)
1147 {
1148 }
1149 #endif /* CONFIG_FAIR_GROUP_SCHED */
1150 
1151 #ifdef CONFIG_SMP
1152 /*
1153  * We choose a half-life close to 1 scheduling period.
1154  * Note: The tables below are dependent on this value.
1155  */
1156 #define LOAD_AVG_PERIOD 32
1157 #define LOAD_AVG_MAX 47742 /* maximum possible load avg */
1158 #define LOAD_AVG_MAX_N 345 /* number of full periods to produce LOAD_MAX_AVG */
1159 
1160 /* Precomputed fixed inverse multiplies for multiplication by y^n */
1161 static const u32 runnable_avg_yN_inv[] = {
1162 	0xffffffff, 0xfa83b2da, 0xf5257d14, 0xefe4b99a, 0xeac0c6e6, 0xe5b906e6,
1163 	0xe0ccdeeb, 0xdbfbb796, 0xd744fcc9, 0xd2a81d91, 0xce248c14, 0xc9b9bd85,
1164 	0xc5672a10, 0xc12c4cc9, 0xbd08a39e, 0xb8fbaf46, 0xb504f333, 0xb123f581,
1165 	0xad583ee9, 0xa9a15ab4, 0xa5fed6a9, 0xa2704302, 0x9ef5325f, 0x9b8d39b9,
1166 	0x9837f050, 0x94f4efa8, 0x91c3d373, 0x8ea4398a, 0x8b95c1e3, 0x88980e80,
1167 	0x85aac367, 0x82cd8698,
1168 };
1169 
1170 /*
1171  * Precomputed \Sum y^k { 1<=k<=n }.  These are floor(true_value) to prevent
1172  * over-estimates when re-combining.
1173  */
1174 static const u32 runnable_avg_yN_sum[] = {
1175 	    0, 1002, 1982, 2941, 3880, 4798, 5697, 6576, 7437, 8279, 9103,
1176 	 9909,10698,11470,12226,12966,13690,14398,15091,15769,16433,17082,
1177 	17718,18340,18949,19545,20128,20698,21256,21802,22336,22859,23371,
1178 };
1179 
1180 /*
1181  * Approximate:
1182  *   val * y^n,    where y^32 ~= 0.5 (~1 scheduling period)
1183  */
1184 static __always_inline u64 decay_load(u64 val, u64 n)
1185 {
1186 	unsigned int local_n;
1187 
1188 	if (!n)
1189 		return val;
1190 	else if (unlikely(n > LOAD_AVG_PERIOD * 63))
1191 		return 0;
1192 
1193 	/* after bounds checking we can collapse to 32-bit */
1194 	local_n = n;
1195 
1196 	/*
1197 	 * As y^PERIOD = 1/2, we can combine
1198 	 *    y^n = 1/2^(n/PERIOD) * k^(n%PERIOD)
1199 	 * With a look-up table which covers k^n (n<PERIOD)
1200 	 *
1201 	 * To achieve constant time decay_load.
1202 	 */
1203 	if (unlikely(local_n >= LOAD_AVG_PERIOD)) {
1204 		val >>= local_n / LOAD_AVG_PERIOD;
1205 		local_n %= LOAD_AVG_PERIOD;
1206 	}
1207 
1208 	val *= runnable_avg_yN_inv[local_n];
1209 	/* We don't use SRR here since we always want to round down. */
1210 	return val >> 32;
1211 }
1212 
1213 /*
1214  * For updates fully spanning n periods, the contribution to runnable
1215  * average will be: \Sum 1024*y^n
1216  *
1217  * We can compute this reasonably efficiently by combining:
1218  *   y^PERIOD = 1/2 with precomputed \Sum 1024*y^n {for  n <PERIOD}
1219  */
1220 static u32 __compute_runnable_contrib(u64 n)
1221 {
1222 	u32 contrib = 0;
1223 
1224 	if (likely(n <= LOAD_AVG_PERIOD))
1225 		return runnable_avg_yN_sum[n];
1226 	else if (unlikely(n >= LOAD_AVG_MAX_N))
1227 		return LOAD_AVG_MAX;
1228 
1229 	/* Compute \Sum k^n combining precomputed values for k^i, \Sum k^j */
1230 	do {
1231 		contrib /= 2; /* y^LOAD_AVG_PERIOD = 1/2 */
1232 		contrib += runnable_avg_yN_sum[LOAD_AVG_PERIOD];
1233 
1234 		n -= LOAD_AVG_PERIOD;
1235 	} while (n > LOAD_AVG_PERIOD);
1236 
1237 	contrib = decay_load(contrib, n);
1238 	return contrib + runnable_avg_yN_sum[n];
1239 }
1240 
1241 /*
1242  * We can represent the historical contribution to runnable average as the
1243  * coefficients of a geometric series.  To do this we sub-divide our runnable
1244  * history into segments of approximately 1ms (1024us); label the segment that
1245  * occurred N-ms ago p_N, with p_0 corresponding to the current period, e.g.
1246  *
1247  * [<- 1024us ->|<- 1024us ->|<- 1024us ->| ...
1248  *      p0            p1           p2
1249  *     (now)       (~1ms ago)  (~2ms ago)
1250  *
1251  * Let u_i denote the fraction of p_i that the entity was runnable.
1252  *
1253  * We then designate the fractions u_i as our co-efficients, yielding the
1254  * following representation of historical load:
1255  *   u_0 + u_1*y + u_2*y^2 + u_3*y^3 + ...
1256  *
1257  * We choose y based on the with of a reasonably scheduling period, fixing:
1258  *   y^32 = 0.5
1259  *
1260  * This means that the contribution to load ~32ms ago (u_32) will be weighted
1261  * approximately half as much as the contribution to load within the last ms
1262  * (u_0).
1263  *
1264  * When a period "rolls over" and we have new u_0`, multiplying the previous
1265  * sum again by y is sufficient to update:
1266  *   load_avg = u_0` + y*(u_0 + u_1*y + u_2*y^2 + ... )
1267  *            = u_0 + u_1*y + u_2*y^2 + ... [re-labeling u_i --> u_{i+1}]
1268  */
1269 static __always_inline int __update_entity_runnable_avg(u64 now,
1270 							struct sched_avg *sa,
1271 							int runnable)
1272 {
1273 	u64 delta, periods;
1274 	u32 runnable_contrib;
1275 	int delta_w, decayed = 0;
1276 
1277 	delta = now - sa->last_runnable_update;
1278 	/*
1279 	 * This should only happen when time goes backwards, which it
1280 	 * unfortunately does during sched clock init when we swap over to TSC.
1281 	 */
1282 	if ((s64)delta < 0) {
1283 		sa->last_runnable_update = now;
1284 		return 0;
1285 	}
1286 
1287 	/*
1288 	 * Use 1024ns as the unit of measurement since it's a reasonable
1289 	 * approximation of 1us and fast to compute.
1290 	 */
1291 	delta >>= 10;
1292 	if (!delta)
1293 		return 0;
1294 	sa->last_runnable_update = now;
1295 
1296 	/* delta_w is the amount already accumulated against our next period */
1297 	delta_w = sa->runnable_avg_period % 1024;
1298 	if (delta + delta_w >= 1024) {
1299 		/* period roll-over */
1300 		decayed = 1;
1301 
1302 		/*
1303 		 * Now that we know we're crossing a period boundary, figure
1304 		 * out how much from delta we need to complete the current
1305 		 * period and accrue it.
1306 		 */
1307 		delta_w = 1024 - delta_w;
1308 		if (runnable)
1309 			sa->runnable_avg_sum += delta_w;
1310 		sa->runnable_avg_period += delta_w;
1311 
1312 		delta -= delta_w;
1313 
1314 		/* Figure out how many additional periods this update spans */
1315 		periods = delta / 1024;
1316 		delta %= 1024;
1317 
1318 		sa->runnable_avg_sum = decay_load(sa->runnable_avg_sum,
1319 						  periods + 1);
1320 		sa->runnable_avg_period = decay_load(sa->runnable_avg_period,
1321 						     periods + 1);
1322 
1323 		/* Efficiently calculate \sum (1..n_period) 1024*y^i */
1324 		runnable_contrib = __compute_runnable_contrib(periods);
1325 		if (runnable)
1326 			sa->runnable_avg_sum += runnable_contrib;
1327 		sa->runnable_avg_period += runnable_contrib;
1328 	}
1329 
1330 	/* Remainder of delta accrued against u_0` */
1331 	if (runnable)
1332 		sa->runnable_avg_sum += delta;
1333 	sa->runnable_avg_period += delta;
1334 
1335 	return decayed;
1336 }
1337 
1338 /* Synchronize an entity's decay with its parenting cfs_rq.*/
1339 static inline u64 __synchronize_entity_decay(struct sched_entity *se)
1340 {
1341 	struct cfs_rq *cfs_rq = cfs_rq_of(se);
1342 	u64 decays = atomic64_read(&cfs_rq->decay_counter);
1343 
1344 	decays -= se->avg.decay_count;
1345 	if (!decays)
1346 		return 0;
1347 
1348 	se->avg.load_avg_contrib = decay_load(se->avg.load_avg_contrib, decays);
1349 	se->avg.decay_count = 0;
1350 
1351 	return decays;
1352 }
1353 
1354 #ifdef CONFIG_FAIR_GROUP_SCHED
1355 static inline void __update_cfs_rq_tg_load_contrib(struct cfs_rq *cfs_rq,
1356 						 int force_update)
1357 {
1358 	struct task_group *tg = cfs_rq->tg;
1359 	long tg_contrib;
1360 
1361 	tg_contrib = cfs_rq->runnable_load_avg + cfs_rq->blocked_load_avg;
1362 	tg_contrib -= cfs_rq->tg_load_contrib;
1363 
1364 	if (force_update || abs(tg_contrib) > cfs_rq->tg_load_contrib / 8) {
1365 		atomic_long_add(tg_contrib, &tg->load_avg);
1366 		cfs_rq->tg_load_contrib += tg_contrib;
1367 	}
1368 }
1369 
1370 /*
1371  * Aggregate cfs_rq runnable averages into an equivalent task_group
1372  * representation for computing load contributions.
1373  */
1374 static inline void __update_tg_runnable_avg(struct sched_avg *sa,
1375 						  struct cfs_rq *cfs_rq)
1376 {
1377 	struct task_group *tg = cfs_rq->tg;
1378 	long contrib;
1379 
1380 	/* The fraction of a cpu used by this cfs_rq */
1381 	contrib = div_u64(sa->runnable_avg_sum << NICE_0_SHIFT,
1382 			  sa->runnable_avg_period + 1);
1383 	contrib -= cfs_rq->tg_runnable_contrib;
1384 
1385 	if (abs(contrib) > cfs_rq->tg_runnable_contrib / 64) {
1386 		atomic_add(contrib, &tg->runnable_avg);
1387 		cfs_rq->tg_runnable_contrib += contrib;
1388 	}
1389 }
1390 
1391 static inline void __update_group_entity_contrib(struct sched_entity *se)
1392 {
1393 	struct cfs_rq *cfs_rq = group_cfs_rq(se);
1394 	struct task_group *tg = cfs_rq->tg;
1395 	int runnable_avg;
1396 
1397 	u64 contrib;
1398 
1399 	contrib = cfs_rq->tg_load_contrib * tg->shares;
1400 	se->avg.load_avg_contrib = div_u64(contrib,
1401 				     atomic_long_read(&tg->load_avg) + 1);
1402 
1403 	/*
1404 	 * For group entities we need to compute a correction term in the case
1405 	 * that they are consuming <1 cpu so that we would contribute the same
1406 	 * load as a task of equal weight.
1407 	 *
1408 	 * Explicitly co-ordinating this measurement would be expensive, but
1409 	 * fortunately the sum of each cpus contribution forms a usable
1410 	 * lower-bound on the true value.
1411 	 *
1412 	 * Consider the aggregate of 2 contributions.  Either they are disjoint
1413 	 * (and the sum represents true value) or they are disjoint and we are
1414 	 * understating by the aggregate of their overlap.
1415 	 *
1416 	 * Extending this to N cpus, for a given overlap, the maximum amount we
1417 	 * understand is then n_i(n_i+1)/2 * w_i where n_i is the number of
1418 	 * cpus that overlap for this interval and w_i is the interval width.
1419 	 *
1420 	 * On a small machine; the first term is well-bounded which bounds the
1421 	 * total error since w_i is a subset of the period.  Whereas on a
1422 	 * larger machine, while this first term can be larger, if w_i is the
1423 	 * of consequential size guaranteed to see n_i*w_i quickly converge to
1424 	 * our upper bound of 1-cpu.
1425 	 */
1426 	runnable_avg = atomic_read(&tg->runnable_avg);
1427 	if (runnable_avg < NICE_0_LOAD) {
1428 		se->avg.load_avg_contrib *= runnable_avg;
1429 		se->avg.load_avg_contrib >>= NICE_0_SHIFT;
1430 	}
1431 }
1432 #else
1433 static inline void __update_cfs_rq_tg_load_contrib(struct cfs_rq *cfs_rq,
1434 						 int force_update) {}
1435 static inline void __update_tg_runnable_avg(struct sched_avg *sa,
1436 						  struct cfs_rq *cfs_rq) {}
1437 static inline void __update_group_entity_contrib(struct sched_entity *se) {}
1438 #endif
1439 
1440 static inline void __update_task_entity_contrib(struct sched_entity *se)
1441 {
1442 	u32 contrib;
1443 
1444 	/* avoid overflowing a 32-bit type w/ SCHED_LOAD_SCALE */
1445 	contrib = se->avg.runnable_avg_sum * scale_load_down(se->load.weight);
1446 	contrib /= (se->avg.runnable_avg_period + 1);
1447 	se->avg.load_avg_contrib = scale_load(contrib);
1448 }
1449 
1450 /* Compute the current contribution to load_avg by se, return any delta */
1451 static long __update_entity_load_avg_contrib(struct sched_entity *se)
1452 {
1453 	long old_contrib = se->avg.load_avg_contrib;
1454 
1455 	if (entity_is_task(se)) {
1456 		__update_task_entity_contrib(se);
1457 	} else {
1458 		__update_tg_runnable_avg(&se->avg, group_cfs_rq(se));
1459 		__update_group_entity_contrib(se);
1460 	}
1461 
1462 	return se->avg.load_avg_contrib - old_contrib;
1463 }
1464 
1465 static inline void subtract_blocked_load_contrib(struct cfs_rq *cfs_rq,
1466 						 long load_contrib)
1467 {
1468 	if (likely(load_contrib < cfs_rq->blocked_load_avg))
1469 		cfs_rq->blocked_load_avg -= load_contrib;
1470 	else
1471 		cfs_rq->blocked_load_avg = 0;
1472 }
1473 
1474 static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq);
1475 
1476 /* Update a sched_entity's runnable average */
1477 static inline void update_entity_load_avg(struct sched_entity *se,
1478 					  int update_cfs_rq)
1479 {
1480 	struct cfs_rq *cfs_rq = cfs_rq_of(se);
1481 	long contrib_delta;
1482 	u64 now;
1483 
1484 	/*
1485 	 * For a group entity we need to use their owned cfs_rq_clock_task() in
1486 	 * case they are the parent of a throttled hierarchy.
1487 	 */
1488 	if (entity_is_task(se))
1489 		now = cfs_rq_clock_task(cfs_rq);
1490 	else
1491 		now = cfs_rq_clock_task(group_cfs_rq(se));
1492 
1493 	if (!__update_entity_runnable_avg(now, &se->avg, se->on_rq))
1494 		return;
1495 
1496 	contrib_delta = __update_entity_load_avg_contrib(se);
1497 
1498 	if (!update_cfs_rq)
1499 		return;
1500 
1501 	if (se->on_rq)
1502 		cfs_rq->runnable_load_avg += contrib_delta;
1503 	else
1504 		subtract_blocked_load_contrib(cfs_rq, -contrib_delta);
1505 }
1506 
1507 /*
1508  * Decay the load contributed by all blocked children and account this so that
1509  * their contribution may appropriately discounted when they wake up.
1510  */
1511 static void update_cfs_rq_blocked_load(struct cfs_rq *cfs_rq, int force_update)
1512 {
1513 	u64 now = cfs_rq_clock_task(cfs_rq) >> 20;
1514 	u64 decays;
1515 
1516 	decays = now - cfs_rq->last_decay;
1517 	if (!decays && !force_update)
1518 		return;
1519 
1520 	if (atomic_long_read(&cfs_rq->removed_load)) {
1521 		unsigned long removed_load;
1522 		removed_load = atomic_long_xchg(&cfs_rq->removed_load, 0);
1523 		subtract_blocked_load_contrib(cfs_rq, removed_load);
1524 	}
1525 
1526 	if (decays) {
1527 		cfs_rq->blocked_load_avg = decay_load(cfs_rq->blocked_load_avg,
1528 						      decays);
1529 		atomic64_add(decays, &cfs_rq->decay_counter);
1530 		cfs_rq->last_decay = now;
1531 	}
1532 
1533 	__update_cfs_rq_tg_load_contrib(cfs_rq, force_update);
1534 }
1535 
1536 static inline void update_rq_runnable_avg(struct rq *rq, int runnable)
1537 {
1538 	__update_entity_runnable_avg(rq_clock_task(rq), &rq->avg, runnable);
1539 	__update_tg_runnable_avg(&rq->avg, &rq->cfs);
1540 }
1541 
1542 /* Add the load generated by se into cfs_rq's child load-average */
1543 static inline void enqueue_entity_load_avg(struct cfs_rq *cfs_rq,
1544 						  struct sched_entity *se,
1545 						  int wakeup)
1546 {
1547 	/*
1548 	 * We track migrations using entity decay_count <= 0, on a wake-up
1549 	 * migration we use a negative decay count to track the remote decays
1550 	 * accumulated while sleeping.
1551 	 *
1552 	 * Newly forked tasks are enqueued with se->avg.decay_count == 0, they
1553 	 * are seen by enqueue_entity_load_avg() as a migration with an already
1554 	 * constructed load_avg_contrib.
1555 	 */
1556 	if (unlikely(se->avg.decay_count <= 0)) {
1557 		se->avg.last_runnable_update = rq_clock_task(rq_of(cfs_rq));
1558 		if (se->avg.decay_count) {
1559 			/*
1560 			 * In a wake-up migration we have to approximate the
1561 			 * time sleeping.  This is because we can't synchronize
1562 			 * clock_task between the two cpus, and it is not
1563 			 * guaranteed to be read-safe.  Instead, we can
1564 			 * approximate this using our carried decays, which are
1565 			 * explicitly atomically readable.
1566 			 */
1567 			se->avg.last_runnable_update -= (-se->avg.decay_count)
1568 							<< 20;
1569 			update_entity_load_avg(se, 0);
1570 			/* Indicate that we're now synchronized and on-rq */
1571 			se->avg.decay_count = 0;
1572 		}
1573 		wakeup = 0;
1574 	} else {
1575 		/*
1576 		 * Task re-woke on same cpu (or else migrate_task_rq_fair()
1577 		 * would have made count negative); we must be careful to avoid
1578 		 * double-accounting blocked time after synchronizing decays.
1579 		 */
1580 		se->avg.last_runnable_update += __synchronize_entity_decay(se)
1581 							<< 20;
1582 	}
1583 
1584 	/* migrated tasks did not contribute to our blocked load */
1585 	if (wakeup) {
1586 		subtract_blocked_load_contrib(cfs_rq, se->avg.load_avg_contrib);
1587 		update_entity_load_avg(se, 0);
1588 	}
1589 
1590 	cfs_rq->runnable_load_avg += se->avg.load_avg_contrib;
1591 	/* we force update consideration on load-balancer moves */
1592 	update_cfs_rq_blocked_load(cfs_rq, !wakeup);
1593 }
1594 
1595 /*
1596  * Remove se's load from this cfs_rq child load-average, if the entity is
1597  * transitioning to a blocked state we track its projected decay using
1598  * blocked_load_avg.
1599  */
1600 static inline void dequeue_entity_load_avg(struct cfs_rq *cfs_rq,
1601 						  struct sched_entity *se,
1602 						  int sleep)
1603 {
1604 	update_entity_load_avg(se, 1);
1605 	/* we force update consideration on load-balancer moves */
1606 	update_cfs_rq_blocked_load(cfs_rq, !sleep);
1607 
1608 	cfs_rq->runnable_load_avg -= se->avg.load_avg_contrib;
1609 	if (sleep) {
1610 		cfs_rq->blocked_load_avg += se->avg.load_avg_contrib;
1611 		se->avg.decay_count = atomic64_read(&cfs_rq->decay_counter);
1612 	} /* migrations, e.g. sleep=0 leave decay_count == 0 */
1613 }
1614 
1615 /*
1616  * Update the rq's load with the elapsed running time before entering
1617  * idle. if the last scheduled task is not a CFS task, idle_enter will
1618  * be the only way to update the runnable statistic.
1619  */
1620 void idle_enter_fair(struct rq *this_rq)
1621 {
1622 	update_rq_runnable_avg(this_rq, 1);
1623 }
1624 
1625 /*
1626  * Update the rq's load with the elapsed idle time before a task is
1627  * scheduled. if the newly scheduled task is not a CFS task, idle_exit will
1628  * be the only way to update the runnable statistic.
1629  */
1630 void idle_exit_fair(struct rq *this_rq)
1631 {
1632 	update_rq_runnable_avg(this_rq, 0);
1633 }
1634 
1635 #else
1636 static inline void update_entity_load_avg(struct sched_entity *se,
1637 					  int update_cfs_rq) {}
1638 static inline void update_rq_runnable_avg(struct rq *rq, int runnable) {}
1639 static inline void enqueue_entity_load_avg(struct cfs_rq *cfs_rq,
1640 					   struct sched_entity *se,
1641 					   int wakeup) {}
1642 static inline void dequeue_entity_load_avg(struct cfs_rq *cfs_rq,
1643 					   struct sched_entity *se,
1644 					   int sleep) {}
1645 static inline void update_cfs_rq_blocked_load(struct cfs_rq *cfs_rq,
1646 					      int force_update) {}
1647 #endif
1648 
1649 static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
1650 {
1651 #ifdef CONFIG_SCHEDSTATS
1652 	struct task_struct *tsk = NULL;
1653 
1654 	if (entity_is_task(se))
1655 		tsk = task_of(se);
1656 
1657 	if (se->statistics.sleep_start) {
1658 		u64 delta = rq_clock(rq_of(cfs_rq)) - se->statistics.sleep_start;
1659 
1660 		if ((s64)delta < 0)
1661 			delta = 0;
1662 
1663 		if (unlikely(delta > se->statistics.sleep_max))
1664 			se->statistics.sleep_max = delta;
1665 
1666 		se->statistics.sleep_start = 0;
1667 		se->statistics.sum_sleep_runtime += delta;
1668 
1669 		if (tsk) {
1670 			account_scheduler_latency(tsk, delta >> 10, 1);
1671 			trace_sched_stat_sleep(tsk, delta);
1672 		}
1673 	}
1674 	if (se->statistics.block_start) {
1675 		u64 delta = rq_clock(rq_of(cfs_rq)) - se->statistics.block_start;
1676 
1677 		if ((s64)delta < 0)
1678 			delta = 0;
1679 
1680 		if (unlikely(delta > se->statistics.block_max))
1681 			se->statistics.block_max = delta;
1682 
1683 		se->statistics.block_start = 0;
1684 		se->statistics.sum_sleep_runtime += delta;
1685 
1686 		if (tsk) {
1687 			if (tsk->in_iowait) {
1688 				se->statistics.iowait_sum += delta;
1689 				se->statistics.iowait_count++;
1690 				trace_sched_stat_iowait(tsk, delta);
1691 			}
1692 
1693 			trace_sched_stat_blocked(tsk, delta);
1694 
1695 			/*
1696 			 * Blocking time is in units of nanosecs, so shift by
1697 			 * 20 to get a milliseconds-range estimation of the
1698 			 * amount of time that the task spent sleeping:
1699 			 */
1700 			if (unlikely(prof_on == SLEEP_PROFILING)) {
1701 				profile_hits(SLEEP_PROFILING,
1702 						(void *)get_wchan(tsk),
1703 						delta >> 20);
1704 			}
1705 			account_scheduler_latency(tsk, delta >> 10, 0);
1706 		}
1707 	}
1708 #endif
1709 }
1710 
1711 static void check_spread(struct cfs_rq *cfs_rq, struct sched_entity *se)
1712 {
1713 #ifdef CONFIG_SCHED_DEBUG
1714 	s64 d = se->vruntime - cfs_rq->min_vruntime;
1715 
1716 	if (d < 0)
1717 		d = -d;
1718 
1719 	if (d > 3*sysctl_sched_latency)
1720 		schedstat_inc(cfs_rq, nr_spread_over);
1721 #endif
1722 }
1723 
1724 static void
1725 place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
1726 {
1727 	u64 vruntime = cfs_rq->min_vruntime;
1728 
1729 	/*
1730 	 * The 'current' period is already promised to the current tasks,
1731 	 * however the extra weight of the new task will slow them down a
1732 	 * little, place the new task so that it fits in the slot that
1733 	 * stays open at the end.
1734 	 */
1735 	if (initial && sched_feat(START_DEBIT))
1736 		vruntime += sched_vslice(cfs_rq, se);
1737 
1738 	/* sleeps up to a single latency don't count. */
1739 	if (!initial) {
1740 		unsigned long thresh = sysctl_sched_latency;
1741 
1742 		/*
1743 		 * Halve their sleep time's effect, to allow
1744 		 * for a gentler effect of sleepers:
1745 		 */
1746 		if (sched_feat(GENTLE_FAIR_SLEEPERS))
1747 			thresh >>= 1;
1748 
1749 		vruntime -= thresh;
1750 	}
1751 
1752 	/* ensure we never gain time by being placed backwards. */
1753 	se->vruntime = max_vruntime(se->vruntime, vruntime);
1754 }
1755 
1756 static void check_enqueue_throttle(struct cfs_rq *cfs_rq);
1757 
1758 static void
1759 enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
1760 {
1761 	/*
1762 	 * Update the normalized vruntime before updating min_vruntime
1763 	 * through calling update_curr().
1764 	 */
1765 	if (!(flags & ENQUEUE_WAKEUP) || (flags & ENQUEUE_WAKING))
1766 		se->vruntime += cfs_rq->min_vruntime;
1767 
1768 	/*
1769 	 * Update run-time statistics of the 'current'.
1770 	 */
1771 	update_curr(cfs_rq);
1772 	enqueue_entity_load_avg(cfs_rq, se, flags & ENQUEUE_WAKEUP);
1773 	account_entity_enqueue(cfs_rq, se);
1774 	update_cfs_shares(cfs_rq);
1775 
1776 	if (flags & ENQUEUE_WAKEUP) {
1777 		place_entity(cfs_rq, se, 0);
1778 		enqueue_sleeper(cfs_rq, se);
1779 	}
1780 
1781 	update_stats_enqueue(cfs_rq, se);
1782 	check_spread(cfs_rq, se);
1783 	if (se != cfs_rq->curr)
1784 		__enqueue_entity(cfs_rq, se);
1785 	se->on_rq = 1;
1786 
1787 	if (cfs_rq->nr_running == 1) {
1788 		list_add_leaf_cfs_rq(cfs_rq);
1789 		check_enqueue_throttle(cfs_rq);
1790 	}
1791 }
1792 
1793 static void __clear_buddies_last(struct sched_entity *se)
1794 {
1795 	for_each_sched_entity(se) {
1796 		struct cfs_rq *cfs_rq = cfs_rq_of(se);
1797 		if (cfs_rq->last == se)
1798 			cfs_rq->last = NULL;
1799 		else
1800 			break;
1801 	}
1802 }
1803 
1804 static void __clear_buddies_next(struct sched_entity *se)
1805 {
1806 	for_each_sched_entity(se) {
1807 		struct cfs_rq *cfs_rq = cfs_rq_of(se);
1808 		if (cfs_rq->next == se)
1809 			cfs_rq->next = NULL;
1810 		else
1811 			break;
1812 	}
1813 }
1814 
1815 static void __clear_buddies_skip(struct sched_entity *se)
1816 {
1817 	for_each_sched_entity(se) {
1818 		struct cfs_rq *cfs_rq = cfs_rq_of(se);
1819 		if (cfs_rq->skip == se)
1820 			cfs_rq->skip = NULL;
1821 		else
1822 			break;
1823 	}
1824 }
1825 
1826 static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se)
1827 {
1828 	if (cfs_rq->last == se)
1829 		__clear_buddies_last(se);
1830 
1831 	if (cfs_rq->next == se)
1832 		__clear_buddies_next(se);
1833 
1834 	if (cfs_rq->skip == se)
1835 		__clear_buddies_skip(se);
1836 }
1837 
1838 static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq);
1839 
1840 static void
1841 dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
1842 {
1843 	/*
1844 	 * Update run-time statistics of the 'current'.
1845 	 */
1846 	update_curr(cfs_rq);
1847 	dequeue_entity_load_avg(cfs_rq, se, flags & DEQUEUE_SLEEP);
1848 
1849 	update_stats_dequeue(cfs_rq, se);
1850 	if (flags & DEQUEUE_SLEEP) {
1851 #ifdef CONFIG_SCHEDSTATS
1852 		if (entity_is_task(se)) {
1853 			struct task_struct *tsk = task_of(se);
1854 
1855 			if (tsk->state & TASK_INTERRUPTIBLE)
1856 				se->statistics.sleep_start = rq_clock(rq_of(cfs_rq));
1857 			if (tsk->state & TASK_UNINTERRUPTIBLE)
1858 				se->statistics.block_start = rq_clock(rq_of(cfs_rq));
1859 		}
1860 #endif
1861 	}
1862 
1863 	clear_buddies(cfs_rq, se);
1864 
1865 	if (se != cfs_rq->curr)
1866 		__dequeue_entity(cfs_rq, se);
1867 	se->on_rq = 0;
1868 	account_entity_dequeue(cfs_rq, se);
1869 
1870 	/*
1871 	 * Normalize the entity after updating the min_vruntime because the
1872 	 * update can refer to the ->curr item and we need to reflect this
1873 	 * movement in our normalized position.
1874 	 */
1875 	if (!(flags & DEQUEUE_SLEEP))
1876 		se->vruntime -= cfs_rq->min_vruntime;
1877 
1878 	/* return excess runtime on last dequeue */
1879 	return_cfs_rq_runtime(cfs_rq);
1880 
1881 	update_min_vruntime(cfs_rq);
1882 	update_cfs_shares(cfs_rq);
1883 }
1884 
1885 /*
1886  * Preempt the current task with a newly woken task if needed:
1887  */
1888 static void
1889 check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
1890 {
1891 	unsigned long ideal_runtime, delta_exec;
1892 	struct sched_entity *se;
1893 	s64 delta;
1894 
1895 	ideal_runtime = sched_slice(cfs_rq, curr);
1896 	delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
1897 	if (delta_exec > ideal_runtime) {
1898 		resched_task(rq_of(cfs_rq)->curr);
1899 		/*
1900 		 * The current task ran long enough, ensure it doesn't get
1901 		 * re-elected due to buddy favours.
1902 		 */
1903 		clear_buddies(cfs_rq, curr);
1904 		return;
1905 	}
1906 
1907 	/*
1908 	 * Ensure that a task that missed wakeup preemption by a
1909 	 * narrow margin doesn't have to wait for a full slice.
1910 	 * This also mitigates buddy induced latencies under load.
1911 	 */
1912 	if (delta_exec < sysctl_sched_min_granularity)
1913 		return;
1914 
1915 	se = __pick_first_entity(cfs_rq);
1916 	delta = curr->vruntime - se->vruntime;
1917 
1918 	if (delta < 0)
1919 		return;
1920 
1921 	if (delta > ideal_runtime)
1922 		resched_task(rq_of(cfs_rq)->curr);
1923 }
1924 
1925 static void
1926 set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
1927 {
1928 	/* 'current' is not kept within the tree. */
1929 	if (se->on_rq) {
1930 		/*
1931 		 * Any task has to be enqueued before it get to execute on
1932 		 * a CPU. So account for the time it spent waiting on the
1933 		 * runqueue.
1934 		 */
1935 		update_stats_wait_end(cfs_rq, se);
1936 		__dequeue_entity(cfs_rq, se);
1937 	}
1938 
1939 	update_stats_curr_start(cfs_rq, se);
1940 	cfs_rq->curr = se;
1941 #ifdef CONFIG_SCHEDSTATS
1942 	/*
1943 	 * Track our maximum slice length, if the CPU's load is at
1944 	 * least twice that of our own weight (i.e. dont track it
1945 	 * when there are only lesser-weight tasks around):
1946 	 */
1947 	if (rq_of(cfs_rq)->load.weight >= 2*se->load.weight) {
1948 		se->statistics.slice_max = max(se->statistics.slice_max,
1949 			se->sum_exec_runtime - se->prev_sum_exec_runtime);
1950 	}
1951 #endif
1952 	se->prev_sum_exec_runtime = se->sum_exec_runtime;
1953 }
1954 
1955 static int
1956 wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se);
1957 
1958 /*
1959  * Pick the next process, keeping these things in mind, in this order:
1960  * 1) keep things fair between processes/task groups
1961  * 2) pick the "next" process, since someone really wants that to run
1962  * 3) pick the "last" process, for cache locality
1963  * 4) do not run the "skip" process, if something else is available
1964  */
1965 static struct sched_entity *pick_next_entity(struct cfs_rq *cfs_rq)
1966 {
1967 	struct sched_entity *se = __pick_first_entity(cfs_rq);
1968 	struct sched_entity *left = se;
1969 
1970 	/*
1971 	 * Avoid running the skip buddy, if running something else can
1972 	 * be done without getting too unfair.
1973 	 */
1974 	if (cfs_rq->skip == se) {
1975 		struct sched_entity *second = __pick_next_entity(se);
1976 		if (second && wakeup_preempt_entity(second, left) < 1)
1977 			se = second;
1978 	}
1979 
1980 	/*
1981 	 * Prefer last buddy, try to return the CPU to a preempted task.
1982 	 */
1983 	if (cfs_rq->last && wakeup_preempt_entity(cfs_rq->last, left) < 1)
1984 		se = cfs_rq->last;
1985 
1986 	/*
1987 	 * Someone really wants this to run. If it's not unfair, run it.
1988 	 */
1989 	if (cfs_rq->next && wakeup_preempt_entity(cfs_rq->next, left) < 1)
1990 		se = cfs_rq->next;
1991 
1992 	clear_buddies(cfs_rq, se);
1993 
1994 	return se;
1995 }
1996 
1997 static void check_cfs_rq_runtime(struct cfs_rq *cfs_rq);
1998 
1999 static void put_prev_entity(struct cfs_rq *cfs_rq, struct sched_entity *prev)
2000 {
2001 	/*
2002 	 * If still on the runqueue then deactivate_task()
2003 	 * was not called and update_curr() has to be done:
2004 	 */
2005 	if (prev->on_rq)
2006 		update_curr(cfs_rq);
2007 
2008 	/* throttle cfs_rqs exceeding runtime */
2009 	check_cfs_rq_runtime(cfs_rq);
2010 
2011 	check_spread(cfs_rq, prev);
2012 	if (prev->on_rq) {
2013 		update_stats_wait_start(cfs_rq, prev);
2014 		/* Put 'current' back into the tree. */
2015 		__enqueue_entity(cfs_rq, prev);
2016 		/* in !on_rq case, update occurred at dequeue */
2017 		update_entity_load_avg(prev, 1);
2018 	}
2019 	cfs_rq->curr = NULL;
2020 }
2021 
2022 static void
2023 entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued)
2024 {
2025 	/*
2026 	 * Update run-time statistics of the 'current'.
2027 	 */
2028 	update_curr(cfs_rq);
2029 
2030 	/*
2031 	 * Ensure that runnable average is periodically updated.
2032 	 */
2033 	update_entity_load_avg(curr, 1);
2034 	update_cfs_rq_blocked_load(cfs_rq, 1);
2035 
2036 #ifdef CONFIG_SCHED_HRTICK
2037 	/*
2038 	 * queued ticks are scheduled to match the slice, so don't bother
2039 	 * validating it and just reschedule.
2040 	 */
2041 	if (queued) {
2042 		resched_task(rq_of(cfs_rq)->curr);
2043 		return;
2044 	}
2045 	/*
2046 	 * don't let the period tick interfere with the hrtick preemption
2047 	 */
2048 	if (!sched_feat(DOUBLE_TICK) &&
2049 			hrtimer_active(&rq_of(cfs_rq)->hrtick_timer))
2050 		return;
2051 #endif
2052 
2053 	if (cfs_rq->nr_running > 1)
2054 		check_preempt_tick(cfs_rq, curr);
2055 }
2056 
2057 
2058 /**************************************************
2059  * CFS bandwidth control machinery
2060  */
2061 
2062 #ifdef CONFIG_CFS_BANDWIDTH
2063 
2064 #ifdef HAVE_JUMP_LABEL
2065 static struct static_key __cfs_bandwidth_used;
2066 
2067 static inline bool cfs_bandwidth_used(void)
2068 {
2069 	return static_key_false(&__cfs_bandwidth_used);
2070 }
2071 
2072 void account_cfs_bandwidth_used(int enabled, int was_enabled)
2073 {
2074 	/* only need to count groups transitioning between enabled/!enabled */
2075 	if (enabled && !was_enabled)
2076 		static_key_slow_inc(&__cfs_bandwidth_used);
2077 	else if (!enabled && was_enabled)
2078 		static_key_slow_dec(&__cfs_bandwidth_used);
2079 }
2080 #else /* HAVE_JUMP_LABEL */
2081 static bool cfs_bandwidth_used(void)
2082 {
2083 	return true;
2084 }
2085 
2086 void account_cfs_bandwidth_used(int enabled, int was_enabled) {}
2087 #endif /* HAVE_JUMP_LABEL */
2088 
2089 /*
2090  * default period for cfs group bandwidth.
2091  * default: 0.1s, units: nanoseconds
2092  */
2093 static inline u64 default_cfs_period(void)
2094 {
2095 	return 100000000ULL;
2096 }
2097 
2098 static inline u64 sched_cfs_bandwidth_slice(void)
2099 {
2100 	return (u64)sysctl_sched_cfs_bandwidth_slice * NSEC_PER_USEC;
2101 }
2102 
2103 /*
2104  * Replenish runtime according to assigned quota and update expiration time.
2105  * We use sched_clock_cpu directly instead of rq->clock to avoid adding
2106  * additional synchronization around rq->lock.
2107  *
2108  * requires cfs_b->lock
2109  */
2110 void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b)
2111 {
2112 	u64 now;
2113 
2114 	if (cfs_b->quota == RUNTIME_INF)
2115 		return;
2116 
2117 	now = sched_clock_cpu(smp_processor_id());
2118 	cfs_b->runtime = cfs_b->quota;
2119 	cfs_b->runtime_expires = now + ktime_to_ns(cfs_b->period);
2120 }
2121 
2122 static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg)
2123 {
2124 	return &tg->cfs_bandwidth;
2125 }
2126 
2127 /* rq->task_clock normalized against any time this cfs_rq has spent throttled */
2128 static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq)
2129 {
2130 	if (unlikely(cfs_rq->throttle_count))
2131 		return cfs_rq->throttled_clock_task;
2132 
2133 	return rq_clock_task(rq_of(cfs_rq)) - cfs_rq->throttled_clock_task_time;
2134 }
2135 
2136 /* returns 0 on failure to allocate runtime */
2137 static int assign_cfs_rq_runtime(struct cfs_rq *cfs_rq)
2138 {
2139 	struct task_group *tg = cfs_rq->tg;
2140 	struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(tg);
2141 	u64 amount = 0, min_amount, expires;
2142 
2143 	/* note: this is a positive sum as runtime_remaining <= 0 */
2144 	min_amount = sched_cfs_bandwidth_slice() - cfs_rq->runtime_remaining;
2145 
2146 	raw_spin_lock(&cfs_b->lock);
2147 	if (cfs_b->quota == RUNTIME_INF)
2148 		amount = min_amount;
2149 	else {
2150 		/*
2151 		 * If the bandwidth pool has become inactive, then at least one
2152 		 * period must have elapsed since the last consumption.
2153 		 * Refresh the global state and ensure bandwidth timer becomes
2154 		 * active.
2155 		 */
2156 		if (!cfs_b->timer_active) {
2157 			__refill_cfs_bandwidth_runtime(cfs_b);
2158 			__start_cfs_bandwidth(cfs_b);
2159 		}
2160 
2161 		if (cfs_b->runtime > 0) {
2162 			amount = min(cfs_b->runtime, min_amount);
2163 			cfs_b->runtime -= amount;
2164 			cfs_b->idle = 0;
2165 		}
2166 	}
2167 	expires = cfs_b->runtime_expires;
2168 	raw_spin_unlock(&cfs_b->lock);
2169 
2170 	cfs_rq->runtime_remaining += amount;
2171 	/*
2172 	 * we may have advanced our local expiration to account for allowed
2173 	 * spread between our sched_clock and the one on which runtime was
2174 	 * issued.
2175 	 */
2176 	if ((s64)(expires - cfs_rq->runtime_expires) > 0)
2177 		cfs_rq->runtime_expires = expires;
2178 
2179 	return cfs_rq->runtime_remaining > 0;
2180 }
2181 
2182 /*
2183  * Note: This depends on the synchronization provided by sched_clock and the
2184  * fact that rq->clock snapshots this value.
2185  */
2186 static void expire_cfs_rq_runtime(struct cfs_rq *cfs_rq)
2187 {
2188 	struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
2189 
2190 	/* if the deadline is ahead of our clock, nothing to do */
2191 	if (likely((s64)(rq_clock(rq_of(cfs_rq)) - cfs_rq->runtime_expires) < 0))
2192 		return;
2193 
2194 	if (cfs_rq->runtime_remaining < 0)
2195 		return;
2196 
2197 	/*
2198 	 * If the local deadline has passed we have to consider the
2199 	 * possibility that our sched_clock is 'fast' and the global deadline
2200 	 * has not truly expired.
2201 	 *
2202 	 * Fortunately we can check determine whether this the case by checking
2203 	 * whether the global deadline has advanced.
2204 	 */
2205 
2206 	if ((s64)(cfs_rq->runtime_expires - cfs_b->runtime_expires) >= 0) {
2207 		/* extend local deadline, drift is bounded above by 2 ticks */
2208 		cfs_rq->runtime_expires += TICK_NSEC;
2209 	} else {
2210 		/* global deadline is ahead, expiration has passed */
2211 		cfs_rq->runtime_remaining = 0;
2212 	}
2213 }
2214 
2215 static void __account_cfs_rq_runtime(struct cfs_rq *cfs_rq,
2216 				     unsigned long delta_exec)
2217 {
2218 	/* dock delta_exec before expiring quota (as it could span periods) */
2219 	cfs_rq->runtime_remaining -= delta_exec;
2220 	expire_cfs_rq_runtime(cfs_rq);
2221 
2222 	if (likely(cfs_rq->runtime_remaining > 0))
2223 		return;
2224 
2225 	/*
2226 	 * if we're unable to extend our runtime we resched so that the active
2227 	 * hierarchy can be throttled
2228 	 */
2229 	if (!assign_cfs_rq_runtime(cfs_rq) && likely(cfs_rq->curr))
2230 		resched_task(rq_of(cfs_rq)->curr);
2231 }
2232 
2233 static __always_inline
2234 void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, unsigned long delta_exec)
2235 {
2236 	if (!cfs_bandwidth_used() || !cfs_rq->runtime_enabled)
2237 		return;
2238 
2239 	__account_cfs_rq_runtime(cfs_rq, delta_exec);
2240 }
2241 
2242 static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq)
2243 {
2244 	return cfs_bandwidth_used() && cfs_rq->throttled;
2245 }
2246 
2247 /* check whether cfs_rq, or any parent, is throttled */
2248 static inline int throttled_hierarchy(struct cfs_rq *cfs_rq)
2249 {
2250 	return cfs_bandwidth_used() && cfs_rq->throttle_count;
2251 }
2252 
2253 /*
2254  * Ensure that neither of the group entities corresponding to src_cpu or
2255  * dest_cpu are members of a throttled hierarchy when performing group
2256  * load-balance operations.
2257  */
2258 static inline int throttled_lb_pair(struct task_group *tg,
2259 				    int src_cpu, int dest_cpu)
2260 {
2261 	struct cfs_rq *src_cfs_rq, *dest_cfs_rq;
2262 
2263 	src_cfs_rq = tg->cfs_rq[src_cpu];
2264 	dest_cfs_rq = tg->cfs_rq[dest_cpu];
2265 
2266 	return throttled_hierarchy(src_cfs_rq) ||
2267 	       throttled_hierarchy(dest_cfs_rq);
2268 }
2269 
2270 /* updated child weight may affect parent so we have to do this bottom up */
2271 static int tg_unthrottle_up(struct task_group *tg, void *data)
2272 {
2273 	struct rq *rq = data;
2274 	struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];
2275 
2276 	cfs_rq->throttle_count--;
2277 #ifdef CONFIG_SMP
2278 	if (!cfs_rq->throttle_count) {
2279 		/* adjust cfs_rq_clock_task() */
2280 		cfs_rq->throttled_clock_task_time += rq_clock_task(rq) -
2281 					     cfs_rq->throttled_clock_task;
2282 	}
2283 #endif
2284 
2285 	return 0;
2286 }
2287 
2288 static int tg_throttle_down(struct task_group *tg, void *data)
2289 {
2290 	struct rq *rq = data;
2291 	struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];
2292 
2293 	/* group is entering throttled state, stop time */
2294 	if (!cfs_rq->throttle_count)
2295 		cfs_rq->throttled_clock_task = rq_clock_task(rq);
2296 	cfs_rq->throttle_count++;
2297 
2298 	return 0;
2299 }
2300 
2301 static void throttle_cfs_rq(struct cfs_rq *cfs_rq)
2302 {
2303 	struct rq *rq = rq_of(cfs_rq);
2304 	struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
2305 	struct sched_entity *se;
2306 	long task_delta, dequeue = 1;
2307 
2308 	se = cfs_rq->tg->se[cpu_of(rq_of(cfs_rq))];
2309 
2310 	/* freeze hierarchy runnable averages while throttled */
2311 	rcu_read_lock();
2312 	walk_tg_tree_from(cfs_rq->tg, tg_throttle_down, tg_nop, (void *)rq);
2313 	rcu_read_unlock();
2314 
2315 	task_delta = cfs_rq->h_nr_running;
2316 	for_each_sched_entity(se) {
2317 		struct cfs_rq *qcfs_rq = cfs_rq_of(se);
2318 		/* throttled entity or throttle-on-deactivate */
2319 		if (!se->on_rq)
2320 			break;
2321 
2322 		if (dequeue)
2323 			dequeue_entity(qcfs_rq, se, DEQUEUE_SLEEP);
2324 		qcfs_rq->h_nr_running -= task_delta;
2325 
2326 		if (qcfs_rq->load.weight)
2327 			dequeue = 0;
2328 	}
2329 
2330 	if (!se)
2331 		rq->nr_running -= task_delta;
2332 
2333 	cfs_rq->throttled = 1;
2334 	cfs_rq->throttled_clock = rq_clock(rq);
2335 	raw_spin_lock(&cfs_b->lock);
2336 	list_add_tail_rcu(&cfs_rq->throttled_list, &cfs_b->throttled_cfs_rq);
2337 	raw_spin_unlock(&cfs_b->lock);
2338 }
2339 
2340 void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
2341 {
2342 	struct rq *rq = rq_of(cfs_rq);
2343 	struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
2344 	struct sched_entity *se;
2345 	int enqueue = 1;
2346 	long task_delta;
2347 
2348 	se = cfs_rq->tg->se[cpu_of(rq)];
2349 
2350 	cfs_rq->throttled = 0;
2351 
2352 	update_rq_clock(rq);
2353 
2354 	raw_spin_lock(&cfs_b->lock);
2355 	cfs_b->throttled_time += rq_clock(rq) - cfs_rq->throttled_clock;
2356 	list_del_rcu(&cfs_rq->throttled_list);
2357 	raw_spin_unlock(&cfs_b->lock);
2358 
2359 	/* update hierarchical throttle state */
2360 	walk_tg_tree_from(cfs_rq->tg, tg_nop, tg_unthrottle_up, (void *)rq);
2361 
2362 	if (!cfs_rq->load.weight)
2363 		return;
2364 
2365 	task_delta = cfs_rq->h_nr_running;
2366 	for_each_sched_entity(se) {
2367 		if (se->on_rq)
2368 			enqueue = 0;
2369 
2370 		cfs_rq = cfs_rq_of(se);
2371 		if (enqueue)
2372 			enqueue_entity(cfs_rq, se, ENQUEUE_WAKEUP);
2373 		cfs_rq->h_nr_running += task_delta;
2374 
2375 		if (cfs_rq_throttled(cfs_rq))
2376 			break;
2377 	}
2378 
2379 	if (!se)
2380 		rq->nr_running += task_delta;
2381 
2382 	/* determine whether we need to wake up potentially idle cpu */
2383 	if (rq->curr == rq->idle && rq->cfs.nr_running)
2384 		resched_task(rq->curr);
2385 }
2386 
2387 static u64 distribute_cfs_runtime(struct cfs_bandwidth *cfs_b,
2388 		u64 remaining, u64 expires)
2389 {
2390 	struct cfs_rq *cfs_rq;
2391 	u64 runtime = remaining;
2392 
2393 	rcu_read_lock();
2394 	list_for_each_entry_rcu(cfs_rq, &cfs_b->throttled_cfs_rq,
2395 				throttled_list) {
2396 		struct rq *rq = rq_of(cfs_rq);
2397 
2398 		raw_spin_lock(&rq->lock);
2399 		if (!cfs_rq_throttled(cfs_rq))
2400 			goto next;
2401 
2402 		runtime = -cfs_rq->runtime_remaining + 1;
2403 		if (runtime > remaining)
2404 			runtime = remaining;
2405 		remaining -= runtime;
2406 
2407 		cfs_rq->runtime_remaining += runtime;
2408 		cfs_rq->runtime_expires = expires;
2409 
2410 		/* we check whether we're throttled above */
2411 		if (cfs_rq->runtime_remaining > 0)
2412 			unthrottle_cfs_rq(cfs_rq);
2413 
2414 next:
2415 		raw_spin_unlock(&rq->lock);
2416 
2417 		if (!remaining)
2418 			break;
2419 	}
2420 	rcu_read_unlock();
2421 
2422 	return remaining;
2423 }
2424 
2425 /*
2426  * Responsible for refilling a task_group's bandwidth and unthrottling its
2427  * cfs_rqs as appropriate. If there has been no activity within the last
2428  * period the timer is deactivated until scheduling resumes; cfs_b->idle is
2429  * used to track this state.
2430  */
2431 static int do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b, int overrun)
2432 {
2433 	u64 runtime, runtime_expires;
2434 	int idle = 1, throttled;
2435 
2436 	raw_spin_lock(&cfs_b->lock);
2437 	/* no need to continue the timer with no bandwidth constraint */
2438 	if (cfs_b->quota == RUNTIME_INF)
2439 		goto out_unlock;
2440 
2441 	throttled = !list_empty(&cfs_b->throttled_cfs_rq);
2442 	/* idle depends on !throttled (for the case of a large deficit) */
2443 	idle = cfs_b->idle && !throttled;
2444 	cfs_b->nr_periods += overrun;
2445 
2446 	/* if we're going inactive then everything else can be deferred */
2447 	if (idle)
2448 		goto out_unlock;
2449 
2450 	__refill_cfs_bandwidth_runtime(cfs_b);
2451 
2452 	if (!throttled) {
2453 		/* mark as potentially idle for the upcoming period */
2454 		cfs_b->idle = 1;
2455 		goto out_unlock;
2456 	}
2457 
2458 	/* account preceding periods in which throttling occurred */
2459 	cfs_b->nr_throttled += overrun;
2460 
2461 	/*
2462 	 * There are throttled entities so we must first use the new bandwidth
2463 	 * to unthrottle them before making it generally available.  This
2464 	 * ensures that all existing debts will be paid before a new cfs_rq is
2465 	 * allowed to run.
2466 	 */
2467 	runtime = cfs_b->runtime;
2468 	runtime_expires = cfs_b->runtime_expires;
2469 	cfs_b->runtime = 0;
2470 
2471 	/*
2472 	 * This check is repeated as we are holding onto the new bandwidth
2473 	 * while we unthrottle.  This can potentially race with an unthrottled
2474 	 * group trying to acquire new bandwidth from the global pool.
2475 	 */
2476 	while (throttled && runtime > 0) {
2477 		raw_spin_unlock(&cfs_b->lock);
2478 		/* we can't nest cfs_b->lock while distributing bandwidth */
2479 		runtime = distribute_cfs_runtime(cfs_b, runtime,
2480 						 runtime_expires);
2481 		raw_spin_lock(&cfs_b->lock);
2482 
2483 		throttled = !list_empty(&cfs_b->throttled_cfs_rq);
2484 	}
2485 
2486 	/* return (any) remaining runtime */
2487 	cfs_b->runtime = runtime;
2488 	/*
2489 	 * While we are ensured activity in the period following an
2490 	 * unthrottle, this also covers the case in which the new bandwidth is
2491 	 * insufficient to cover the existing bandwidth deficit.  (Forcing the
2492 	 * timer to remain active while there are any throttled entities.)
2493 	 */
2494 	cfs_b->idle = 0;
2495 out_unlock:
2496 	if (idle)
2497 		cfs_b->timer_active = 0;
2498 	raw_spin_unlock(&cfs_b->lock);
2499 
2500 	return idle;
2501 }
2502 
2503 /* a cfs_rq won't donate quota below this amount */
2504 static const u64 min_cfs_rq_runtime = 1 * NSEC_PER_MSEC;
2505 /* minimum remaining period time to redistribute slack quota */
2506 static const u64 min_bandwidth_expiration = 2 * NSEC_PER_MSEC;
2507 /* how long we wait to gather additional slack before distributing */
2508 static const u64 cfs_bandwidth_slack_period = 5 * NSEC_PER_MSEC;
2509 
2510 /* are we near the end of the current quota period? */
2511 static int runtime_refresh_within(struct cfs_bandwidth *cfs_b, u64 min_expire)
2512 {
2513 	struct hrtimer *refresh_timer = &cfs_b->period_timer;
2514 	u64 remaining;
2515 
2516 	/* if the call-back is running a quota refresh is already occurring */
2517 	if (hrtimer_callback_running(refresh_timer))
2518 		return 1;
2519 
2520 	/* is a quota refresh about to occur? */
2521 	remaining = ktime_to_ns(hrtimer_expires_remaining(refresh_timer));
2522 	if (remaining < min_expire)
2523 		return 1;
2524 
2525 	return 0;
2526 }
2527 
2528 static void start_cfs_slack_bandwidth(struct cfs_bandwidth *cfs_b)
2529 {
2530 	u64 min_left = cfs_bandwidth_slack_period + min_bandwidth_expiration;
2531 
2532 	/* if there's a quota refresh soon don't bother with slack */
2533 	if (runtime_refresh_within(cfs_b, min_left))
2534 		return;
2535 
2536 	start_bandwidth_timer(&cfs_b->slack_timer,
2537 				ns_to_ktime(cfs_bandwidth_slack_period));
2538 }
2539 
2540 /* we know any runtime found here is valid as update_curr() precedes return */
2541 static void __return_cfs_rq_runtime(struct cfs_rq *cfs_rq)
2542 {
2543 	struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
2544 	s64 slack_runtime = cfs_rq->runtime_remaining - min_cfs_rq_runtime;
2545 
2546 	if (slack_runtime <= 0)
2547 		return;
2548 
2549 	raw_spin_lock(&cfs_b->lock);
2550 	if (cfs_b->quota != RUNTIME_INF &&
2551 	    cfs_rq->runtime_expires == cfs_b->runtime_expires) {
2552 		cfs_b->runtime += slack_runtime;
2553 
2554 		/* we are under rq->lock, defer unthrottling using a timer */
2555 		if (cfs_b->runtime > sched_cfs_bandwidth_slice() &&
2556 		    !list_empty(&cfs_b->throttled_cfs_rq))
2557 			start_cfs_slack_bandwidth(cfs_b);
2558 	}
2559 	raw_spin_unlock(&cfs_b->lock);
2560 
2561 	/* even if it's not valid for return we don't want to try again */
2562 	cfs_rq->runtime_remaining -= slack_runtime;
2563 }
2564 
2565 static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq)
2566 {
2567 	if (!cfs_bandwidth_used())
2568 		return;
2569 
2570 	if (!cfs_rq->runtime_enabled || cfs_rq->nr_running)
2571 		return;
2572 
2573 	__return_cfs_rq_runtime(cfs_rq);
2574 }
2575 
2576 /*
2577  * This is done with a timer (instead of inline with bandwidth return) since
2578  * it's necessary to juggle rq->locks to unthrottle their respective cfs_rqs.
2579  */
2580 static void do_sched_cfs_slack_timer(struct cfs_bandwidth *cfs_b)
2581 {
2582 	u64 runtime = 0, slice = sched_cfs_bandwidth_slice();
2583 	u64 expires;
2584 
2585 	/* confirm we're still not at a refresh boundary */
2586 	if (runtime_refresh_within(cfs_b, min_bandwidth_expiration))
2587 		return;
2588 
2589 	raw_spin_lock(&cfs_b->lock);
2590 	if (cfs_b->quota != RUNTIME_INF && cfs_b->runtime > slice) {
2591 		runtime = cfs_b->runtime;
2592 		cfs_b->runtime = 0;
2593 	}
2594 	expires = cfs_b->runtime_expires;
2595 	raw_spin_unlock(&cfs_b->lock);
2596 
2597 	if (!runtime)
2598 		return;
2599 
2600 	runtime = distribute_cfs_runtime(cfs_b, runtime, expires);
2601 
2602 	raw_spin_lock(&cfs_b->lock);
2603 	if (expires == cfs_b->runtime_expires)
2604 		cfs_b->runtime = runtime;
2605 	raw_spin_unlock(&cfs_b->lock);
2606 }
2607 
2608 /*
2609  * When a group wakes up we want to make sure that its quota is not already
2610  * expired/exceeded, otherwise it may be allowed to steal additional ticks of
2611  * runtime as update_curr() throttling can not not trigger until it's on-rq.
2612  */
2613 static void check_enqueue_throttle(struct cfs_rq *cfs_rq)
2614 {
2615 	if (!cfs_bandwidth_used())
2616 		return;
2617 
2618 	/* an active group must be handled by the update_curr()->put() path */
2619 	if (!cfs_rq->runtime_enabled || cfs_rq->curr)
2620 		return;
2621 
2622 	/* ensure the group is not already throttled */
2623 	if (cfs_rq_throttled(cfs_rq))
2624 		return;
2625 
2626 	/* update runtime allocation */
2627 	account_cfs_rq_runtime(cfs_rq, 0);
2628 	if (cfs_rq->runtime_remaining <= 0)
2629 		throttle_cfs_rq(cfs_rq);
2630 }
2631 
2632 /* conditionally throttle active cfs_rq's from put_prev_entity() */
2633 static void check_cfs_rq_runtime(struct cfs_rq *cfs_rq)
2634 {
2635 	if (!cfs_bandwidth_used())
2636 		return;
2637 
2638 	if (likely(!cfs_rq->runtime_enabled || cfs_rq->runtime_remaining > 0))
2639 		return;
2640 
2641 	/*
2642 	 * it's possible for a throttled entity to be forced into a running
2643 	 * state (e.g. set_curr_task), in this case we're finished.
2644 	 */
2645 	if (cfs_rq_throttled(cfs_rq))
2646 		return;
2647 
2648 	throttle_cfs_rq(cfs_rq);
2649 }
2650 
2651 static enum hrtimer_restart sched_cfs_slack_timer(struct hrtimer *timer)
2652 {
2653 	struct cfs_bandwidth *cfs_b =
2654 		container_of(timer, struct cfs_bandwidth, slack_timer);
2655 	do_sched_cfs_slack_timer(cfs_b);
2656 
2657 	return HRTIMER_NORESTART;
2658 }
2659 
2660 static enum hrtimer_restart sched_cfs_period_timer(struct hrtimer *timer)
2661 {
2662 	struct cfs_bandwidth *cfs_b =
2663 		container_of(timer, struct cfs_bandwidth, period_timer);
2664 	ktime_t now;
2665 	int overrun;
2666 	int idle = 0;
2667 
2668 	for (;;) {
2669 		now = hrtimer_cb_get_time(timer);
2670 		overrun = hrtimer_forward(timer, now, cfs_b->period);
2671 
2672 		if (!overrun)
2673 			break;
2674 
2675 		idle = do_sched_cfs_period_timer(cfs_b, overrun);
2676 	}
2677 
2678 	return idle ? HRTIMER_NORESTART : HRTIMER_RESTART;
2679 }
2680 
2681 void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
2682 {
2683 	raw_spin_lock_init(&cfs_b->lock);
2684 	cfs_b->runtime = 0;
2685 	cfs_b->quota = RUNTIME_INF;
2686 	cfs_b->period = ns_to_ktime(default_cfs_period());
2687 
2688 	INIT_LIST_HEAD(&cfs_b->throttled_cfs_rq);
2689 	hrtimer_init(&cfs_b->period_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
2690 	cfs_b->period_timer.function = sched_cfs_period_timer;
2691 	hrtimer_init(&cfs_b->slack_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
2692 	cfs_b->slack_timer.function = sched_cfs_slack_timer;
2693 }
2694 
2695 static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq)
2696 {
2697 	cfs_rq->runtime_enabled = 0;
2698 	INIT_LIST_HEAD(&cfs_rq->throttled_list);
2699 }
2700 
2701 /* requires cfs_b->lock, may release to reprogram timer */
2702 void __start_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
2703 {
2704 	/*
2705 	 * The timer may be active because we're trying to set a new bandwidth
2706 	 * period or because we're racing with the tear-down path
2707 	 * (timer_active==0 becomes visible before the hrtimer call-back
2708 	 * terminates).  In either case we ensure that it's re-programmed
2709 	 */
2710 	while (unlikely(hrtimer_active(&cfs_b->period_timer))) {
2711 		raw_spin_unlock(&cfs_b->lock);
2712 		/* ensure cfs_b->lock is available while we wait */
2713 		hrtimer_cancel(&cfs_b->period_timer);
2714 
2715 		raw_spin_lock(&cfs_b->lock);
2716 		/* if someone else restarted the timer then we're done */
2717 		if (cfs_b->timer_active)
2718 			return;
2719 	}
2720 
2721 	cfs_b->timer_active = 1;
2722 	start_bandwidth_timer(&cfs_b->period_timer, cfs_b->period);
2723 }
2724 
2725 static void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
2726 {
2727 	hrtimer_cancel(&cfs_b->period_timer);
2728 	hrtimer_cancel(&cfs_b->slack_timer);
2729 }
2730 
2731 static void __maybe_unused unthrottle_offline_cfs_rqs(struct rq *rq)
2732 {
2733 	struct cfs_rq *cfs_rq;
2734 
2735 	for_each_leaf_cfs_rq(rq, cfs_rq) {
2736 		struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
2737 
2738 		if (!cfs_rq->runtime_enabled)
2739 			continue;
2740 
2741 		/*
2742 		 * clock_task is not advancing so we just need to make sure
2743 		 * there's some valid quota amount
2744 		 */
2745 		cfs_rq->runtime_remaining = cfs_b->quota;
2746 		if (cfs_rq_throttled(cfs_rq))
2747 			unthrottle_cfs_rq(cfs_rq);
2748 	}
2749 }
2750 
2751 #else /* CONFIG_CFS_BANDWIDTH */
2752 static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq)
2753 {
2754 	return rq_clock_task(rq_of(cfs_rq));
2755 }
2756 
2757 static void account_cfs_rq_runtime(struct cfs_rq *cfs_rq,
2758 				     unsigned long delta_exec) {}
2759 static void check_cfs_rq_runtime(struct cfs_rq *cfs_rq) {}
2760 static void check_enqueue_throttle(struct cfs_rq *cfs_rq) {}
2761 static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq) {}
2762 
2763 static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq)
2764 {
2765 	return 0;
2766 }
2767 
2768 static inline int throttled_hierarchy(struct cfs_rq *cfs_rq)
2769 {
2770 	return 0;
2771 }
2772 
2773 static inline int throttled_lb_pair(struct task_group *tg,
2774 				    int src_cpu, int dest_cpu)
2775 {
2776 	return 0;
2777 }
2778 
2779 void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b) {}
2780 
2781 #ifdef CONFIG_FAIR_GROUP_SCHED
2782 static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq) {}
2783 #endif
2784 
2785 static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg)
2786 {
2787 	return NULL;
2788 }
2789 static inline void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b) {}
2790 static inline void unthrottle_offline_cfs_rqs(struct rq *rq) {}
2791 
2792 #endif /* CONFIG_CFS_BANDWIDTH */
2793 
2794 /**************************************************
2795  * CFS operations on tasks:
2796  */
2797 
2798 #ifdef CONFIG_SCHED_HRTICK
2799 static void hrtick_start_fair(struct rq *rq, struct task_struct *p)
2800 {
2801 	struct sched_entity *se = &p->se;
2802 	struct cfs_rq *cfs_rq = cfs_rq_of(se);
2803 
2804 	WARN_ON(task_rq(p) != rq);
2805 
2806 	if (cfs_rq->nr_running > 1) {
2807 		u64 slice = sched_slice(cfs_rq, se);
2808 		u64 ran = se->sum_exec_runtime - se->prev_sum_exec_runtime;
2809 		s64 delta = slice - ran;
2810 
2811 		if (delta < 0) {
2812 			if (rq->curr == p)
2813 				resched_task(p);
2814 			return;
2815 		}
2816 
2817 		/*
2818 		 * Don't schedule slices shorter than 10000ns, that just
2819 		 * doesn't make sense. Rely on vruntime for fairness.
2820 		 */
2821 		if (rq->curr != p)
2822 			delta = max_t(s64, 10000LL, delta);
2823 
2824 		hrtick_start(rq, delta);
2825 	}
2826 }
2827 
2828 /*
2829  * called from enqueue/dequeue and updates the hrtick when the
2830  * current task is from our class and nr_running is low enough
2831  * to matter.
2832  */
2833 static void hrtick_update(struct rq *rq)
2834 {
2835 	struct task_struct *curr = rq->curr;
2836 
2837 	if (!hrtick_enabled(rq) || curr->sched_class != &fair_sched_class)
2838 		return;
2839 
2840 	if (cfs_rq_of(&curr->se)->nr_running < sched_nr_latency)
2841 		hrtick_start_fair(rq, curr);
2842 }
2843 #else /* !CONFIG_SCHED_HRTICK */
2844 static inline void
2845 hrtick_start_fair(struct rq *rq, struct task_struct *p)
2846 {
2847 }
2848 
2849 static inline void hrtick_update(struct rq *rq)
2850 {
2851 }
2852 #endif
2853 
2854 /*
2855  * The enqueue_task method is called before nr_running is
2856  * increased. Here we update the fair scheduling stats and
2857  * then put the task into the rbtree:
2858  */
2859 static void
2860 enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
2861 {
2862 	struct cfs_rq *cfs_rq;
2863 	struct sched_entity *se = &p->se;
2864 
2865 	for_each_sched_entity(se) {
2866 		if (se->on_rq)
2867 			break;
2868 		cfs_rq = cfs_rq_of(se);
2869 		enqueue_entity(cfs_rq, se, flags);
2870 
2871 		/*
2872 		 * end evaluation on encountering a throttled cfs_rq
2873 		 *
2874 		 * note: in the case of encountering a throttled cfs_rq we will
2875 		 * post the final h_nr_running increment below.
2876 		*/
2877 		if (cfs_rq_throttled(cfs_rq))
2878 			break;
2879 		cfs_rq->h_nr_running++;
2880 
2881 		flags = ENQUEUE_WAKEUP;
2882 	}
2883 
2884 	for_each_sched_entity(se) {
2885 		cfs_rq = cfs_rq_of(se);
2886 		cfs_rq->h_nr_running++;
2887 
2888 		if (cfs_rq_throttled(cfs_rq))
2889 			break;
2890 
2891 		update_cfs_shares(cfs_rq);
2892 		update_entity_load_avg(se, 1);
2893 	}
2894 
2895 	if (!se) {
2896 		update_rq_runnable_avg(rq, rq->nr_running);
2897 		inc_nr_running(rq);
2898 	}
2899 	hrtick_update(rq);
2900 }
2901 
2902 static void set_next_buddy(struct sched_entity *se);
2903 
2904 /*
2905  * The dequeue_task method is called before nr_running is
2906  * decreased. We remove the task from the rbtree and
2907  * update the fair scheduling stats:
2908  */
2909 static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
2910 {
2911 	struct cfs_rq *cfs_rq;
2912 	struct sched_entity *se = &p->se;
2913 	int task_sleep = flags & DEQUEUE_SLEEP;
2914 
2915 	for_each_sched_entity(se) {
2916 		cfs_rq = cfs_rq_of(se);
2917 		dequeue_entity(cfs_rq, se, flags);
2918 
2919 		/*
2920 		 * end evaluation on encountering a throttled cfs_rq
2921 		 *
2922 		 * note: in the case of encountering a throttled cfs_rq we will
2923 		 * post the final h_nr_running decrement below.
2924 		*/
2925 		if (cfs_rq_throttled(cfs_rq))
2926 			break;
2927 		cfs_rq->h_nr_running--;
2928 
2929 		/* Don't dequeue parent if it has other entities besides us */
2930 		if (cfs_rq->load.weight) {
2931 			/*
2932 			 * Bias pick_next to pick a task from this cfs_rq, as
2933 			 * p is sleeping when it is within its sched_slice.
2934 			 */
2935 			if (task_sleep && parent_entity(se))
2936 				set_next_buddy(parent_entity(se));
2937 
2938 			/* avoid re-evaluating load for this entity */
2939 			se = parent_entity(se);
2940 			break;
2941 		}
2942 		flags |= DEQUEUE_SLEEP;
2943 	}
2944 
2945 	for_each_sched_entity(se) {
2946 		cfs_rq = cfs_rq_of(se);
2947 		cfs_rq->h_nr_running--;
2948 
2949 		if (cfs_rq_throttled(cfs_rq))
2950 			break;
2951 
2952 		update_cfs_shares(cfs_rq);
2953 		update_entity_load_avg(se, 1);
2954 	}
2955 
2956 	if (!se) {
2957 		dec_nr_running(rq);
2958 		update_rq_runnable_avg(rq, 1);
2959 	}
2960 	hrtick_update(rq);
2961 }
2962 
2963 #ifdef CONFIG_SMP
2964 /* Used instead of source_load when we know the type == 0 */
2965 static unsigned long weighted_cpuload(const int cpu)
2966 {
2967 	return cpu_rq(cpu)->cfs.runnable_load_avg;
2968 }
2969 
2970 /*
2971  * Return a low guess at the load of a migration-source cpu weighted
2972  * according to the scheduling class and "nice" value.
2973  *
2974  * We want to under-estimate the load of migration sources, to
2975  * balance conservatively.
2976  */
2977 static unsigned long source_load(int cpu, int type)
2978 {
2979 	struct rq *rq = cpu_rq(cpu);
2980 	unsigned long total = weighted_cpuload(cpu);
2981 
2982 	if (type == 0 || !sched_feat(LB_BIAS))
2983 		return total;
2984 
2985 	return min(rq->cpu_load[type-1], total);
2986 }
2987 
2988 /*
2989  * Return a high guess at the load of a migration-target cpu weighted
2990  * according to the scheduling class and "nice" value.
2991  */
2992 static unsigned long target_load(int cpu, int type)
2993 {
2994 	struct rq *rq = cpu_rq(cpu);
2995 	unsigned long total = weighted_cpuload(cpu);
2996 
2997 	if (type == 0 || !sched_feat(LB_BIAS))
2998 		return total;
2999 
3000 	return max(rq->cpu_load[type-1], total);
3001 }
3002 
3003 static unsigned long power_of(int cpu)
3004 {
3005 	return cpu_rq(cpu)->cpu_power;
3006 }
3007 
3008 static unsigned long cpu_avg_load_per_task(int cpu)
3009 {
3010 	struct rq *rq = cpu_rq(cpu);
3011 	unsigned long nr_running = ACCESS_ONCE(rq->nr_running);
3012 	unsigned long load_avg = rq->cfs.runnable_load_avg;
3013 
3014 	if (nr_running)
3015 		return load_avg / nr_running;
3016 
3017 	return 0;
3018 }
3019 
3020 
3021 static void task_waking_fair(struct task_struct *p)
3022 {
3023 	struct sched_entity *se = &p->se;
3024 	struct cfs_rq *cfs_rq = cfs_rq_of(se);
3025 	u64 min_vruntime;
3026 
3027 #ifndef CONFIG_64BIT
3028 	u64 min_vruntime_copy;
3029 
3030 	do {
3031 		min_vruntime_copy = cfs_rq->min_vruntime_copy;
3032 		smp_rmb();
3033 		min_vruntime = cfs_rq->min_vruntime;
3034 	} while (min_vruntime != min_vruntime_copy);
3035 #else
3036 	min_vruntime = cfs_rq->min_vruntime;
3037 #endif
3038 
3039 	se->vruntime -= min_vruntime;
3040 }
3041 
3042 #ifdef CONFIG_FAIR_GROUP_SCHED
3043 /*
3044  * effective_load() calculates the load change as seen from the root_task_group
3045  *
3046  * Adding load to a group doesn't make a group heavier, but can cause movement
3047  * of group shares between cpus. Assuming the shares were perfectly aligned one
3048  * can calculate the shift in shares.
3049  *
3050  * Calculate the effective load difference if @wl is added (subtracted) to @tg
3051  * on this @cpu and results in a total addition (subtraction) of @wg to the
3052  * total group weight.
3053  *
3054  * Given a runqueue weight distribution (rw_i) we can compute a shares
3055  * distribution (s_i) using:
3056  *
3057  *   s_i = rw_i / \Sum rw_j						(1)
3058  *
3059  * Suppose we have 4 CPUs and our @tg is a direct child of the root group and
3060  * has 7 equal weight tasks, distributed as below (rw_i), with the resulting
3061  * shares distribution (s_i):
3062  *
3063  *   rw_i = {   2,   4,   1,   0 }
3064  *   s_i  = { 2/7, 4/7, 1/7,   0 }
3065  *
3066  * As per wake_affine() we're interested in the load of two CPUs (the CPU the
3067  * task used to run on and the CPU the waker is running on), we need to
3068  * compute the effect of waking a task on either CPU and, in case of a sync
3069  * wakeup, compute the effect of the current task going to sleep.
3070  *
3071  * So for a change of @wl to the local @cpu with an overall group weight change
3072  * of @wl we can compute the new shares distribution (s'_i) using:
3073  *
3074  *   s'_i = (rw_i + @wl) / (@wg + \Sum rw_j)				(2)
3075  *
3076  * Suppose we're interested in CPUs 0 and 1, and want to compute the load
3077  * differences in waking a task to CPU 0. The additional task changes the
3078  * weight and shares distributions like:
3079  *
3080  *   rw'_i = {   3,   4,   1,   0 }
3081  *   s'_i  = { 3/8, 4/8, 1/8,   0 }
3082  *
3083  * We can then compute the difference in effective weight by using:
3084  *
3085  *   dw_i = S * (s'_i - s_i)						(3)
3086  *
3087  * Where 'S' is the group weight as seen by its parent.
3088  *
3089  * Therefore the effective change in loads on CPU 0 would be 5/56 (3/8 - 2/7)
3090  * times the weight of the group. The effect on CPU 1 would be -4/56 (4/8 -
3091  * 4/7) times the weight of the group.
3092  */
3093 static long effective_load(struct task_group *tg, int cpu, long wl, long wg)
3094 {
3095 	struct sched_entity *se = tg->se[cpu];
3096 
3097 	if (!tg->parent)	/* the trivial, non-cgroup case */
3098 		return wl;
3099 
3100 	for_each_sched_entity(se) {
3101 		long w, W;
3102 
3103 		tg = se->my_q->tg;
3104 
3105 		/*
3106 		 * W = @wg + \Sum rw_j
3107 		 */
3108 		W = wg + calc_tg_weight(tg, se->my_q);
3109 
3110 		/*
3111 		 * w = rw_i + @wl
3112 		 */
3113 		w = se->my_q->load.weight + wl;
3114 
3115 		/*
3116 		 * wl = S * s'_i; see (2)
3117 		 */
3118 		if (W > 0 && w < W)
3119 			wl = (w * tg->shares) / W;
3120 		else
3121 			wl = tg->shares;
3122 
3123 		/*
3124 		 * Per the above, wl is the new se->load.weight value; since
3125 		 * those are clipped to [MIN_SHARES, ...) do so now. See
3126 		 * calc_cfs_shares().
3127 		 */
3128 		if (wl < MIN_SHARES)
3129 			wl = MIN_SHARES;
3130 
3131 		/*
3132 		 * wl = dw_i = S * (s'_i - s_i); see (3)
3133 		 */
3134 		wl -= se->load.weight;
3135 
3136 		/*
3137 		 * Recursively apply this logic to all parent groups to compute
3138 		 * the final effective load change on the root group. Since
3139 		 * only the @tg group gets extra weight, all parent groups can
3140 		 * only redistribute existing shares. @wl is the shift in shares
3141 		 * resulting from this level per the above.
3142 		 */
3143 		wg = 0;
3144 	}
3145 
3146 	return wl;
3147 }
3148 #else
3149 
3150 static inline unsigned long effective_load(struct task_group *tg, int cpu,
3151 		unsigned long wl, unsigned long wg)
3152 {
3153 	return wl;
3154 }
3155 
3156 #endif
3157 
3158 static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync)
3159 {
3160 	s64 this_load, load;
3161 	int idx, this_cpu, prev_cpu;
3162 	unsigned long tl_per_task;
3163 	struct task_group *tg;
3164 	unsigned long weight;
3165 	int balanced;
3166 
3167 	idx	  = sd->wake_idx;
3168 	this_cpu  = smp_processor_id();
3169 	prev_cpu  = task_cpu(p);
3170 	load	  = source_load(prev_cpu, idx);
3171 	this_load = target_load(this_cpu, idx);
3172 
3173 	/*
3174 	 * If sync wakeup then subtract the (maximum possible)
3175 	 * effect of the currently running task from the load
3176 	 * of the current CPU:
3177 	 */
3178 	if (sync) {
3179 		tg = task_group(current);
3180 		weight = current->se.load.weight;
3181 
3182 		this_load += effective_load(tg, this_cpu, -weight, -weight);
3183 		load += effective_load(tg, prev_cpu, 0, -weight);
3184 	}
3185 
3186 	tg = task_group(p);
3187 	weight = p->se.load.weight;
3188 
3189 	/*
3190 	 * In low-load situations, where prev_cpu is idle and this_cpu is idle
3191 	 * due to the sync cause above having dropped this_load to 0, we'll
3192 	 * always have an imbalance, but there's really nothing you can do
3193 	 * about that, so that's good too.
3194 	 *
3195 	 * Otherwise check if either cpus are near enough in load to allow this
3196 	 * task to be woken on this_cpu.
3197 	 */
3198 	if (this_load > 0) {
3199 		s64 this_eff_load, prev_eff_load;
3200 
3201 		this_eff_load = 100;
3202 		this_eff_load *= power_of(prev_cpu);
3203 		this_eff_load *= this_load +
3204 			effective_load(tg, this_cpu, weight, weight);
3205 
3206 		prev_eff_load = 100 + (sd->imbalance_pct - 100) / 2;
3207 		prev_eff_load *= power_of(this_cpu);
3208 		prev_eff_load *= load + effective_load(tg, prev_cpu, 0, weight);
3209 
3210 		balanced = this_eff_load <= prev_eff_load;
3211 	} else
3212 		balanced = true;
3213 
3214 	/*
3215 	 * If the currently running task will sleep within
3216 	 * a reasonable amount of time then attract this newly
3217 	 * woken task:
3218 	 */
3219 	if (sync && balanced)
3220 		return 1;
3221 
3222 	schedstat_inc(p, se.statistics.nr_wakeups_affine_attempts);
3223 	tl_per_task = cpu_avg_load_per_task(this_cpu);
3224 
3225 	if (balanced ||
3226 	    (this_load <= load &&
3227 	     this_load + target_load(prev_cpu, idx) <= tl_per_task)) {
3228 		/*
3229 		 * This domain has SD_WAKE_AFFINE and
3230 		 * p is cache cold in this domain, and
3231 		 * there is no bad imbalance.
3232 		 */
3233 		schedstat_inc(sd, ttwu_move_affine);
3234 		schedstat_inc(p, se.statistics.nr_wakeups_affine);
3235 
3236 		return 1;
3237 	}
3238 	return 0;
3239 }
3240 
3241 /*
3242  * find_idlest_group finds and returns the least busy CPU group within the
3243  * domain.
3244  */
3245 static struct sched_group *
3246 find_idlest_group(struct sched_domain *sd, struct task_struct *p,
3247 		  int this_cpu, int load_idx)
3248 {
3249 	struct sched_group *idlest = NULL, *group = sd->groups;
3250 	unsigned long min_load = ULONG_MAX, this_load = 0;
3251 	int imbalance = 100 + (sd->imbalance_pct-100)/2;
3252 
3253 	do {
3254 		unsigned long load, avg_load;
3255 		int local_group;
3256 		int i;
3257 
3258 		/* Skip over this group if it has no CPUs allowed */
3259 		if (!cpumask_intersects(sched_group_cpus(group),
3260 					tsk_cpus_allowed(p)))
3261 			continue;
3262 
3263 		local_group = cpumask_test_cpu(this_cpu,
3264 					       sched_group_cpus(group));
3265 
3266 		/* Tally up the load of all CPUs in the group */
3267 		avg_load = 0;
3268 
3269 		for_each_cpu(i, sched_group_cpus(group)) {
3270 			/* Bias balancing toward cpus of our domain */
3271 			if (local_group)
3272 				load = source_load(i, load_idx);
3273 			else
3274 				load = target_load(i, load_idx);
3275 
3276 			avg_load += load;
3277 		}
3278 
3279 		/* Adjust by relative CPU power of the group */
3280 		avg_load = (avg_load * SCHED_POWER_SCALE) / group->sgp->power;
3281 
3282 		if (local_group) {
3283 			this_load = avg_load;
3284 		} else if (avg_load < min_load) {
3285 			min_load = avg_load;
3286 			idlest = group;
3287 		}
3288 	} while (group = group->next, group != sd->groups);
3289 
3290 	if (!idlest || 100*this_load < imbalance*min_load)
3291 		return NULL;
3292 	return idlest;
3293 }
3294 
3295 /*
3296  * find_idlest_cpu - find the idlest cpu among the cpus in group.
3297  */
3298 static int
3299 find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
3300 {
3301 	unsigned long load, min_load = ULONG_MAX;
3302 	int idlest = -1;
3303 	int i;
3304 
3305 	/* Traverse only the allowed CPUs */
3306 	for_each_cpu_and(i, sched_group_cpus(group), tsk_cpus_allowed(p)) {
3307 		load = weighted_cpuload(i);
3308 
3309 		if (load < min_load || (load == min_load && i == this_cpu)) {
3310 			min_load = load;
3311 			idlest = i;
3312 		}
3313 	}
3314 
3315 	return idlest;
3316 }
3317 
3318 /*
3319  * Try and locate an idle CPU in the sched_domain.
3320  */
3321 static int select_idle_sibling(struct task_struct *p, int target)
3322 {
3323 	struct sched_domain *sd;
3324 	struct sched_group *sg;
3325 	int i = task_cpu(p);
3326 
3327 	if (idle_cpu(target))
3328 		return target;
3329 
3330 	/*
3331 	 * If the prevous cpu is cache affine and idle, don't be stupid.
3332 	 */
3333 	if (i != target && cpus_share_cache(i, target) && idle_cpu(i))
3334 		return i;
3335 
3336 	/*
3337 	 * Otherwise, iterate the domains and find an elegible idle cpu.
3338 	 */
3339 	sd = rcu_dereference(per_cpu(sd_llc, target));
3340 	for_each_lower_domain(sd) {
3341 		sg = sd->groups;
3342 		do {
3343 			if (!cpumask_intersects(sched_group_cpus(sg),
3344 						tsk_cpus_allowed(p)))
3345 				goto next;
3346 
3347 			for_each_cpu(i, sched_group_cpus(sg)) {
3348 				if (i == target || !idle_cpu(i))
3349 					goto next;
3350 			}
3351 
3352 			target = cpumask_first_and(sched_group_cpus(sg),
3353 					tsk_cpus_allowed(p));
3354 			goto done;
3355 next:
3356 			sg = sg->next;
3357 		} while (sg != sd->groups);
3358 	}
3359 done:
3360 	return target;
3361 }
3362 
3363 /*
3364  * sched_balance_self: balance the current task (running on cpu) in domains
3365  * that have the 'flag' flag set. In practice, this is SD_BALANCE_FORK and
3366  * SD_BALANCE_EXEC.
3367  *
3368  * Balance, ie. select the least loaded group.
3369  *
3370  * Returns the target CPU number, or the same CPU if no balancing is needed.
3371  *
3372  * preempt must be disabled.
3373  */
3374 static int
3375 select_task_rq_fair(struct task_struct *p, int sd_flag, int wake_flags)
3376 {
3377 	struct sched_domain *tmp, *affine_sd = NULL, *sd = NULL;
3378 	int cpu = smp_processor_id();
3379 	int prev_cpu = task_cpu(p);
3380 	int new_cpu = cpu;
3381 	int want_affine = 0;
3382 	int sync = wake_flags & WF_SYNC;
3383 
3384 	if (p->nr_cpus_allowed == 1)
3385 		return prev_cpu;
3386 
3387 	if (sd_flag & SD_BALANCE_WAKE) {
3388 		if (cpumask_test_cpu(cpu, tsk_cpus_allowed(p)))
3389 			want_affine = 1;
3390 		new_cpu = prev_cpu;
3391 	}
3392 
3393 	rcu_read_lock();
3394 	for_each_domain(cpu, tmp) {
3395 		if (!(tmp->flags & SD_LOAD_BALANCE))
3396 			continue;
3397 
3398 		/*
3399 		 * If both cpu and prev_cpu are part of this domain,
3400 		 * cpu is a valid SD_WAKE_AFFINE target.
3401 		 */
3402 		if (want_affine && (tmp->flags & SD_WAKE_AFFINE) &&
3403 		    cpumask_test_cpu(prev_cpu, sched_domain_span(tmp))) {
3404 			affine_sd = tmp;
3405 			break;
3406 		}
3407 
3408 		if (tmp->flags & sd_flag)
3409 			sd = tmp;
3410 	}
3411 
3412 	if (affine_sd) {
3413 		if (cpu != prev_cpu && wake_affine(affine_sd, p, sync))
3414 			prev_cpu = cpu;
3415 
3416 		new_cpu = select_idle_sibling(p, prev_cpu);
3417 		goto unlock;
3418 	}
3419 
3420 	while (sd) {
3421 		int load_idx = sd->forkexec_idx;
3422 		struct sched_group *group;
3423 		int weight;
3424 
3425 		if (!(sd->flags & sd_flag)) {
3426 			sd = sd->child;
3427 			continue;
3428 		}
3429 
3430 		if (sd_flag & SD_BALANCE_WAKE)
3431 			load_idx = sd->wake_idx;
3432 
3433 		group = find_idlest_group(sd, p, cpu, load_idx);
3434 		if (!group) {
3435 			sd = sd->child;
3436 			continue;
3437 		}
3438 
3439 		new_cpu = find_idlest_cpu(group, p, cpu);
3440 		if (new_cpu == -1 || new_cpu == cpu) {
3441 			/* Now try balancing at a lower domain level of cpu */
3442 			sd = sd->child;
3443 			continue;
3444 		}
3445 
3446 		/* Now try balancing at a lower domain level of new_cpu */
3447 		cpu = new_cpu;
3448 		weight = sd->span_weight;
3449 		sd = NULL;
3450 		for_each_domain(cpu, tmp) {
3451 			if (weight <= tmp->span_weight)
3452 				break;
3453 			if (tmp->flags & sd_flag)
3454 				sd = tmp;
3455 		}
3456 		/* while loop will break here if sd == NULL */
3457 	}
3458 unlock:
3459 	rcu_read_unlock();
3460 
3461 	return new_cpu;
3462 }
3463 
3464 /*
3465  * Called immediately before a task is migrated to a new cpu; task_cpu(p) and
3466  * cfs_rq_of(p) references at time of call are still valid and identify the
3467  * previous cpu.  However, the caller only guarantees p->pi_lock is held; no
3468  * other assumptions, including the state of rq->lock, should be made.
3469  */
3470 static void
3471 migrate_task_rq_fair(struct task_struct *p, int next_cpu)
3472 {
3473 	struct sched_entity *se = &p->se;
3474 	struct cfs_rq *cfs_rq = cfs_rq_of(se);
3475 
3476 	/*
3477 	 * Load tracking: accumulate removed load so that it can be processed
3478 	 * when we next update owning cfs_rq under rq->lock.  Tasks contribute
3479 	 * to blocked load iff they have a positive decay-count.  It can never
3480 	 * be negative here since on-rq tasks have decay-count == 0.
3481 	 */
3482 	if (se->avg.decay_count) {
3483 		se->avg.decay_count = -__synchronize_entity_decay(se);
3484 		atomic_long_add(se->avg.load_avg_contrib,
3485 						&cfs_rq->removed_load);
3486 	}
3487 }
3488 #endif /* CONFIG_SMP */
3489 
3490 static unsigned long
3491 wakeup_gran(struct sched_entity *curr, struct sched_entity *se)
3492 {
3493 	unsigned long gran = sysctl_sched_wakeup_granularity;
3494 
3495 	/*
3496 	 * Since its curr running now, convert the gran from real-time
3497 	 * to virtual-time in his units.
3498 	 *
3499 	 * By using 'se' instead of 'curr' we penalize light tasks, so
3500 	 * they get preempted easier. That is, if 'se' < 'curr' then
3501 	 * the resulting gran will be larger, therefore penalizing the
3502 	 * lighter, if otoh 'se' > 'curr' then the resulting gran will
3503 	 * be smaller, again penalizing the lighter task.
3504 	 *
3505 	 * This is especially important for buddies when the leftmost
3506 	 * task is higher priority than the buddy.
3507 	 */
3508 	return calc_delta_fair(gran, se);
3509 }
3510 
3511 /*
3512  * Should 'se' preempt 'curr'.
3513  *
3514  *             |s1
3515  *        |s2
3516  *   |s3
3517  *         g
3518  *      |<--->|c
3519  *
3520  *  w(c, s1) = -1
3521  *  w(c, s2) =  0
3522  *  w(c, s3) =  1
3523  *
3524  */
3525 static int
3526 wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se)
3527 {
3528 	s64 gran, vdiff = curr->vruntime - se->vruntime;
3529 
3530 	if (vdiff <= 0)
3531 		return -1;
3532 
3533 	gran = wakeup_gran(curr, se);
3534 	if (vdiff > gran)
3535 		return 1;
3536 
3537 	return 0;
3538 }
3539 
3540 static void set_last_buddy(struct sched_entity *se)
3541 {
3542 	if (entity_is_task(se) && unlikely(task_of(se)->policy == SCHED_IDLE))
3543 		return;
3544 
3545 	for_each_sched_entity(se)
3546 		cfs_rq_of(se)->last = se;
3547 }
3548 
3549 static void set_next_buddy(struct sched_entity *se)
3550 {
3551 	if (entity_is_task(se) && unlikely(task_of(se)->policy == SCHED_IDLE))
3552 		return;
3553 
3554 	for_each_sched_entity(se)
3555 		cfs_rq_of(se)->next = se;
3556 }
3557 
3558 static void set_skip_buddy(struct sched_entity *se)
3559 {
3560 	for_each_sched_entity(se)
3561 		cfs_rq_of(se)->skip = se;
3562 }
3563 
3564 /*
3565  * Preempt the current task with a newly woken task if needed:
3566  */
3567 static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_flags)
3568 {
3569 	struct task_struct *curr = rq->curr;
3570 	struct sched_entity *se = &curr->se, *pse = &p->se;
3571 	struct cfs_rq *cfs_rq = task_cfs_rq(curr);
3572 	int scale = cfs_rq->nr_running >= sched_nr_latency;
3573 	int next_buddy_marked = 0;
3574 
3575 	if (unlikely(se == pse))
3576 		return;
3577 
3578 	/*
3579 	 * This is possible from callers such as move_task(), in which we
3580 	 * unconditionally check_prempt_curr() after an enqueue (which may have
3581 	 * lead to a throttle).  This both saves work and prevents false
3582 	 * next-buddy nomination below.
3583 	 */
3584 	if (unlikely(throttled_hierarchy(cfs_rq_of(pse))))
3585 		return;
3586 
3587 	if (sched_feat(NEXT_BUDDY) && scale && !(wake_flags & WF_FORK)) {
3588 		set_next_buddy(pse);
3589 		next_buddy_marked = 1;
3590 	}
3591 
3592 	/*
3593 	 * We can come here with TIF_NEED_RESCHED already set from new task
3594 	 * wake up path.
3595 	 *
3596 	 * Note: this also catches the edge-case of curr being in a throttled
3597 	 * group (e.g. via set_curr_task), since update_curr() (in the
3598 	 * enqueue of curr) will have resulted in resched being set.  This
3599 	 * prevents us from potentially nominating it as a false LAST_BUDDY
3600 	 * below.
3601 	 */
3602 	if (test_tsk_need_resched(curr))
3603 		return;
3604 
3605 	/* Idle tasks are by definition preempted by non-idle tasks. */
3606 	if (unlikely(curr->policy == SCHED_IDLE) &&
3607 	    likely(p->policy != SCHED_IDLE))
3608 		goto preempt;
3609 
3610 	/*
3611 	 * Batch and idle tasks do not preempt non-idle tasks (their preemption
3612 	 * is driven by the tick):
3613 	 */
3614 	if (unlikely(p->policy != SCHED_NORMAL) || !sched_feat(WAKEUP_PREEMPTION))
3615 		return;
3616 
3617 	find_matching_se(&se, &pse);
3618 	update_curr(cfs_rq_of(se));
3619 	BUG_ON(!pse);
3620 	if (wakeup_preempt_entity(se, pse) == 1) {
3621 		/*
3622 		 * Bias pick_next to pick the sched entity that is
3623 		 * triggering this preemption.
3624 		 */
3625 		if (!next_buddy_marked)
3626 			set_next_buddy(pse);
3627 		goto preempt;
3628 	}
3629 
3630 	return;
3631 
3632 preempt:
3633 	resched_task(curr);
3634 	/*
3635 	 * Only set the backward buddy when the current task is still
3636 	 * on the rq. This can happen when a wakeup gets interleaved
3637 	 * with schedule on the ->pre_schedule() or idle_balance()
3638 	 * point, either of which can * drop the rq lock.
3639 	 *
3640 	 * Also, during early boot the idle thread is in the fair class,
3641 	 * for obvious reasons its a bad idea to schedule back to it.
3642 	 */
3643 	if (unlikely(!se->on_rq || curr == rq->idle))
3644 		return;
3645 
3646 	if (sched_feat(LAST_BUDDY) && scale && entity_is_task(se))
3647 		set_last_buddy(se);
3648 }
3649 
3650 static struct task_struct *pick_next_task_fair(struct rq *rq)
3651 {
3652 	struct task_struct *p;
3653 	struct cfs_rq *cfs_rq = &rq->cfs;
3654 	struct sched_entity *se;
3655 
3656 	if (!cfs_rq->nr_running)
3657 		return NULL;
3658 
3659 	do {
3660 		se = pick_next_entity(cfs_rq);
3661 		set_next_entity(cfs_rq, se);
3662 		cfs_rq = group_cfs_rq(se);
3663 	} while (cfs_rq);
3664 
3665 	p = task_of(se);
3666 	if (hrtick_enabled(rq))
3667 		hrtick_start_fair(rq, p);
3668 
3669 	return p;
3670 }
3671 
3672 /*
3673  * Account for a descheduled task:
3674  */
3675 static void put_prev_task_fair(struct rq *rq, struct task_struct *prev)
3676 {
3677 	struct sched_entity *se = &prev->se;
3678 	struct cfs_rq *cfs_rq;
3679 
3680 	for_each_sched_entity(se) {
3681 		cfs_rq = cfs_rq_of(se);
3682 		put_prev_entity(cfs_rq, se);
3683 	}
3684 }
3685 
3686 /*
3687  * sched_yield() is very simple
3688  *
3689  * The magic of dealing with the ->skip buddy is in pick_next_entity.
3690  */
3691 static void yield_task_fair(struct rq *rq)
3692 {
3693 	struct task_struct *curr = rq->curr;
3694 	struct cfs_rq *cfs_rq = task_cfs_rq(curr);
3695 	struct sched_entity *se = &curr->se;
3696 
3697 	/*
3698 	 * Are we the only task in the tree?
3699 	 */
3700 	if (unlikely(rq->nr_running == 1))
3701 		return;
3702 
3703 	clear_buddies(cfs_rq, se);
3704 
3705 	if (curr->policy != SCHED_BATCH) {
3706 		update_rq_clock(rq);
3707 		/*
3708 		 * Update run-time statistics of the 'current'.
3709 		 */
3710 		update_curr(cfs_rq);
3711 		/*
3712 		 * Tell update_rq_clock() that we've just updated,
3713 		 * so we don't do microscopic update in schedule()
3714 		 * and double the fastpath cost.
3715 		 */
3716 		 rq->skip_clock_update = 1;
3717 	}
3718 
3719 	set_skip_buddy(se);
3720 }
3721 
3722 static bool yield_to_task_fair(struct rq *rq, struct task_struct *p, bool preempt)
3723 {
3724 	struct sched_entity *se = &p->se;
3725 
3726 	/* throttled hierarchies are not runnable */
3727 	if (!se->on_rq || throttled_hierarchy(cfs_rq_of(se)))
3728 		return false;
3729 
3730 	/* Tell the scheduler that we'd really like pse to run next. */
3731 	set_next_buddy(se);
3732 
3733 	yield_task_fair(rq);
3734 
3735 	return true;
3736 }
3737 
3738 #ifdef CONFIG_SMP
3739 /**************************************************
3740  * Fair scheduling class load-balancing methods.
3741  *
3742  * BASICS
3743  *
3744  * The purpose of load-balancing is to achieve the same basic fairness the
3745  * per-cpu scheduler provides, namely provide a proportional amount of compute
3746  * time to each task. This is expressed in the following equation:
3747  *
3748  *   W_i,n/P_i == W_j,n/P_j for all i,j                               (1)
3749  *
3750  * Where W_i,n is the n-th weight average for cpu i. The instantaneous weight
3751  * W_i,0 is defined as:
3752  *
3753  *   W_i,0 = \Sum_j w_i,j                                             (2)
3754  *
3755  * Where w_i,j is the weight of the j-th runnable task on cpu i. This weight
3756  * is derived from the nice value as per prio_to_weight[].
3757  *
3758  * The weight average is an exponential decay average of the instantaneous
3759  * weight:
3760  *
3761  *   W'_i,n = (2^n - 1) / 2^n * W_i,n + 1 / 2^n * W_i,0               (3)
3762  *
3763  * P_i is the cpu power (or compute capacity) of cpu i, typically it is the
3764  * fraction of 'recent' time available for SCHED_OTHER task execution. But it
3765  * can also include other factors [XXX].
3766  *
3767  * To achieve this balance we define a measure of imbalance which follows
3768  * directly from (1):
3769  *
3770  *   imb_i,j = max{ avg(W/P), W_i/P_i } - min{ avg(W/P), W_j/P_j }    (4)
3771  *
3772  * We them move tasks around to minimize the imbalance. In the continuous
3773  * function space it is obvious this converges, in the discrete case we get
3774  * a few fun cases generally called infeasible weight scenarios.
3775  *
3776  * [XXX expand on:
3777  *     - infeasible weights;
3778  *     - local vs global optima in the discrete case. ]
3779  *
3780  *
3781  * SCHED DOMAINS
3782  *
3783  * In order to solve the imbalance equation (4), and avoid the obvious O(n^2)
3784  * for all i,j solution, we create a tree of cpus that follows the hardware
3785  * topology where each level pairs two lower groups (or better). This results
3786  * in O(log n) layers. Furthermore we reduce the number of cpus going up the
3787  * tree to only the first of the previous level and we decrease the frequency
3788  * of load-balance at each level inv. proportional to the number of cpus in
3789  * the groups.
3790  *
3791  * This yields:
3792  *
3793  *     log_2 n     1     n
3794  *   \Sum       { --- * --- * 2^i } = O(n)                            (5)
3795  *     i = 0      2^i   2^i
3796  *                               `- size of each group
3797  *         |         |     `- number of cpus doing load-balance
3798  *         |         `- freq
3799  *         `- sum over all levels
3800  *
3801  * Coupled with a limit on how many tasks we can migrate every balance pass,
3802  * this makes (5) the runtime complexity of the balancer.
3803  *
3804  * An important property here is that each CPU is still (indirectly) connected
3805  * to every other cpu in at most O(log n) steps:
3806  *
3807  * The adjacency matrix of the resulting graph is given by:
3808  *
3809  *             log_2 n
3810  *   A_i,j = \Union     (i % 2^k == 0) && i / 2^(k+1) == j / 2^(k+1)  (6)
3811  *             k = 0
3812  *
3813  * And you'll find that:
3814  *
3815  *   A^(log_2 n)_i,j != 0  for all i,j                                (7)
3816  *
3817  * Showing there's indeed a path between every cpu in at most O(log n) steps.
3818  * The task movement gives a factor of O(m), giving a convergence complexity
3819  * of:
3820  *
3821  *   O(nm log n),  n := nr_cpus, m := nr_tasks                        (8)
3822  *
3823  *
3824  * WORK CONSERVING
3825  *
3826  * In order to avoid CPUs going idle while there's still work to do, new idle
3827  * balancing is more aggressive and has the newly idle cpu iterate up the domain
3828  * tree itself instead of relying on other CPUs to bring it work.
3829  *
3830  * This adds some complexity to both (5) and (8) but it reduces the total idle
3831  * time.
3832  *
3833  * [XXX more?]
3834  *
3835  *
3836  * CGROUPS
3837  *
3838  * Cgroups make a horror show out of (2), instead of a simple sum we get:
3839  *
3840  *                                s_k,i
3841  *   W_i,0 = \Sum_j \Prod_k w_k * -----                               (9)
3842  *                                 S_k
3843  *
3844  * Where
3845  *
3846  *   s_k,i = \Sum_j w_i,j,k  and  S_k = \Sum_i s_k,i                 (10)
3847  *
3848  * w_i,j,k is the weight of the j-th runnable task in the k-th cgroup on cpu i.
3849  *
3850  * The big problem is S_k, its a global sum needed to compute a local (W_i)
3851  * property.
3852  *
3853  * [XXX write more on how we solve this.. _after_ merging pjt's patches that
3854  *      rewrite all of this once again.]
3855  */
3856 
3857 static unsigned long __read_mostly max_load_balance_interval = HZ/10;
3858 
3859 #define LBF_ALL_PINNED	0x01
3860 #define LBF_NEED_BREAK	0x02
3861 #define LBF_SOME_PINNED 0x04
3862 
3863 struct lb_env {
3864 	struct sched_domain	*sd;
3865 
3866 	struct rq		*src_rq;
3867 	int			src_cpu;
3868 
3869 	int			dst_cpu;
3870 	struct rq		*dst_rq;
3871 
3872 	struct cpumask		*dst_grpmask;
3873 	int			new_dst_cpu;
3874 	enum cpu_idle_type	idle;
3875 	long			imbalance;
3876 	/* The set of CPUs under consideration for load-balancing */
3877 	struct cpumask		*cpus;
3878 
3879 	unsigned int		flags;
3880 
3881 	unsigned int		loop;
3882 	unsigned int		loop_break;
3883 	unsigned int		loop_max;
3884 };
3885 
3886 /*
3887  * move_task - move a task from one runqueue to another runqueue.
3888  * Both runqueues must be locked.
3889  */
3890 static void move_task(struct task_struct *p, struct lb_env *env)
3891 {
3892 	deactivate_task(env->src_rq, p, 0);
3893 	set_task_cpu(p, env->dst_cpu);
3894 	activate_task(env->dst_rq, p, 0);
3895 	check_preempt_curr(env->dst_rq, p, 0);
3896 }
3897 
3898 /*
3899  * Is this task likely cache-hot:
3900  */
3901 static int
3902 task_hot(struct task_struct *p, u64 now, struct sched_domain *sd)
3903 {
3904 	s64 delta;
3905 
3906 	if (p->sched_class != &fair_sched_class)
3907 		return 0;
3908 
3909 	if (unlikely(p->policy == SCHED_IDLE))
3910 		return 0;
3911 
3912 	/*
3913 	 * Buddy candidates are cache hot:
3914 	 */
3915 	if (sched_feat(CACHE_HOT_BUDDY) && this_rq()->nr_running &&
3916 			(&p->se == cfs_rq_of(&p->se)->next ||
3917 			 &p->se == cfs_rq_of(&p->se)->last))
3918 		return 1;
3919 
3920 	if (sysctl_sched_migration_cost == -1)
3921 		return 1;
3922 	if (sysctl_sched_migration_cost == 0)
3923 		return 0;
3924 
3925 	delta = now - p->se.exec_start;
3926 
3927 	return delta < (s64)sysctl_sched_migration_cost;
3928 }
3929 
3930 /*
3931  * can_migrate_task - may task p from runqueue rq be migrated to this_cpu?
3932  */
3933 static
3934 int can_migrate_task(struct task_struct *p, struct lb_env *env)
3935 {
3936 	int tsk_cache_hot = 0;
3937 	/*
3938 	 * We do not migrate tasks that are:
3939 	 * 1) throttled_lb_pair, or
3940 	 * 2) cannot be migrated to this CPU due to cpus_allowed, or
3941 	 * 3) running (obviously), or
3942 	 * 4) are cache-hot on their current CPU.
3943 	 */
3944 	if (throttled_lb_pair(task_group(p), env->src_cpu, env->dst_cpu))
3945 		return 0;
3946 
3947 	if (!cpumask_test_cpu(env->dst_cpu, tsk_cpus_allowed(p))) {
3948 		int cpu;
3949 
3950 		schedstat_inc(p, se.statistics.nr_failed_migrations_affine);
3951 
3952 		/*
3953 		 * Remember if this task can be migrated to any other cpu in
3954 		 * our sched_group. We may want to revisit it if we couldn't
3955 		 * meet load balance goals by pulling other tasks on src_cpu.
3956 		 *
3957 		 * Also avoid computing new_dst_cpu if we have already computed
3958 		 * one in current iteration.
3959 		 */
3960 		if (!env->dst_grpmask || (env->flags & LBF_SOME_PINNED))
3961 			return 0;
3962 
3963 		/* Prevent to re-select dst_cpu via env's cpus */
3964 		for_each_cpu_and(cpu, env->dst_grpmask, env->cpus) {
3965 			if (cpumask_test_cpu(cpu, tsk_cpus_allowed(p))) {
3966 				env->flags |= LBF_SOME_PINNED;
3967 				env->new_dst_cpu = cpu;
3968 				break;
3969 			}
3970 		}
3971 
3972 		return 0;
3973 	}
3974 
3975 	/* Record that we found atleast one task that could run on dst_cpu */
3976 	env->flags &= ~LBF_ALL_PINNED;
3977 
3978 	if (task_running(env->src_rq, p)) {
3979 		schedstat_inc(p, se.statistics.nr_failed_migrations_running);
3980 		return 0;
3981 	}
3982 
3983 	/*
3984 	 * Aggressive migration if:
3985 	 * 1) task is cache cold, or
3986 	 * 2) too many balance attempts have failed.
3987 	 */
3988 
3989 	tsk_cache_hot = task_hot(p, rq_clock_task(env->src_rq), env->sd);
3990 	if (!tsk_cache_hot ||
3991 		env->sd->nr_balance_failed > env->sd->cache_nice_tries) {
3992 
3993 		if (tsk_cache_hot) {
3994 			schedstat_inc(env->sd, lb_hot_gained[env->idle]);
3995 			schedstat_inc(p, se.statistics.nr_forced_migrations);
3996 		}
3997 
3998 		return 1;
3999 	}
4000 
4001 	schedstat_inc(p, se.statistics.nr_failed_migrations_hot);
4002 	return 0;
4003 }
4004 
4005 /*
4006  * move_one_task tries to move exactly one task from busiest to this_rq, as
4007  * part of active balancing operations within "domain".
4008  * Returns 1 if successful and 0 otherwise.
4009  *
4010  * Called with both runqueues locked.
4011  */
4012 static int move_one_task(struct lb_env *env)
4013 {
4014 	struct task_struct *p, *n;
4015 
4016 	list_for_each_entry_safe(p, n, &env->src_rq->cfs_tasks, se.group_node) {
4017 		if (!can_migrate_task(p, env))
4018 			continue;
4019 
4020 		move_task(p, env);
4021 		/*
4022 		 * Right now, this is only the second place move_task()
4023 		 * is called, so we can safely collect move_task()
4024 		 * stats here rather than inside move_task().
4025 		 */
4026 		schedstat_inc(env->sd, lb_gained[env->idle]);
4027 		return 1;
4028 	}
4029 	return 0;
4030 }
4031 
4032 static unsigned long task_h_load(struct task_struct *p);
4033 
4034 static const unsigned int sched_nr_migrate_break = 32;
4035 
4036 /*
4037  * move_tasks tries to move up to imbalance weighted load from busiest to
4038  * this_rq, as part of a balancing operation within domain "sd".
4039  * Returns 1 if successful and 0 otherwise.
4040  *
4041  * Called with both runqueues locked.
4042  */
4043 static int move_tasks(struct lb_env *env)
4044 {
4045 	struct list_head *tasks = &env->src_rq->cfs_tasks;
4046 	struct task_struct *p;
4047 	unsigned long load;
4048 	int pulled = 0;
4049 
4050 	if (env->imbalance <= 0)
4051 		return 0;
4052 
4053 	while (!list_empty(tasks)) {
4054 		p = list_first_entry(tasks, struct task_struct, se.group_node);
4055 
4056 		env->loop++;
4057 		/* We've more or less seen every task there is, call it quits */
4058 		if (env->loop > env->loop_max)
4059 			break;
4060 
4061 		/* take a breather every nr_migrate tasks */
4062 		if (env->loop > env->loop_break) {
4063 			env->loop_break += sched_nr_migrate_break;
4064 			env->flags |= LBF_NEED_BREAK;
4065 			break;
4066 		}
4067 
4068 		if (!can_migrate_task(p, env))
4069 			goto next;
4070 
4071 		load = task_h_load(p);
4072 
4073 		if (sched_feat(LB_MIN) && load < 16 && !env->sd->nr_balance_failed)
4074 			goto next;
4075 
4076 		if ((load / 2) > env->imbalance)
4077 			goto next;
4078 
4079 		move_task(p, env);
4080 		pulled++;
4081 		env->imbalance -= load;
4082 
4083 #ifdef CONFIG_PREEMPT
4084 		/*
4085 		 * NEWIDLE balancing is a source of latency, so preemptible
4086 		 * kernels will stop after the first task is pulled to minimize
4087 		 * the critical section.
4088 		 */
4089 		if (env->idle == CPU_NEWLY_IDLE)
4090 			break;
4091 #endif
4092 
4093 		/*
4094 		 * We only want to steal up to the prescribed amount of
4095 		 * weighted load.
4096 		 */
4097 		if (env->imbalance <= 0)
4098 			break;
4099 
4100 		continue;
4101 next:
4102 		list_move_tail(&p->se.group_node, tasks);
4103 	}
4104 
4105 	/*
4106 	 * Right now, this is one of only two places move_task() is called,
4107 	 * so we can safely collect move_task() stats here rather than
4108 	 * inside move_task().
4109 	 */
4110 	schedstat_add(env->sd, lb_gained[env->idle], pulled);
4111 
4112 	return pulled;
4113 }
4114 
4115 #ifdef CONFIG_FAIR_GROUP_SCHED
4116 /*
4117  * update tg->load_weight by folding this cpu's load_avg
4118  */
4119 static void __update_blocked_averages_cpu(struct task_group *tg, int cpu)
4120 {
4121 	struct sched_entity *se = tg->se[cpu];
4122 	struct cfs_rq *cfs_rq = tg->cfs_rq[cpu];
4123 
4124 	/* throttled entities do not contribute to load */
4125 	if (throttled_hierarchy(cfs_rq))
4126 		return;
4127 
4128 	update_cfs_rq_blocked_load(cfs_rq, 1);
4129 
4130 	if (se) {
4131 		update_entity_load_avg(se, 1);
4132 		/*
4133 		 * We pivot on our runnable average having decayed to zero for
4134 		 * list removal.  This generally implies that all our children
4135 		 * have also been removed (modulo rounding error or bandwidth
4136 		 * control); however, such cases are rare and we can fix these
4137 		 * at enqueue.
4138 		 *
4139 		 * TODO: fix up out-of-order children on enqueue.
4140 		 */
4141 		if (!se->avg.runnable_avg_sum && !cfs_rq->nr_running)
4142 			list_del_leaf_cfs_rq(cfs_rq);
4143 	} else {
4144 		struct rq *rq = rq_of(cfs_rq);
4145 		update_rq_runnable_avg(rq, rq->nr_running);
4146 	}
4147 }
4148 
4149 static void update_blocked_averages(int cpu)
4150 {
4151 	struct rq *rq = cpu_rq(cpu);
4152 	struct cfs_rq *cfs_rq;
4153 	unsigned long flags;
4154 
4155 	raw_spin_lock_irqsave(&rq->lock, flags);
4156 	update_rq_clock(rq);
4157 	/*
4158 	 * Iterates the task_group tree in a bottom up fashion, see
4159 	 * list_add_leaf_cfs_rq() for details.
4160 	 */
4161 	for_each_leaf_cfs_rq(rq, cfs_rq) {
4162 		/*
4163 		 * Note: We may want to consider periodically releasing
4164 		 * rq->lock about these updates so that creating many task
4165 		 * groups does not result in continually extending hold time.
4166 		 */
4167 		__update_blocked_averages_cpu(cfs_rq->tg, rq->cpu);
4168 	}
4169 
4170 	raw_spin_unlock_irqrestore(&rq->lock, flags);
4171 }
4172 
4173 /*
4174  * Compute the cpu's hierarchical load factor for each task group.
4175  * This needs to be done in a top-down fashion because the load of a child
4176  * group is a fraction of its parents load.
4177  */
4178 static int tg_load_down(struct task_group *tg, void *data)
4179 {
4180 	unsigned long load;
4181 	long cpu = (long)data;
4182 
4183 	if (!tg->parent) {
4184 		load = cpu_rq(cpu)->avg.load_avg_contrib;
4185 	} else {
4186 		load = tg->parent->cfs_rq[cpu]->h_load;
4187 		load = div64_ul(load * tg->se[cpu]->avg.load_avg_contrib,
4188 				tg->parent->cfs_rq[cpu]->runnable_load_avg + 1);
4189 	}
4190 
4191 	tg->cfs_rq[cpu]->h_load = load;
4192 
4193 	return 0;
4194 }
4195 
4196 static void update_h_load(long cpu)
4197 {
4198 	struct rq *rq = cpu_rq(cpu);
4199 	unsigned long now = jiffies;
4200 
4201 	if (rq->h_load_throttle == now)
4202 		return;
4203 
4204 	rq->h_load_throttle = now;
4205 
4206 	rcu_read_lock();
4207 	walk_tg_tree(tg_load_down, tg_nop, (void *)cpu);
4208 	rcu_read_unlock();
4209 }
4210 
4211 static unsigned long task_h_load(struct task_struct *p)
4212 {
4213 	struct cfs_rq *cfs_rq = task_cfs_rq(p);
4214 
4215 	return div64_ul(p->se.avg.load_avg_contrib * cfs_rq->h_load,
4216 			cfs_rq->runnable_load_avg + 1);
4217 }
4218 #else
4219 static inline void update_blocked_averages(int cpu)
4220 {
4221 }
4222 
4223 static inline void update_h_load(long cpu)
4224 {
4225 }
4226 
4227 static unsigned long task_h_load(struct task_struct *p)
4228 {
4229 	return p->se.avg.load_avg_contrib;
4230 }
4231 #endif
4232 
4233 /********** Helpers for find_busiest_group ************************/
4234 /*
4235  * sd_lb_stats - Structure to store the statistics of a sched_domain
4236  * 		during load balancing.
4237  */
4238 struct sd_lb_stats {
4239 	struct sched_group *busiest; /* Busiest group in this sd */
4240 	struct sched_group *this;  /* Local group in this sd */
4241 	unsigned long total_load;  /* Total load of all groups in sd */
4242 	unsigned long total_pwr;   /*	Total power of all groups in sd */
4243 	unsigned long avg_load;	   /* Average load across all groups in sd */
4244 
4245 	/** Statistics of this group */
4246 	unsigned long this_load;
4247 	unsigned long this_load_per_task;
4248 	unsigned long this_nr_running;
4249 	unsigned long this_has_capacity;
4250 	unsigned int  this_idle_cpus;
4251 
4252 	/* Statistics of the busiest group */
4253 	unsigned int  busiest_idle_cpus;
4254 	unsigned long max_load;
4255 	unsigned long busiest_load_per_task;
4256 	unsigned long busiest_nr_running;
4257 	unsigned long busiest_group_capacity;
4258 	unsigned long busiest_has_capacity;
4259 	unsigned int  busiest_group_weight;
4260 
4261 	int group_imb; /* Is there imbalance in this sd */
4262 };
4263 
4264 /*
4265  * sg_lb_stats - stats of a sched_group required for load_balancing
4266  */
4267 struct sg_lb_stats {
4268 	unsigned long avg_load; /*Avg load across the CPUs of the group */
4269 	unsigned long group_load; /* Total load over the CPUs of the group */
4270 	unsigned long sum_nr_running; /* Nr tasks running in the group */
4271 	unsigned long sum_weighted_load; /* Weighted load of group's tasks */
4272 	unsigned long group_capacity;
4273 	unsigned long idle_cpus;
4274 	unsigned long group_weight;
4275 	int group_imb; /* Is there an imbalance in the group ? */
4276 	int group_has_capacity; /* Is there extra capacity in the group? */
4277 };
4278 
4279 /**
4280  * get_sd_load_idx - Obtain the load index for a given sched domain.
4281  * @sd: The sched_domain whose load_idx is to be obtained.
4282  * @idle: The Idle status of the CPU for whose sd load_icx is obtained.
4283  */
4284 static inline int get_sd_load_idx(struct sched_domain *sd,
4285 					enum cpu_idle_type idle)
4286 {
4287 	int load_idx;
4288 
4289 	switch (idle) {
4290 	case CPU_NOT_IDLE:
4291 		load_idx = sd->busy_idx;
4292 		break;
4293 
4294 	case CPU_NEWLY_IDLE:
4295 		load_idx = sd->newidle_idx;
4296 		break;
4297 	default:
4298 		load_idx = sd->idle_idx;
4299 		break;
4300 	}
4301 
4302 	return load_idx;
4303 }
4304 
4305 static unsigned long default_scale_freq_power(struct sched_domain *sd, int cpu)
4306 {
4307 	return SCHED_POWER_SCALE;
4308 }
4309 
4310 unsigned long __weak arch_scale_freq_power(struct sched_domain *sd, int cpu)
4311 {
4312 	return default_scale_freq_power(sd, cpu);
4313 }
4314 
4315 static unsigned long default_scale_smt_power(struct sched_domain *sd, int cpu)
4316 {
4317 	unsigned long weight = sd->span_weight;
4318 	unsigned long smt_gain = sd->smt_gain;
4319 
4320 	smt_gain /= weight;
4321 
4322 	return smt_gain;
4323 }
4324 
4325 unsigned long __weak arch_scale_smt_power(struct sched_domain *sd, int cpu)
4326 {
4327 	return default_scale_smt_power(sd, cpu);
4328 }
4329 
4330 static unsigned long scale_rt_power(int cpu)
4331 {
4332 	struct rq *rq = cpu_rq(cpu);
4333 	u64 total, available, age_stamp, avg;
4334 
4335 	/*
4336 	 * Since we're reading these variables without serialization make sure
4337 	 * we read them once before doing sanity checks on them.
4338 	 */
4339 	age_stamp = ACCESS_ONCE(rq->age_stamp);
4340 	avg = ACCESS_ONCE(rq->rt_avg);
4341 
4342 	total = sched_avg_period() + (rq_clock(rq) - age_stamp);
4343 
4344 	if (unlikely(total < avg)) {
4345 		/* Ensures that power won't end up being negative */
4346 		available = 0;
4347 	} else {
4348 		available = total - avg;
4349 	}
4350 
4351 	if (unlikely((s64)total < SCHED_POWER_SCALE))
4352 		total = SCHED_POWER_SCALE;
4353 
4354 	total >>= SCHED_POWER_SHIFT;
4355 
4356 	return div_u64(available, total);
4357 }
4358 
4359 static void update_cpu_power(struct sched_domain *sd, int cpu)
4360 {
4361 	unsigned long weight = sd->span_weight;
4362 	unsigned long power = SCHED_POWER_SCALE;
4363 	struct sched_group *sdg = sd->groups;
4364 
4365 	if ((sd->flags & SD_SHARE_CPUPOWER) && weight > 1) {
4366 		if (sched_feat(ARCH_POWER))
4367 			power *= arch_scale_smt_power(sd, cpu);
4368 		else
4369 			power *= default_scale_smt_power(sd, cpu);
4370 
4371 		power >>= SCHED_POWER_SHIFT;
4372 	}
4373 
4374 	sdg->sgp->power_orig = power;
4375 
4376 	if (sched_feat(ARCH_POWER))
4377 		power *= arch_scale_freq_power(sd, cpu);
4378 	else
4379 		power *= default_scale_freq_power(sd, cpu);
4380 
4381 	power >>= SCHED_POWER_SHIFT;
4382 
4383 	power *= scale_rt_power(cpu);
4384 	power >>= SCHED_POWER_SHIFT;
4385 
4386 	if (!power)
4387 		power = 1;
4388 
4389 	cpu_rq(cpu)->cpu_power = power;
4390 	sdg->sgp->power = power;
4391 }
4392 
4393 void update_group_power(struct sched_domain *sd, int cpu)
4394 {
4395 	struct sched_domain *child = sd->child;
4396 	struct sched_group *group, *sdg = sd->groups;
4397 	unsigned long power;
4398 	unsigned long interval;
4399 
4400 	interval = msecs_to_jiffies(sd->balance_interval);
4401 	interval = clamp(interval, 1UL, max_load_balance_interval);
4402 	sdg->sgp->next_update = jiffies + interval;
4403 
4404 	if (!child) {
4405 		update_cpu_power(sd, cpu);
4406 		return;
4407 	}
4408 
4409 	power = 0;
4410 
4411 	if (child->flags & SD_OVERLAP) {
4412 		/*
4413 		 * SD_OVERLAP domains cannot assume that child groups
4414 		 * span the current group.
4415 		 */
4416 
4417 		for_each_cpu(cpu, sched_group_cpus(sdg))
4418 			power += power_of(cpu);
4419 	} else  {
4420 		/*
4421 		 * !SD_OVERLAP domains can assume that child groups
4422 		 * span the current group.
4423 		 */
4424 
4425 		group = child->groups;
4426 		do {
4427 			power += group->sgp->power;
4428 			group = group->next;
4429 		} while (group != child->groups);
4430 	}
4431 
4432 	sdg->sgp->power_orig = sdg->sgp->power = power;
4433 }
4434 
4435 /*
4436  * Try and fix up capacity for tiny siblings, this is needed when
4437  * things like SD_ASYM_PACKING need f_b_g to select another sibling
4438  * which on its own isn't powerful enough.
4439  *
4440  * See update_sd_pick_busiest() and check_asym_packing().
4441  */
4442 static inline int
4443 fix_small_capacity(struct sched_domain *sd, struct sched_group *group)
4444 {
4445 	/*
4446 	 * Only siblings can have significantly less than SCHED_POWER_SCALE
4447 	 */
4448 	if (!(sd->flags & SD_SHARE_CPUPOWER))
4449 		return 0;
4450 
4451 	/*
4452 	 * If ~90% of the cpu_power is still there, we're good.
4453 	 */
4454 	if (group->sgp->power * 32 > group->sgp->power_orig * 29)
4455 		return 1;
4456 
4457 	return 0;
4458 }
4459 
4460 /**
4461  * update_sg_lb_stats - Update sched_group's statistics for load balancing.
4462  * @env: The load balancing environment.
4463  * @group: sched_group whose statistics are to be updated.
4464  * @load_idx: Load index of sched_domain of this_cpu for load calc.
4465  * @local_group: Does group contain this_cpu.
4466  * @balance: Should we balance.
4467  * @sgs: variable to hold the statistics for this group.
4468  */
4469 static inline void update_sg_lb_stats(struct lb_env *env,
4470 			struct sched_group *group, int load_idx,
4471 			int local_group, int *balance, struct sg_lb_stats *sgs)
4472 {
4473 	unsigned long nr_running, max_nr_running, min_nr_running;
4474 	unsigned long load, max_cpu_load, min_cpu_load;
4475 	unsigned int balance_cpu = -1, first_idle_cpu = 0;
4476 	unsigned long avg_load_per_task = 0;
4477 	int i;
4478 
4479 	if (local_group)
4480 		balance_cpu = group_balance_cpu(group);
4481 
4482 	/* Tally up the load of all CPUs in the group */
4483 	max_cpu_load = 0;
4484 	min_cpu_load = ~0UL;
4485 	max_nr_running = 0;
4486 	min_nr_running = ~0UL;
4487 
4488 	for_each_cpu_and(i, sched_group_cpus(group), env->cpus) {
4489 		struct rq *rq = cpu_rq(i);
4490 
4491 		nr_running = rq->nr_running;
4492 
4493 		/* Bias balancing toward cpus of our domain */
4494 		if (local_group) {
4495 			if (idle_cpu(i) && !first_idle_cpu &&
4496 					cpumask_test_cpu(i, sched_group_mask(group))) {
4497 				first_idle_cpu = 1;
4498 				balance_cpu = i;
4499 			}
4500 
4501 			load = target_load(i, load_idx);
4502 		} else {
4503 			load = source_load(i, load_idx);
4504 			if (load > max_cpu_load)
4505 				max_cpu_load = load;
4506 			if (min_cpu_load > load)
4507 				min_cpu_load = load;
4508 
4509 			if (nr_running > max_nr_running)
4510 				max_nr_running = nr_running;
4511 			if (min_nr_running > nr_running)
4512 				min_nr_running = nr_running;
4513 		}
4514 
4515 		sgs->group_load += load;
4516 		sgs->sum_nr_running += nr_running;
4517 		sgs->sum_weighted_load += weighted_cpuload(i);
4518 		if (idle_cpu(i))
4519 			sgs->idle_cpus++;
4520 	}
4521 
4522 	/*
4523 	 * First idle cpu or the first cpu(busiest) in this sched group
4524 	 * is eligible for doing load balancing at this and above
4525 	 * domains. In the newly idle case, we will allow all the cpu's
4526 	 * to do the newly idle load balance.
4527 	 */
4528 	if (local_group) {
4529 		if (env->idle != CPU_NEWLY_IDLE) {
4530 			if (balance_cpu != env->dst_cpu) {
4531 				*balance = 0;
4532 				return;
4533 			}
4534 			update_group_power(env->sd, env->dst_cpu);
4535 		} else if (time_after_eq(jiffies, group->sgp->next_update))
4536 			update_group_power(env->sd, env->dst_cpu);
4537 	}
4538 
4539 	/* Adjust by relative CPU power of the group */
4540 	sgs->avg_load = (sgs->group_load*SCHED_POWER_SCALE) / group->sgp->power;
4541 
4542 	/*
4543 	 * Consider the group unbalanced when the imbalance is larger
4544 	 * than the average weight of a task.
4545 	 *
4546 	 * APZ: with cgroup the avg task weight can vary wildly and
4547 	 *      might not be a suitable number - should we keep a
4548 	 *      normalized nr_running number somewhere that negates
4549 	 *      the hierarchy?
4550 	 */
4551 	if (sgs->sum_nr_running)
4552 		avg_load_per_task = sgs->sum_weighted_load / sgs->sum_nr_running;
4553 
4554 	if ((max_cpu_load - min_cpu_load) >= avg_load_per_task &&
4555 	    (max_nr_running - min_nr_running) > 1)
4556 		sgs->group_imb = 1;
4557 
4558 	sgs->group_capacity = DIV_ROUND_CLOSEST(group->sgp->power,
4559 						SCHED_POWER_SCALE);
4560 	if (!sgs->group_capacity)
4561 		sgs->group_capacity = fix_small_capacity(env->sd, group);
4562 	sgs->group_weight = group->group_weight;
4563 
4564 	if (sgs->group_capacity > sgs->sum_nr_running)
4565 		sgs->group_has_capacity = 1;
4566 }
4567 
4568 /**
4569  * update_sd_pick_busiest - return 1 on busiest group
4570  * @env: The load balancing environment.
4571  * @sds: sched_domain statistics
4572  * @sg: sched_group candidate to be checked for being the busiest
4573  * @sgs: sched_group statistics
4574  *
4575  * Determine if @sg is a busier group than the previously selected
4576  * busiest group.
4577  */
4578 static bool update_sd_pick_busiest(struct lb_env *env,
4579 				   struct sd_lb_stats *sds,
4580 				   struct sched_group *sg,
4581 				   struct sg_lb_stats *sgs)
4582 {
4583 	if (sgs->avg_load <= sds->max_load)
4584 		return false;
4585 
4586 	if (sgs->sum_nr_running > sgs->group_capacity)
4587 		return true;
4588 
4589 	if (sgs->group_imb)
4590 		return true;
4591 
4592 	/*
4593 	 * ASYM_PACKING needs to move all the work to the lowest
4594 	 * numbered CPUs in the group, therefore mark all groups
4595 	 * higher than ourself as busy.
4596 	 */
4597 	if ((env->sd->flags & SD_ASYM_PACKING) && sgs->sum_nr_running &&
4598 	    env->dst_cpu < group_first_cpu(sg)) {
4599 		if (!sds->busiest)
4600 			return true;
4601 
4602 		if (group_first_cpu(sds->busiest) > group_first_cpu(sg))
4603 			return true;
4604 	}
4605 
4606 	return false;
4607 }
4608 
4609 /**
4610  * update_sd_lb_stats - Update sched_domain's statistics for load balancing.
4611  * @env: The load balancing environment.
4612  * @balance: Should we balance.
4613  * @sds: variable to hold the statistics for this sched_domain.
4614  */
4615 static inline void update_sd_lb_stats(struct lb_env *env,
4616 					int *balance, struct sd_lb_stats *sds)
4617 {
4618 	struct sched_domain *child = env->sd->child;
4619 	struct sched_group *sg = env->sd->groups;
4620 	struct sg_lb_stats sgs;
4621 	int load_idx, prefer_sibling = 0;
4622 
4623 	if (child && child->flags & SD_PREFER_SIBLING)
4624 		prefer_sibling = 1;
4625 
4626 	load_idx = get_sd_load_idx(env->sd, env->idle);
4627 
4628 	do {
4629 		int local_group;
4630 
4631 		local_group = cpumask_test_cpu(env->dst_cpu, sched_group_cpus(sg));
4632 		memset(&sgs, 0, sizeof(sgs));
4633 		update_sg_lb_stats(env, sg, load_idx, local_group, balance, &sgs);
4634 
4635 		if (local_group && !(*balance))
4636 			return;
4637 
4638 		sds->total_load += sgs.group_load;
4639 		sds->total_pwr += sg->sgp->power;
4640 
4641 		/*
4642 		 * In case the child domain prefers tasks go to siblings
4643 		 * first, lower the sg capacity to one so that we'll try
4644 		 * and move all the excess tasks away. We lower the capacity
4645 		 * of a group only if the local group has the capacity to fit
4646 		 * these excess tasks, i.e. nr_running < group_capacity. The
4647 		 * extra check prevents the case where you always pull from the
4648 		 * heaviest group when it is already under-utilized (possible
4649 		 * with a large weight task outweighs the tasks on the system).
4650 		 */
4651 		if (prefer_sibling && !local_group && sds->this_has_capacity)
4652 			sgs.group_capacity = min(sgs.group_capacity, 1UL);
4653 
4654 		if (local_group) {
4655 			sds->this_load = sgs.avg_load;
4656 			sds->this = sg;
4657 			sds->this_nr_running = sgs.sum_nr_running;
4658 			sds->this_load_per_task = sgs.sum_weighted_load;
4659 			sds->this_has_capacity = sgs.group_has_capacity;
4660 			sds->this_idle_cpus = sgs.idle_cpus;
4661 		} else if (update_sd_pick_busiest(env, sds, sg, &sgs)) {
4662 			sds->max_load = sgs.avg_load;
4663 			sds->busiest = sg;
4664 			sds->busiest_nr_running = sgs.sum_nr_running;
4665 			sds->busiest_idle_cpus = sgs.idle_cpus;
4666 			sds->busiest_group_capacity = sgs.group_capacity;
4667 			sds->busiest_load_per_task = sgs.sum_weighted_load;
4668 			sds->busiest_has_capacity = sgs.group_has_capacity;
4669 			sds->busiest_group_weight = sgs.group_weight;
4670 			sds->group_imb = sgs.group_imb;
4671 		}
4672 
4673 		sg = sg->next;
4674 	} while (sg != env->sd->groups);
4675 }
4676 
4677 /**
4678  * check_asym_packing - Check to see if the group is packed into the
4679  *			sched doman.
4680  *
4681  * This is primarily intended to used at the sibling level.  Some
4682  * cores like POWER7 prefer to use lower numbered SMT threads.  In the
4683  * case of POWER7, it can move to lower SMT modes only when higher
4684  * threads are idle.  When in lower SMT modes, the threads will
4685  * perform better since they share less core resources.  Hence when we
4686  * have idle threads, we want them to be the higher ones.
4687  *
4688  * This packing function is run on idle threads.  It checks to see if
4689  * the busiest CPU in this domain (core in the P7 case) has a higher
4690  * CPU number than the packing function is being run on.  Here we are
4691  * assuming lower CPU number will be equivalent to lower a SMT thread
4692  * number.
4693  *
4694  * Returns 1 when packing is required and a task should be moved to
4695  * this CPU.  The amount of the imbalance is returned in *imbalance.
4696  *
4697  * @env: The load balancing environment.
4698  * @sds: Statistics of the sched_domain which is to be packed
4699  */
4700 static int check_asym_packing(struct lb_env *env, struct sd_lb_stats *sds)
4701 {
4702 	int busiest_cpu;
4703 
4704 	if (!(env->sd->flags & SD_ASYM_PACKING))
4705 		return 0;
4706 
4707 	if (!sds->busiest)
4708 		return 0;
4709 
4710 	busiest_cpu = group_first_cpu(sds->busiest);
4711 	if (env->dst_cpu > busiest_cpu)
4712 		return 0;
4713 
4714 	env->imbalance = DIV_ROUND_CLOSEST(
4715 		sds->max_load * sds->busiest->sgp->power, SCHED_POWER_SCALE);
4716 
4717 	return 1;
4718 }
4719 
4720 /**
4721  * fix_small_imbalance - Calculate the minor imbalance that exists
4722  *			amongst the groups of a sched_domain, during
4723  *			load balancing.
4724  * @env: The load balancing environment.
4725  * @sds: Statistics of the sched_domain whose imbalance is to be calculated.
4726  */
4727 static inline
4728 void fix_small_imbalance(struct lb_env *env, struct sd_lb_stats *sds)
4729 {
4730 	unsigned long tmp, pwr_now = 0, pwr_move = 0;
4731 	unsigned int imbn = 2;
4732 	unsigned long scaled_busy_load_per_task;
4733 
4734 	if (sds->this_nr_running) {
4735 		sds->this_load_per_task /= sds->this_nr_running;
4736 		if (sds->busiest_load_per_task >
4737 				sds->this_load_per_task)
4738 			imbn = 1;
4739 	} else {
4740 		sds->this_load_per_task =
4741 			cpu_avg_load_per_task(env->dst_cpu);
4742 	}
4743 
4744 	scaled_busy_load_per_task = sds->busiest_load_per_task
4745 					 * SCHED_POWER_SCALE;
4746 	scaled_busy_load_per_task /= sds->busiest->sgp->power;
4747 
4748 	if (sds->max_load - sds->this_load + scaled_busy_load_per_task >=
4749 			(scaled_busy_load_per_task * imbn)) {
4750 		env->imbalance = sds->busiest_load_per_task;
4751 		return;
4752 	}
4753 
4754 	/*
4755 	 * OK, we don't have enough imbalance to justify moving tasks,
4756 	 * however we may be able to increase total CPU power used by
4757 	 * moving them.
4758 	 */
4759 
4760 	pwr_now += sds->busiest->sgp->power *
4761 			min(sds->busiest_load_per_task, sds->max_load);
4762 	pwr_now += sds->this->sgp->power *
4763 			min(sds->this_load_per_task, sds->this_load);
4764 	pwr_now /= SCHED_POWER_SCALE;
4765 
4766 	/* Amount of load we'd subtract */
4767 	tmp = (sds->busiest_load_per_task * SCHED_POWER_SCALE) /
4768 		sds->busiest->sgp->power;
4769 	if (sds->max_load > tmp)
4770 		pwr_move += sds->busiest->sgp->power *
4771 			min(sds->busiest_load_per_task, sds->max_load - tmp);
4772 
4773 	/* Amount of load we'd add */
4774 	if (sds->max_load * sds->busiest->sgp->power <
4775 		sds->busiest_load_per_task * SCHED_POWER_SCALE)
4776 		tmp = (sds->max_load * sds->busiest->sgp->power) /
4777 			sds->this->sgp->power;
4778 	else
4779 		tmp = (sds->busiest_load_per_task * SCHED_POWER_SCALE) /
4780 			sds->this->sgp->power;
4781 	pwr_move += sds->this->sgp->power *
4782 			min(sds->this_load_per_task, sds->this_load + tmp);
4783 	pwr_move /= SCHED_POWER_SCALE;
4784 
4785 	/* Move if we gain throughput */
4786 	if (pwr_move > pwr_now)
4787 		env->imbalance = sds->busiest_load_per_task;
4788 }
4789 
4790 /**
4791  * calculate_imbalance - Calculate the amount of imbalance present within the
4792  *			 groups of a given sched_domain during load balance.
4793  * @env: load balance environment
4794  * @sds: statistics of the sched_domain whose imbalance is to be calculated.
4795  */
4796 static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *sds)
4797 {
4798 	unsigned long max_pull, load_above_capacity = ~0UL;
4799 
4800 	sds->busiest_load_per_task /= sds->busiest_nr_running;
4801 	if (sds->group_imb) {
4802 		sds->busiest_load_per_task =
4803 			min(sds->busiest_load_per_task, sds->avg_load);
4804 	}
4805 
4806 	/*
4807 	 * In the presence of smp nice balancing, certain scenarios can have
4808 	 * max load less than avg load(as we skip the groups at or below
4809 	 * its cpu_power, while calculating max_load..)
4810 	 */
4811 	if (sds->max_load < sds->avg_load) {
4812 		env->imbalance = 0;
4813 		return fix_small_imbalance(env, sds);
4814 	}
4815 
4816 	if (!sds->group_imb) {
4817 		/*
4818 		 * Don't want to pull so many tasks that a group would go idle.
4819 		 */
4820 		load_above_capacity = (sds->busiest_nr_running -
4821 						sds->busiest_group_capacity);
4822 
4823 		load_above_capacity *= (SCHED_LOAD_SCALE * SCHED_POWER_SCALE);
4824 
4825 		load_above_capacity /= sds->busiest->sgp->power;
4826 	}
4827 
4828 	/*
4829 	 * We're trying to get all the cpus to the average_load, so we don't
4830 	 * want to push ourselves above the average load, nor do we wish to
4831 	 * reduce the max loaded cpu below the average load. At the same time,
4832 	 * we also don't want to reduce the group load below the group capacity
4833 	 * (so that we can implement power-savings policies etc). Thus we look
4834 	 * for the minimum possible imbalance.
4835 	 * Be careful of negative numbers as they'll appear as very large values
4836 	 * with unsigned longs.
4837 	 */
4838 	max_pull = min(sds->max_load - sds->avg_load, load_above_capacity);
4839 
4840 	/* How much load to actually move to equalise the imbalance */
4841 	env->imbalance = min(max_pull * sds->busiest->sgp->power,
4842 		(sds->avg_load - sds->this_load) * sds->this->sgp->power)
4843 			/ SCHED_POWER_SCALE;
4844 
4845 	/*
4846 	 * if *imbalance is less than the average load per runnable task
4847 	 * there is no guarantee that any tasks will be moved so we'll have
4848 	 * a think about bumping its value to force at least one task to be
4849 	 * moved
4850 	 */
4851 	if (env->imbalance < sds->busiest_load_per_task)
4852 		return fix_small_imbalance(env, sds);
4853 
4854 }
4855 
4856 /******* find_busiest_group() helpers end here *********************/
4857 
4858 /**
4859  * find_busiest_group - Returns the busiest group within the sched_domain
4860  * if there is an imbalance. If there isn't an imbalance, and
4861  * the user has opted for power-savings, it returns a group whose
4862  * CPUs can be put to idle by rebalancing those tasks elsewhere, if
4863  * such a group exists.
4864  *
4865  * Also calculates the amount of weighted load which should be moved
4866  * to restore balance.
4867  *
4868  * @env: The load balancing environment.
4869  * @balance: Pointer to a variable indicating if this_cpu
4870  *	is the appropriate cpu to perform load balancing at this_level.
4871  *
4872  * Returns:	- the busiest group if imbalance exists.
4873  *		- If no imbalance and user has opted for power-savings balance,
4874  *		   return the least loaded group whose CPUs can be
4875  *		   put to idle by rebalancing its tasks onto our group.
4876  */
4877 static struct sched_group *
4878 find_busiest_group(struct lb_env *env, int *balance)
4879 {
4880 	struct sd_lb_stats sds;
4881 
4882 	memset(&sds, 0, sizeof(sds));
4883 
4884 	/*
4885 	 * Compute the various statistics relavent for load balancing at
4886 	 * this level.
4887 	 */
4888 	update_sd_lb_stats(env, balance, &sds);
4889 
4890 	/*
4891 	 * this_cpu is not the appropriate cpu to perform load balancing at
4892 	 * this level.
4893 	 */
4894 	if (!(*balance))
4895 		goto ret;
4896 
4897 	if ((env->idle == CPU_IDLE || env->idle == CPU_NEWLY_IDLE) &&
4898 	    check_asym_packing(env, &sds))
4899 		return sds.busiest;
4900 
4901 	/* There is no busy sibling group to pull tasks from */
4902 	if (!sds.busiest || sds.busiest_nr_running == 0)
4903 		goto out_balanced;
4904 
4905 	sds.avg_load = (SCHED_POWER_SCALE * sds.total_load) / sds.total_pwr;
4906 
4907 	/*
4908 	 * If the busiest group is imbalanced the below checks don't
4909 	 * work because they assumes all things are equal, which typically
4910 	 * isn't true due to cpus_allowed constraints and the like.
4911 	 */
4912 	if (sds.group_imb)
4913 		goto force_balance;
4914 
4915 	/* SD_BALANCE_NEWIDLE trumps SMP nice when underutilized */
4916 	if (env->idle == CPU_NEWLY_IDLE && sds.this_has_capacity &&
4917 			!sds.busiest_has_capacity)
4918 		goto force_balance;
4919 
4920 	/*
4921 	 * If the local group is more busy than the selected busiest group
4922 	 * don't try and pull any tasks.
4923 	 */
4924 	if (sds.this_load >= sds.max_load)
4925 		goto out_balanced;
4926 
4927 	/*
4928 	 * Don't pull any tasks if this group is already above the domain
4929 	 * average load.
4930 	 */
4931 	if (sds.this_load >= sds.avg_load)
4932 		goto out_balanced;
4933 
4934 	if (env->idle == CPU_IDLE) {
4935 		/*
4936 		 * This cpu is idle. If the busiest group load doesn't
4937 		 * have more tasks than the number of available cpu's and
4938 		 * there is no imbalance between this and busiest group
4939 		 * wrt to idle cpu's, it is balanced.
4940 		 */
4941 		if ((sds.this_idle_cpus <= sds.busiest_idle_cpus + 1) &&
4942 		    sds.busiest_nr_running <= sds.busiest_group_weight)
4943 			goto out_balanced;
4944 	} else {
4945 		/*
4946 		 * In the CPU_NEWLY_IDLE, CPU_NOT_IDLE cases, use
4947 		 * imbalance_pct to be conservative.
4948 		 */
4949 		if (100 * sds.max_load <= env->sd->imbalance_pct * sds.this_load)
4950 			goto out_balanced;
4951 	}
4952 
4953 force_balance:
4954 	/* Looks like there is an imbalance. Compute it */
4955 	calculate_imbalance(env, &sds);
4956 	return sds.busiest;
4957 
4958 out_balanced:
4959 ret:
4960 	env->imbalance = 0;
4961 	return NULL;
4962 }
4963 
4964 /*
4965  * find_busiest_queue - find the busiest runqueue among the cpus in group.
4966  */
4967 static struct rq *find_busiest_queue(struct lb_env *env,
4968 				     struct sched_group *group)
4969 {
4970 	struct rq *busiest = NULL, *rq;
4971 	unsigned long max_load = 0;
4972 	int i;
4973 
4974 	for_each_cpu(i, sched_group_cpus(group)) {
4975 		unsigned long power = power_of(i);
4976 		unsigned long capacity = DIV_ROUND_CLOSEST(power,
4977 							   SCHED_POWER_SCALE);
4978 		unsigned long wl;
4979 
4980 		if (!capacity)
4981 			capacity = fix_small_capacity(env->sd, group);
4982 
4983 		if (!cpumask_test_cpu(i, env->cpus))
4984 			continue;
4985 
4986 		rq = cpu_rq(i);
4987 		wl = weighted_cpuload(i);
4988 
4989 		/*
4990 		 * When comparing with imbalance, use weighted_cpuload()
4991 		 * which is not scaled with the cpu power.
4992 		 */
4993 		if (capacity && rq->nr_running == 1 && wl > env->imbalance)
4994 			continue;
4995 
4996 		/*
4997 		 * For the load comparisons with the other cpu's, consider
4998 		 * the weighted_cpuload() scaled with the cpu power, so that
4999 		 * the load can be moved away from the cpu that is potentially
5000 		 * running at a lower capacity.
5001 		 */
5002 		wl = (wl * SCHED_POWER_SCALE) / power;
5003 
5004 		if (wl > max_load) {
5005 			max_load = wl;
5006 			busiest = rq;
5007 		}
5008 	}
5009 
5010 	return busiest;
5011 }
5012 
5013 /*
5014  * Max backoff if we encounter pinned tasks. Pretty arbitrary value, but
5015  * so long as it is large enough.
5016  */
5017 #define MAX_PINNED_INTERVAL	512
5018 
5019 /* Working cpumask for load_balance and load_balance_newidle. */
5020 DEFINE_PER_CPU(cpumask_var_t, load_balance_mask);
5021 
5022 static int need_active_balance(struct lb_env *env)
5023 {
5024 	struct sched_domain *sd = env->sd;
5025 
5026 	if (env->idle == CPU_NEWLY_IDLE) {
5027 
5028 		/*
5029 		 * ASYM_PACKING needs to force migrate tasks from busy but
5030 		 * higher numbered CPUs in order to pack all tasks in the
5031 		 * lowest numbered CPUs.
5032 		 */
5033 		if ((sd->flags & SD_ASYM_PACKING) && env->src_cpu > env->dst_cpu)
5034 			return 1;
5035 	}
5036 
5037 	return unlikely(sd->nr_balance_failed > sd->cache_nice_tries+2);
5038 }
5039 
5040 static int active_load_balance_cpu_stop(void *data);
5041 
5042 /*
5043  * Check this_cpu to ensure it is balanced within domain. Attempt to move
5044  * tasks if there is an imbalance.
5045  */
5046 static int load_balance(int this_cpu, struct rq *this_rq,
5047 			struct sched_domain *sd, enum cpu_idle_type idle,
5048 			int *balance)
5049 {
5050 	int ld_moved, cur_ld_moved, active_balance = 0;
5051 	struct sched_group *group;
5052 	struct rq *busiest;
5053 	unsigned long flags;
5054 	struct cpumask *cpus = __get_cpu_var(load_balance_mask);
5055 
5056 	struct lb_env env = {
5057 		.sd		= sd,
5058 		.dst_cpu	= this_cpu,
5059 		.dst_rq		= this_rq,
5060 		.dst_grpmask    = sched_group_cpus(sd->groups),
5061 		.idle		= idle,
5062 		.loop_break	= sched_nr_migrate_break,
5063 		.cpus		= cpus,
5064 	};
5065 
5066 	/*
5067 	 * For NEWLY_IDLE load_balancing, we don't need to consider
5068 	 * other cpus in our group
5069 	 */
5070 	if (idle == CPU_NEWLY_IDLE)
5071 		env.dst_grpmask = NULL;
5072 
5073 	cpumask_copy(cpus, cpu_active_mask);
5074 
5075 	schedstat_inc(sd, lb_count[idle]);
5076 
5077 redo:
5078 	group = find_busiest_group(&env, balance);
5079 
5080 	if (*balance == 0)
5081 		goto out_balanced;
5082 
5083 	if (!group) {
5084 		schedstat_inc(sd, lb_nobusyg[idle]);
5085 		goto out_balanced;
5086 	}
5087 
5088 	busiest = find_busiest_queue(&env, group);
5089 	if (!busiest) {
5090 		schedstat_inc(sd, lb_nobusyq[idle]);
5091 		goto out_balanced;
5092 	}
5093 
5094 	BUG_ON(busiest == env.dst_rq);
5095 
5096 	schedstat_add(sd, lb_imbalance[idle], env.imbalance);
5097 
5098 	ld_moved = 0;
5099 	if (busiest->nr_running > 1) {
5100 		/*
5101 		 * Attempt to move tasks. If find_busiest_group has found
5102 		 * an imbalance but busiest->nr_running <= 1, the group is
5103 		 * still unbalanced. ld_moved simply stays zero, so it is
5104 		 * correctly treated as an imbalance.
5105 		 */
5106 		env.flags |= LBF_ALL_PINNED;
5107 		env.src_cpu   = busiest->cpu;
5108 		env.src_rq    = busiest;
5109 		env.loop_max  = min(sysctl_sched_nr_migrate, busiest->nr_running);
5110 
5111 		update_h_load(env.src_cpu);
5112 more_balance:
5113 		local_irq_save(flags);
5114 		double_rq_lock(env.dst_rq, busiest);
5115 
5116 		/*
5117 		 * cur_ld_moved - load moved in current iteration
5118 		 * ld_moved     - cumulative load moved across iterations
5119 		 */
5120 		cur_ld_moved = move_tasks(&env);
5121 		ld_moved += cur_ld_moved;
5122 		double_rq_unlock(env.dst_rq, busiest);
5123 		local_irq_restore(flags);
5124 
5125 		/*
5126 		 * some other cpu did the load balance for us.
5127 		 */
5128 		if (cur_ld_moved && env.dst_cpu != smp_processor_id())
5129 			resched_cpu(env.dst_cpu);
5130 
5131 		if (env.flags & LBF_NEED_BREAK) {
5132 			env.flags &= ~LBF_NEED_BREAK;
5133 			goto more_balance;
5134 		}
5135 
5136 		/*
5137 		 * Revisit (affine) tasks on src_cpu that couldn't be moved to
5138 		 * us and move them to an alternate dst_cpu in our sched_group
5139 		 * where they can run. The upper limit on how many times we
5140 		 * iterate on same src_cpu is dependent on number of cpus in our
5141 		 * sched_group.
5142 		 *
5143 		 * This changes load balance semantics a bit on who can move
5144 		 * load to a given_cpu. In addition to the given_cpu itself
5145 		 * (or a ilb_cpu acting on its behalf where given_cpu is
5146 		 * nohz-idle), we now have balance_cpu in a position to move
5147 		 * load to given_cpu. In rare situations, this may cause
5148 		 * conflicts (balance_cpu and given_cpu/ilb_cpu deciding
5149 		 * _independently_ and at _same_ time to move some load to
5150 		 * given_cpu) causing exceess load to be moved to given_cpu.
5151 		 * This however should not happen so much in practice and
5152 		 * moreover subsequent load balance cycles should correct the
5153 		 * excess load moved.
5154 		 */
5155 		if ((env.flags & LBF_SOME_PINNED) && env.imbalance > 0) {
5156 
5157 			env.dst_rq	 = cpu_rq(env.new_dst_cpu);
5158 			env.dst_cpu	 = env.new_dst_cpu;
5159 			env.flags	&= ~LBF_SOME_PINNED;
5160 			env.loop	 = 0;
5161 			env.loop_break	 = sched_nr_migrate_break;
5162 
5163 			/* Prevent to re-select dst_cpu via env's cpus */
5164 			cpumask_clear_cpu(env.dst_cpu, env.cpus);
5165 
5166 			/*
5167 			 * Go back to "more_balance" rather than "redo" since we
5168 			 * need to continue with same src_cpu.
5169 			 */
5170 			goto more_balance;
5171 		}
5172 
5173 		/* All tasks on this runqueue were pinned by CPU affinity */
5174 		if (unlikely(env.flags & LBF_ALL_PINNED)) {
5175 			cpumask_clear_cpu(cpu_of(busiest), cpus);
5176 			if (!cpumask_empty(cpus)) {
5177 				env.loop = 0;
5178 				env.loop_break = sched_nr_migrate_break;
5179 				goto redo;
5180 			}
5181 			goto out_balanced;
5182 		}
5183 	}
5184 
5185 	if (!ld_moved) {
5186 		schedstat_inc(sd, lb_failed[idle]);
5187 		/*
5188 		 * Increment the failure counter only on periodic balance.
5189 		 * We do not want newidle balance, which can be very
5190 		 * frequent, pollute the failure counter causing
5191 		 * excessive cache_hot migrations and active balances.
5192 		 */
5193 		if (idle != CPU_NEWLY_IDLE)
5194 			sd->nr_balance_failed++;
5195 
5196 		if (need_active_balance(&env)) {
5197 			raw_spin_lock_irqsave(&busiest->lock, flags);
5198 
5199 			/* don't kick the active_load_balance_cpu_stop,
5200 			 * if the curr task on busiest cpu can't be
5201 			 * moved to this_cpu
5202 			 */
5203 			if (!cpumask_test_cpu(this_cpu,
5204 					tsk_cpus_allowed(busiest->curr))) {
5205 				raw_spin_unlock_irqrestore(&busiest->lock,
5206 							    flags);
5207 				env.flags |= LBF_ALL_PINNED;
5208 				goto out_one_pinned;
5209 			}
5210 
5211 			/*
5212 			 * ->active_balance synchronizes accesses to
5213 			 * ->active_balance_work.  Once set, it's cleared
5214 			 * only after active load balance is finished.
5215 			 */
5216 			if (!busiest->active_balance) {
5217 				busiest->active_balance = 1;
5218 				busiest->push_cpu = this_cpu;
5219 				active_balance = 1;
5220 			}
5221 			raw_spin_unlock_irqrestore(&busiest->lock, flags);
5222 
5223 			if (active_balance) {
5224 				stop_one_cpu_nowait(cpu_of(busiest),
5225 					active_load_balance_cpu_stop, busiest,
5226 					&busiest->active_balance_work);
5227 			}
5228 
5229 			/*
5230 			 * We've kicked active balancing, reset the failure
5231 			 * counter.
5232 			 */
5233 			sd->nr_balance_failed = sd->cache_nice_tries+1;
5234 		}
5235 	} else
5236 		sd->nr_balance_failed = 0;
5237 
5238 	if (likely(!active_balance)) {
5239 		/* We were unbalanced, so reset the balancing interval */
5240 		sd->balance_interval = sd->min_interval;
5241 	} else {
5242 		/*
5243 		 * If we've begun active balancing, start to back off. This
5244 		 * case may not be covered by the all_pinned logic if there
5245 		 * is only 1 task on the busy runqueue (because we don't call
5246 		 * move_tasks).
5247 		 */
5248 		if (sd->balance_interval < sd->max_interval)
5249 			sd->balance_interval *= 2;
5250 	}
5251 
5252 	goto out;
5253 
5254 out_balanced:
5255 	schedstat_inc(sd, lb_balanced[idle]);
5256 
5257 	sd->nr_balance_failed = 0;
5258 
5259 out_one_pinned:
5260 	/* tune up the balancing interval */
5261 	if (((env.flags & LBF_ALL_PINNED) &&
5262 			sd->balance_interval < MAX_PINNED_INTERVAL) ||
5263 			(sd->balance_interval < sd->max_interval))
5264 		sd->balance_interval *= 2;
5265 
5266 	ld_moved = 0;
5267 out:
5268 	return ld_moved;
5269 }
5270 
5271 /*
5272  * idle_balance is called by schedule() if this_cpu is about to become
5273  * idle. Attempts to pull tasks from other CPUs.
5274  */
5275 void idle_balance(int this_cpu, struct rq *this_rq)
5276 {
5277 	struct sched_domain *sd;
5278 	int pulled_task = 0;
5279 	unsigned long next_balance = jiffies + HZ;
5280 
5281 	this_rq->idle_stamp = rq_clock(this_rq);
5282 
5283 	if (this_rq->avg_idle < sysctl_sched_migration_cost)
5284 		return;
5285 
5286 	/*
5287 	 * Drop the rq->lock, but keep IRQ/preempt disabled.
5288 	 */
5289 	raw_spin_unlock(&this_rq->lock);
5290 
5291 	update_blocked_averages(this_cpu);
5292 	rcu_read_lock();
5293 	for_each_domain(this_cpu, sd) {
5294 		unsigned long interval;
5295 		int balance = 1;
5296 
5297 		if (!(sd->flags & SD_LOAD_BALANCE))
5298 			continue;
5299 
5300 		if (sd->flags & SD_BALANCE_NEWIDLE) {
5301 			/* If we've pulled tasks over stop searching: */
5302 			pulled_task = load_balance(this_cpu, this_rq,
5303 						   sd, CPU_NEWLY_IDLE, &balance);
5304 		}
5305 
5306 		interval = msecs_to_jiffies(sd->balance_interval);
5307 		if (time_after(next_balance, sd->last_balance + interval))
5308 			next_balance = sd->last_balance + interval;
5309 		if (pulled_task) {
5310 			this_rq->idle_stamp = 0;
5311 			break;
5312 		}
5313 	}
5314 	rcu_read_unlock();
5315 
5316 	raw_spin_lock(&this_rq->lock);
5317 
5318 	if (pulled_task || time_after(jiffies, this_rq->next_balance)) {
5319 		/*
5320 		 * We are going idle. next_balance may be set based on
5321 		 * a busy processor. So reset next_balance.
5322 		 */
5323 		this_rq->next_balance = next_balance;
5324 	}
5325 }
5326 
5327 /*
5328  * active_load_balance_cpu_stop is run by cpu stopper. It pushes
5329  * running tasks off the busiest CPU onto idle CPUs. It requires at
5330  * least 1 task to be running on each physical CPU where possible, and
5331  * avoids physical / logical imbalances.
5332  */
5333 static int active_load_balance_cpu_stop(void *data)
5334 {
5335 	struct rq *busiest_rq = data;
5336 	int busiest_cpu = cpu_of(busiest_rq);
5337 	int target_cpu = busiest_rq->push_cpu;
5338 	struct rq *target_rq = cpu_rq(target_cpu);
5339 	struct sched_domain *sd;
5340 
5341 	raw_spin_lock_irq(&busiest_rq->lock);
5342 
5343 	/* make sure the requested cpu hasn't gone down in the meantime */
5344 	if (unlikely(busiest_cpu != smp_processor_id() ||
5345 		     !busiest_rq->active_balance))
5346 		goto out_unlock;
5347 
5348 	/* Is there any task to move? */
5349 	if (busiest_rq->nr_running <= 1)
5350 		goto out_unlock;
5351 
5352 	/*
5353 	 * This condition is "impossible", if it occurs
5354 	 * we need to fix it. Originally reported by
5355 	 * Bjorn Helgaas on a 128-cpu setup.
5356 	 */
5357 	BUG_ON(busiest_rq == target_rq);
5358 
5359 	/* move a task from busiest_rq to target_rq */
5360 	double_lock_balance(busiest_rq, target_rq);
5361 
5362 	/* Search for an sd spanning us and the target CPU. */
5363 	rcu_read_lock();
5364 	for_each_domain(target_cpu, sd) {
5365 		if ((sd->flags & SD_LOAD_BALANCE) &&
5366 		    cpumask_test_cpu(busiest_cpu, sched_domain_span(sd)))
5367 				break;
5368 	}
5369 
5370 	if (likely(sd)) {
5371 		struct lb_env env = {
5372 			.sd		= sd,
5373 			.dst_cpu	= target_cpu,
5374 			.dst_rq		= target_rq,
5375 			.src_cpu	= busiest_rq->cpu,
5376 			.src_rq		= busiest_rq,
5377 			.idle		= CPU_IDLE,
5378 		};
5379 
5380 		schedstat_inc(sd, alb_count);
5381 
5382 		if (move_one_task(&env))
5383 			schedstat_inc(sd, alb_pushed);
5384 		else
5385 			schedstat_inc(sd, alb_failed);
5386 	}
5387 	rcu_read_unlock();
5388 	double_unlock_balance(busiest_rq, target_rq);
5389 out_unlock:
5390 	busiest_rq->active_balance = 0;
5391 	raw_spin_unlock_irq(&busiest_rq->lock);
5392 	return 0;
5393 }
5394 
5395 #ifdef CONFIG_NO_HZ_COMMON
5396 /*
5397  * idle load balancing details
5398  * - When one of the busy CPUs notice that there may be an idle rebalancing
5399  *   needed, they will kick the idle load balancer, which then does idle
5400  *   load balancing for all the idle CPUs.
5401  */
5402 static struct {
5403 	cpumask_var_t idle_cpus_mask;
5404 	atomic_t nr_cpus;
5405 	unsigned long next_balance;     /* in jiffy units */
5406 } nohz ____cacheline_aligned;
5407 
5408 static inline int find_new_ilb(int call_cpu)
5409 {
5410 	int ilb = cpumask_first(nohz.idle_cpus_mask);
5411 
5412 	if (ilb < nr_cpu_ids && idle_cpu(ilb))
5413 		return ilb;
5414 
5415 	return nr_cpu_ids;
5416 }
5417 
5418 /*
5419  * Kick a CPU to do the nohz balancing, if it is time for it. We pick the
5420  * nohz_load_balancer CPU (if there is one) otherwise fallback to any idle
5421  * CPU (if there is one).
5422  */
5423 static void nohz_balancer_kick(int cpu)
5424 {
5425 	int ilb_cpu;
5426 
5427 	nohz.next_balance++;
5428 
5429 	ilb_cpu = find_new_ilb(cpu);
5430 
5431 	if (ilb_cpu >= nr_cpu_ids)
5432 		return;
5433 
5434 	if (test_and_set_bit(NOHZ_BALANCE_KICK, nohz_flags(ilb_cpu)))
5435 		return;
5436 	/*
5437 	 * Use smp_send_reschedule() instead of resched_cpu().
5438 	 * This way we generate a sched IPI on the target cpu which
5439 	 * is idle. And the softirq performing nohz idle load balance
5440 	 * will be run before returning from the IPI.
5441 	 */
5442 	smp_send_reschedule(ilb_cpu);
5443 	return;
5444 }
5445 
5446 static inline void nohz_balance_exit_idle(int cpu)
5447 {
5448 	if (unlikely(test_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu)))) {
5449 		cpumask_clear_cpu(cpu, nohz.idle_cpus_mask);
5450 		atomic_dec(&nohz.nr_cpus);
5451 		clear_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu));
5452 	}
5453 }
5454 
5455 static inline void set_cpu_sd_state_busy(void)
5456 {
5457 	struct sched_domain *sd;
5458 
5459 	rcu_read_lock();
5460 	sd = rcu_dereference_check_sched_domain(this_rq()->sd);
5461 
5462 	if (!sd || !sd->nohz_idle)
5463 		goto unlock;
5464 	sd->nohz_idle = 0;
5465 
5466 	for (; sd; sd = sd->parent)
5467 		atomic_inc(&sd->groups->sgp->nr_busy_cpus);
5468 unlock:
5469 	rcu_read_unlock();
5470 }
5471 
5472 void set_cpu_sd_state_idle(void)
5473 {
5474 	struct sched_domain *sd;
5475 
5476 	rcu_read_lock();
5477 	sd = rcu_dereference_check_sched_domain(this_rq()->sd);
5478 
5479 	if (!sd || sd->nohz_idle)
5480 		goto unlock;
5481 	sd->nohz_idle = 1;
5482 
5483 	for (; sd; sd = sd->parent)
5484 		atomic_dec(&sd->groups->sgp->nr_busy_cpus);
5485 unlock:
5486 	rcu_read_unlock();
5487 }
5488 
5489 /*
5490  * This routine will record that the cpu is going idle with tick stopped.
5491  * This info will be used in performing idle load balancing in the future.
5492  */
5493 void nohz_balance_enter_idle(int cpu)
5494 {
5495 	/*
5496 	 * If this cpu is going down, then nothing needs to be done.
5497 	 */
5498 	if (!cpu_active(cpu))
5499 		return;
5500 
5501 	if (test_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu)))
5502 		return;
5503 
5504 	cpumask_set_cpu(cpu, nohz.idle_cpus_mask);
5505 	atomic_inc(&nohz.nr_cpus);
5506 	set_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu));
5507 }
5508 
5509 static int sched_ilb_notifier(struct notifier_block *nfb,
5510 					unsigned long action, void *hcpu)
5511 {
5512 	switch (action & ~CPU_TASKS_FROZEN) {
5513 	case CPU_DYING:
5514 		nohz_balance_exit_idle(smp_processor_id());
5515 		return NOTIFY_OK;
5516 	default:
5517 		return NOTIFY_DONE;
5518 	}
5519 }
5520 #endif
5521 
5522 static DEFINE_SPINLOCK(balancing);
5523 
5524 /*
5525  * Scale the max load_balance interval with the number of CPUs in the system.
5526  * This trades load-balance latency on larger machines for less cross talk.
5527  */
5528 void update_max_interval(void)
5529 {
5530 	max_load_balance_interval = HZ*num_online_cpus()/10;
5531 }
5532 
5533 /*
5534  * It checks each scheduling domain to see if it is due to be balanced,
5535  * and initiates a balancing operation if so.
5536  *
5537  * Balancing parameters are set up in init_sched_domains.
5538  */
5539 static void rebalance_domains(int cpu, enum cpu_idle_type idle)
5540 {
5541 	int balance = 1;
5542 	struct rq *rq = cpu_rq(cpu);
5543 	unsigned long interval;
5544 	struct sched_domain *sd;
5545 	/* Earliest time when we have to do rebalance again */
5546 	unsigned long next_balance = jiffies + 60*HZ;
5547 	int update_next_balance = 0;
5548 	int need_serialize;
5549 
5550 	update_blocked_averages(cpu);
5551 
5552 	rcu_read_lock();
5553 	for_each_domain(cpu, sd) {
5554 		if (!(sd->flags & SD_LOAD_BALANCE))
5555 			continue;
5556 
5557 		interval = sd->balance_interval;
5558 		if (idle != CPU_IDLE)
5559 			interval *= sd->busy_factor;
5560 
5561 		/* scale ms to jiffies */
5562 		interval = msecs_to_jiffies(interval);
5563 		interval = clamp(interval, 1UL, max_load_balance_interval);
5564 
5565 		need_serialize = sd->flags & SD_SERIALIZE;
5566 
5567 		if (need_serialize) {
5568 			if (!spin_trylock(&balancing))
5569 				goto out;
5570 		}
5571 
5572 		if (time_after_eq(jiffies, sd->last_balance + interval)) {
5573 			if (load_balance(cpu, rq, sd, idle, &balance)) {
5574 				/*
5575 				 * The LBF_SOME_PINNED logic could have changed
5576 				 * env->dst_cpu, so we can't know our idle
5577 				 * state even if we migrated tasks. Update it.
5578 				 */
5579 				idle = idle_cpu(cpu) ? CPU_IDLE : CPU_NOT_IDLE;
5580 			}
5581 			sd->last_balance = jiffies;
5582 		}
5583 		if (need_serialize)
5584 			spin_unlock(&balancing);
5585 out:
5586 		if (time_after(next_balance, sd->last_balance + interval)) {
5587 			next_balance = sd->last_balance + interval;
5588 			update_next_balance = 1;
5589 		}
5590 
5591 		/*
5592 		 * Stop the load balance at this level. There is another
5593 		 * CPU in our sched group which is doing load balancing more
5594 		 * actively.
5595 		 */
5596 		if (!balance)
5597 			break;
5598 	}
5599 	rcu_read_unlock();
5600 
5601 	/*
5602 	 * next_balance will be updated only when there is a need.
5603 	 * When the cpu is attached to null domain for ex, it will not be
5604 	 * updated.
5605 	 */
5606 	if (likely(update_next_balance))
5607 		rq->next_balance = next_balance;
5608 }
5609 
5610 #ifdef CONFIG_NO_HZ_COMMON
5611 /*
5612  * In CONFIG_NO_HZ_COMMON case, the idle balance kickee will do the
5613  * rebalancing for all the cpus for whom scheduler ticks are stopped.
5614  */
5615 static void nohz_idle_balance(int this_cpu, enum cpu_idle_type idle)
5616 {
5617 	struct rq *this_rq = cpu_rq(this_cpu);
5618 	struct rq *rq;
5619 	int balance_cpu;
5620 
5621 	if (idle != CPU_IDLE ||
5622 	    !test_bit(NOHZ_BALANCE_KICK, nohz_flags(this_cpu)))
5623 		goto end;
5624 
5625 	for_each_cpu(balance_cpu, nohz.idle_cpus_mask) {
5626 		if (balance_cpu == this_cpu || !idle_cpu(balance_cpu))
5627 			continue;
5628 
5629 		/*
5630 		 * If this cpu gets work to do, stop the load balancing
5631 		 * work being done for other cpus. Next load
5632 		 * balancing owner will pick it up.
5633 		 */
5634 		if (need_resched())
5635 			break;
5636 
5637 		rq = cpu_rq(balance_cpu);
5638 
5639 		raw_spin_lock_irq(&rq->lock);
5640 		update_rq_clock(rq);
5641 		update_idle_cpu_load(rq);
5642 		raw_spin_unlock_irq(&rq->lock);
5643 
5644 		rebalance_domains(balance_cpu, CPU_IDLE);
5645 
5646 		if (time_after(this_rq->next_balance, rq->next_balance))
5647 			this_rq->next_balance = rq->next_balance;
5648 	}
5649 	nohz.next_balance = this_rq->next_balance;
5650 end:
5651 	clear_bit(NOHZ_BALANCE_KICK, nohz_flags(this_cpu));
5652 }
5653 
5654 /*
5655  * Current heuristic for kicking the idle load balancer in the presence
5656  * of an idle cpu is the system.
5657  *   - This rq has more than one task.
5658  *   - At any scheduler domain level, this cpu's scheduler group has multiple
5659  *     busy cpu's exceeding the group's power.
5660  *   - For SD_ASYM_PACKING, if the lower numbered cpu's in the scheduler
5661  *     domain span are idle.
5662  */
5663 static inline int nohz_kick_needed(struct rq *rq, int cpu)
5664 {
5665 	unsigned long now = jiffies;
5666 	struct sched_domain *sd;
5667 
5668 	if (unlikely(idle_cpu(cpu)))
5669 		return 0;
5670 
5671        /*
5672 	* We may be recently in ticked or tickless idle mode. At the first
5673 	* busy tick after returning from idle, we will update the busy stats.
5674 	*/
5675 	set_cpu_sd_state_busy();
5676 	nohz_balance_exit_idle(cpu);
5677 
5678 	/*
5679 	 * None are in tickless mode and hence no need for NOHZ idle load
5680 	 * balancing.
5681 	 */
5682 	if (likely(!atomic_read(&nohz.nr_cpus)))
5683 		return 0;
5684 
5685 	if (time_before(now, nohz.next_balance))
5686 		return 0;
5687 
5688 	if (rq->nr_running >= 2)
5689 		goto need_kick;
5690 
5691 	rcu_read_lock();
5692 	for_each_domain(cpu, sd) {
5693 		struct sched_group *sg = sd->groups;
5694 		struct sched_group_power *sgp = sg->sgp;
5695 		int nr_busy = atomic_read(&sgp->nr_busy_cpus);
5696 
5697 		if (sd->flags & SD_SHARE_PKG_RESOURCES && nr_busy > 1)
5698 			goto need_kick_unlock;
5699 
5700 		if (sd->flags & SD_ASYM_PACKING && nr_busy != sg->group_weight
5701 		    && (cpumask_first_and(nohz.idle_cpus_mask,
5702 					  sched_domain_span(sd)) < cpu))
5703 			goto need_kick_unlock;
5704 
5705 		if (!(sd->flags & (SD_SHARE_PKG_RESOURCES | SD_ASYM_PACKING)))
5706 			break;
5707 	}
5708 	rcu_read_unlock();
5709 	return 0;
5710 
5711 need_kick_unlock:
5712 	rcu_read_unlock();
5713 need_kick:
5714 	return 1;
5715 }
5716 #else
5717 static void nohz_idle_balance(int this_cpu, enum cpu_idle_type idle) { }
5718 #endif
5719 
5720 /*
5721  * run_rebalance_domains is triggered when needed from the scheduler tick.
5722  * Also triggered for nohz idle balancing (with nohz_balancing_kick set).
5723  */
5724 static void run_rebalance_domains(struct softirq_action *h)
5725 {
5726 	int this_cpu = smp_processor_id();
5727 	struct rq *this_rq = cpu_rq(this_cpu);
5728 	enum cpu_idle_type idle = this_rq->idle_balance ?
5729 						CPU_IDLE : CPU_NOT_IDLE;
5730 
5731 	rebalance_domains(this_cpu, idle);
5732 
5733 	/*
5734 	 * If this cpu has a pending nohz_balance_kick, then do the
5735 	 * balancing on behalf of the other idle cpus whose ticks are
5736 	 * stopped.
5737 	 */
5738 	nohz_idle_balance(this_cpu, idle);
5739 }
5740 
5741 static inline int on_null_domain(int cpu)
5742 {
5743 	return !rcu_dereference_sched(cpu_rq(cpu)->sd);
5744 }
5745 
5746 /*
5747  * Trigger the SCHED_SOFTIRQ if it is time to do periodic load balancing.
5748  */
5749 void trigger_load_balance(struct rq *rq, int cpu)
5750 {
5751 	/* Don't need to rebalance while attached to NULL domain */
5752 	if (time_after_eq(jiffies, rq->next_balance) &&
5753 	    likely(!on_null_domain(cpu)))
5754 		raise_softirq(SCHED_SOFTIRQ);
5755 #ifdef CONFIG_NO_HZ_COMMON
5756 	if (nohz_kick_needed(rq, cpu) && likely(!on_null_domain(cpu)))
5757 		nohz_balancer_kick(cpu);
5758 #endif
5759 }
5760 
5761 static void rq_online_fair(struct rq *rq)
5762 {
5763 	update_sysctl();
5764 }
5765 
5766 static void rq_offline_fair(struct rq *rq)
5767 {
5768 	update_sysctl();
5769 
5770 	/* Ensure any throttled groups are reachable by pick_next_task */
5771 	unthrottle_offline_cfs_rqs(rq);
5772 }
5773 
5774 #endif /* CONFIG_SMP */
5775 
5776 /*
5777  * scheduler tick hitting a task of our scheduling class:
5778  */
5779 static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued)
5780 {
5781 	struct cfs_rq *cfs_rq;
5782 	struct sched_entity *se = &curr->se;
5783 
5784 	for_each_sched_entity(se) {
5785 		cfs_rq = cfs_rq_of(se);
5786 		entity_tick(cfs_rq, se, queued);
5787 	}
5788 
5789 	if (numabalancing_enabled)
5790 		task_tick_numa(rq, curr);
5791 
5792 	update_rq_runnable_avg(rq, 1);
5793 }
5794 
5795 /*
5796  * called on fork with the child task as argument from the parent's context
5797  *  - child not yet on the tasklist
5798  *  - preemption disabled
5799  */
5800 static void task_fork_fair(struct task_struct *p)
5801 {
5802 	struct cfs_rq *cfs_rq;
5803 	struct sched_entity *se = &p->se, *curr;
5804 	int this_cpu = smp_processor_id();
5805 	struct rq *rq = this_rq();
5806 	unsigned long flags;
5807 
5808 	raw_spin_lock_irqsave(&rq->lock, flags);
5809 
5810 	update_rq_clock(rq);
5811 
5812 	cfs_rq = task_cfs_rq(current);
5813 	curr = cfs_rq->curr;
5814 
5815 	if (unlikely(task_cpu(p) != this_cpu)) {
5816 		rcu_read_lock();
5817 		__set_task_cpu(p, this_cpu);
5818 		rcu_read_unlock();
5819 	}
5820 
5821 	update_curr(cfs_rq);
5822 
5823 	if (curr)
5824 		se->vruntime = curr->vruntime;
5825 	place_entity(cfs_rq, se, 1);
5826 
5827 	if (sysctl_sched_child_runs_first && curr && entity_before(curr, se)) {
5828 		/*
5829 		 * Upon rescheduling, sched_class::put_prev_task() will place
5830 		 * 'current' within the tree based on its new key value.
5831 		 */
5832 		swap(curr->vruntime, se->vruntime);
5833 		resched_task(rq->curr);
5834 	}
5835 
5836 	se->vruntime -= cfs_rq->min_vruntime;
5837 
5838 	raw_spin_unlock_irqrestore(&rq->lock, flags);
5839 }
5840 
5841 /*
5842  * Priority of the task has changed. Check to see if we preempt
5843  * the current task.
5844  */
5845 static void
5846 prio_changed_fair(struct rq *rq, struct task_struct *p, int oldprio)
5847 {
5848 	if (!p->se.on_rq)
5849 		return;
5850 
5851 	/*
5852 	 * Reschedule if we are currently running on this runqueue and
5853 	 * our priority decreased, or if we are not currently running on
5854 	 * this runqueue and our priority is higher than the current's
5855 	 */
5856 	if (rq->curr == p) {
5857 		if (p->prio > oldprio)
5858 			resched_task(rq->curr);
5859 	} else
5860 		check_preempt_curr(rq, p, 0);
5861 }
5862 
5863 static void switched_from_fair(struct rq *rq, struct task_struct *p)
5864 {
5865 	struct sched_entity *se = &p->se;
5866 	struct cfs_rq *cfs_rq = cfs_rq_of(se);
5867 
5868 	/*
5869 	 * Ensure the task's vruntime is normalized, so that when its
5870 	 * switched back to the fair class the enqueue_entity(.flags=0) will
5871 	 * do the right thing.
5872 	 *
5873 	 * If it was on_rq, then the dequeue_entity(.flags=0) will already
5874 	 * have normalized the vruntime, if it was !on_rq, then only when
5875 	 * the task is sleeping will it still have non-normalized vruntime.
5876 	 */
5877 	if (!se->on_rq && p->state != TASK_RUNNING) {
5878 		/*
5879 		 * Fix up our vruntime so that the current sleep doesn't
5880 		 * cause 'unlimited' sleep bonus.
5881 		 */
5882 		place_entity(cfs_rq, se, 0);
5883 		se->vruntime -= cfs_rq->min_vruntime;
5884 	}
5885 
5886 #ifdef CONFIG_SMP
5887 	/*
5888 	* Remove our load from contribution when we leave sched_fair
5889 	* and ensure we don't carry in an old decay_count if we
5890 	* switch back.
5891 	*/
5892 	if (p->se.avg.decay_count) {
5893 		struct cfs_rq *cfs_rq = cfs_rq_of(&p->se);
5894 		__synchronize_entity_decay(&p->se);
5895 		subtract_blocked_load_contrib(cfs_rq,
5896 				p->se.avg.load_avg_contrib);
5897 	}
5898 #endif
5899 }
5900 
5901 /*
5902  * We switched to the sched_fair class.
5903  */
5904 static void switched_to_fair(struct rq *rq, struct task_struct *p)
5905 {
5906 	if (!p->se.on_rq)
5907 		return;
5908 
5909 	/*
5910 	 * We were most likely switched from sched_rt, so
5911 	 * kick off the schedule if running, otherwise just see
5912 	 * if we can still preempt the current task.
5913 	 */
5914 	if (rq->curr == p)
5915 		resched_task(rq->curr);
5916 	else
5917 		check_preempt_curr(rq, p, 0);
5918 }
5919 
5920 /* Account for a task changing its policy or group.
5921  *
5922  * This routine is mostly called to set cfs_rq->curr field when a task
5923  * migrates between groups/classes.
5924  */
5925 static void set_curr_task_fair(struct rq *rq)
5926 {
5927 	struct sched_entity *se = &rq->curr->se;
5928 
5929 	for_each_sched_entity(se) {
5930 		struct cfs_rq *cfs_rq = cfs_rq_of(se);
5931 
5932 		set_next_entity(cfs_rq, se);
5933 		/* ensure bandwidth has been allocated on our new cfs_rq */
5934 		account_cfs_rq_runtime(cfs_rq, 0);
5935 	}
5936 }
5937 
5938 void init_cfs_rq(struct cfs_rq *cfs_rq)
5939 {
5940 	cfs_rq->tasks_timeline = RB_ROOT;
5941 	cfs_rq->min_vruntime = (u64)(-(1LL << 20));
5942 #ifndef CONFIG_64BIT
5943 	cfs_rq->min_vruntime_copy = cfs_rq->min_vruntime;
5944 #endif
5945 #ifdef CONFIG_SMP
5946 	atomic64_set(&cfs_rq->decay_counter, 1);
5947 	atomic_long_set(&cfs_rq->removed_load, 0);
5948 #endif
5949 }
5950 
5951 #ifdef CONFIG_FAIR_GROUP_SCHED
5952 static void task_move_group_fair(struct task_struct *p, int on_rq)
5953 {
5954 	struct cfs_rq *cfs_rq;
5955 	/*
5956 	 * If the task was not on the rq at the time of this cgroup movement
5957 	 * it must have been asleep, sleeping tasks keep their ->vruntime
5958 	 * absolute on their old rq until wakeup (needed for the fair sleeper
5959 	 * bonus in place_entity()).
5960 	 *
5961 	 * If it was on the rq, we've just 'preempted' it, which does convert
5962 	 * ->vruntime to a relative base.
5963 	 *
5964 	 * Make sure both cases convert their relative position when migrating
5965 	 * to another cgroup's rq. This does somewhat interfere with the
5966 	 * fair sleeper stuff for the first placement, but who cares.
5967 	 */
5968 	/*
5969 	 * When !on_rq, vruntime of the task has usually NOT been normalized.
5970 	 * But there are some cases where it has already been normalized:
5971 	 *
5972 	 * - Moving a forked child which is waiting for being woken up by
5973 	 *   wake_up_new_task().
5974 	 * - Moving a task which has been woken up by try_to_wake_up() and
5975 	 *   waiting for actually being woken up by sched_ttwu_pending().
5976 	 *
5977 	 * To prevent boost or penalty in the new cfs_rq caused by delta
5978 	 * min_vruntime between the two cfs_rqs, we skip vruntime adjustment.
5979 	 */
5980 	if (!on_rq && (!p->se.sum_exec_runtime || p->state == TASK_WAKING))
5981 		on_rq = 1;
5982 
5983 	if (!on_rq)
5984 		p->se.vruntime -= cfs_rq_of(&p->se)->min_vruntime;
5985 	set_task_rq(p, task_cpu(p));
5986 	if (!on_rq) {
5987 		cfs_rq = cfs_rq_of(&p->se);
5988 		p->se.vruntime += cfs_rq->min_vruntime;
5989 #ifdef CONFIG_SMP
5990 		/*
5991 		 * migrate_task_rq_fair() will have removed our previous
5992 		 * contribution, but we must synchronize for ongoing future
5993 		 * decay.
5994 		 */
5995 		p->se.avg.decay_count = atomic64_read(&cfs_rq->decay_counter);
5996 		cfs_rq->blocked_load_avg += p->se.avg.load_avg_contrib;
5997 #endif
5998 	}
5999 }
6000 
6001 void free_fair_sched_group(struct task_group *tg)
6002 {
6003 	int i;
6004 
6005 	destroy_cfs_bandwidth(tg_cfs_bandwidth(tg));
6006 
6007 	for_each_possible_cpu(i) {
6008 		if (tg->cfs_rq)
6009 			kfree(tg->cfs_rq[i]);
6010 		if (tg->se)
6011 			kfree(tg->se[i]);
6012 	}
6013 
6014 	kfree(tg->cfs_rq);
6015 	kfree(tg->se);
6016 }
6017 
6018 int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
6019 {
6020 	struct cfs_rq *cfs_rq;
6021 	struct sched_entity *se;
6022 	int i;
6023 
6024 	tg->cfs_rq = kzalloc(sizeof(cfs_rq) * nr_cpu_ids, GFP_KERNEL);
6025 	if (!tg->cfs_rq)
6026 		goto err;
6027 	tg->se = kzalloc(sizeof(se) * nr_cpu_ids, GFP_KERNEL);
6028 	if (!tg->se)
6029 		goto err;
6030 
6031 	tg->shares = NICE_0_LOAD;
6032 
6033 	init_cfs_bandwidth(tg_cfs_bandwidth(tg));
6034 
6035 	for_each_possible_cpu(i) {
6036 		cfs_rq = kzalloc_node(sizeof(struct cfs_rq),
6037 				      GFP_KERNEL, cpu_to_node(i));
6038 		if (!cfs_rq)
6039 			goto err;
6040 
6041 		se = kzalloc_node(sizeof(struct sched_entity),
6042 				  GFP_KERNEL, cpu_to_node(i));
6043 		if (!se)
6044 			goto err_free_rq;
6045 
6046 		init_cfs_rq(cfs_rq);
6047 		init_tg_cfs_entry(tg, cfs_rq, se, i, parent->se[i]);
6048 	}
6049 
6050 	return 1;
6051 
6052 err_free_rq:
6053 	kfree(cfs_rq);
6054 err:
6055 	return 0;
6056 }
6057 
6058 void unregister_fair_sched_group(struct task_group *tg, int cpu)
6059 {
6060 	struct rq *rq = cpu_rq(cpu);
6061 	unsigned long flags;
6062 
6063 	/*
6064 	* Only empty task groups can be destroyed; so we can speculatively
6065 	* check on_list without danger of it being re-added.
6066 	*/
6067 	if (!tg->cfs_rq[cpu]->on_list)
6068 		return;
6069 
6070 	raw_spin_lock_irqsave(&rq->lock, flags);
6071 	list_del_leaf_cfs_rq(tg->cfs_rq[cpu]);
6072 	raw_spin_unlock_irqrestore(&rq->lock, flags);
6073 }
6074 
6075 void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
6076 			struct sched_entity *se, int cpu,
6077 			struct sched_entity *parent)
6078 {
6079 	struct rq *rq = cpu_rq(cpu);
6080 
6081 	cfs_rq->tg = tg;
6082 	cfs_rq->rq = rq;
6083 	init_cfs_rq_runtime(cfs_rq);
6084 
6085 	tg->cfs_rq[cpu] = cfs_rq;
6086 	tg->se[cpu] = se;
6087 
6088 	/* se could be NULL for root_task_group */
6089 	if (!se)
6090 		return;
6091 
6092 	if (!parent)
6093 		se->cfs_rq = &rq->cfs;
6094 	else
6095 		se->cfs_rq = parent->my_q;
6096 
6097 	se->my_q = cfs_rq;
6098 	update_load_set(&se->load, 0);
6099 	se->parent = parent;
6100 }
6101 
6102 static DEFINE_MUTEX(shares_mutex);
6103 
6104 int sched_group_set_shares(struct task_group *tg, unsigned long shares)
6105 {
6106 	int i;
6107 	unsigned long flags;
6108 
6109 	/*
6110 	 * We can't change the weight of the root cgroup.
6111 	 */
6112 	if (!tg->se[0])
6113 		return -EINVAL;
6114 
6115 	shares = clamp(shares, scale_load(MIN_SHARES), scale_load(MAX_SHARES));
6116 
6117 	mutex_lock(&shares_mutex);
6118 	if (tg->shares == shares)
6119 		goto done;
6120 
6121 	tg->shares = shares;
6122 	for_each_possible_cpu(i) {
6123 		struct rq *rq = cpu_rq(i);
6124 		struct sched_entity *se;
6125 
6126 		se = tg->se[i];
6127 		/* Propagate contribution to hierarchy */
6128 		raw_spin_lock_irqsave(&rq->lock, flags);
6129 
6130 		/* Possible calls to update_curr() need rq clock */
6131 		update_rq_clock(rq);
6132 		for_each_sched_entity(se)
6133 			update_cfs_shares(group_cfs_rq(se));
6134 		raw_spin_unlock_irqrestore(&rq->lock, flags);
6135 	}
6136 
6137 done:
6138 	mutex_unlock(&shares_mutex);
6139 	return 0;
6140 }
6141 #else /* CONFIG_FAIR_GROUP_SCHED */
6142 
6143 void free_fair_sched_group(struct task_group *tg) { }
6144 
6145 int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
6146 {
6147 	return 1;
6148 }
6149 
6150 void unregister_fair_sched_group(struct task_group *tg, int cpu) { }
6151 
6152 #endif /* CONFIG_FAIR_GROUP_SCHED */
6153 
6154 
6155 static unsigned int get_rr_interval_fair(struct rq *rq, struct task_struct *task)
6156 {
6157 	struct sched_entity *se = &task->se;
6158 	unsigned int rr_interval = 0;
6159 
6160 	/*
6161 	 * Time slice is 0 for SCHED_OTHER tasks that are on an otherwise
6162 	 * idle runqueue:
6163 	 */
6164 	if (rq->cfs.load.weight)
6165 		rr_interval = NS_TO_JIFFIES(sched_slice(cfs_rq_of(se), se));
6166 
6167 	return rr_interval;
6168 }
6169 
6170 /*
6171  * All the scheduling class methods:
6172  */
6173 const struct sched_class fair_sched_class = {
6174 	.next			= &idle_sched_class,
6175 	.enqueue_task		= enqueue_task_fair,
6176 	.dequeue_task		= dequeue_task_fair,
6177 	.yield_task		= yield_task_fair,
6178 	.yield_to_task		= yield_to_task_fair,
6179 
6180 	.check_preempt_curr	= check_preempt_wakeup,
6181 
6182 	.pick_next_task		= pick_next_task_fair,
6183 	.put_prev_task		= put_prev_task_fair,
6184 
6185 #ifdef CONFIG_SMP
6186 	.select_task_rq		= select_task_rq_fair,
6187 	.migrate_task_rq	= migrate_task_rq_fair,
6188 
6189 	.rq_online		= rq_online_fair,
6190 	.rq_offline		= rq_offline_fair,
6191 
6192 	.task_waking		= task_waking_fair,
6193 #endif
6194 
6195 	.set_curr_task          = set_curr_task_fair,
6196 	.task_tick		= task_tick_fair,
6197 	.task_fork		= task_fork_fair,
6198 
6199 	.prio_changed		= prio_changed_fair,
6200 	.switched_from		= switched_from_fair,
6201 	.switched_to		= switched_to_fair,
6202 
6203 	.get_rr_interval	= get_rr_interval_fair,
6204 
6205 #ifdef CONFIG_FAIR_GROUP_SCHED
6206 	.task_move_group	= task_move_group_fair,
6207 #endif
6208 };
6209 
6210 #ifdef CONFIG_SCHED_DEBUG
6211 void print_cfs_stats(struct seq_file *m, int cpu)
6212 {
6213 	struct cfs_rq *cfs_rq;
6214 
6215 	rcu_read_lock();
6216 	for_each_leaf_cfs_rq(cpu_rq(cpu), cfs_rq)
6217 		print_cfs_rq(m, cpu, cfs_rq);
6218 	rcu_read_unlock();
6219 }
6220 #endif
6221 
6222 __init void init_sched_fair_class(void)
6223 {
6224 #ifdef CONFIG_SMP
6225 	open_softirq(SCHED_SOFTIRQ, run_rebalance_domains);
6226 
6227 #ifdef CONFIG_NO_HZ_COMMON
6228 	nohz.next_balance = jiffies;
6229 	zalloc_cpumask_var(&nohz.idle_cpus_mask, GFP_NOWAIT);
6230 	cpu_notifier(sched_ilb_notifier, 0);
6231 #endif
6232 #endif /* SMP */
6233 
6234 }
6235