xref: /openbmc/linux/kernel/sched/fair.c (revision 80ecbd24)
1 /*
2  * Completely Fair Scheduling (CFS) Class (SCHED_NORMAL/SCHED_BATCH)
3  *
4  *  Copyright (C) 2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
5  *
6  *  Interactivity improvements by Mike Galbraith
7  *  (C) 2007 Mike Galbraith <efault@gmx.de>
8  *
9  *  Various enhancements by Dmitry Adamushko.
10  *  (C) 2007 Dmitry Adamushko <dmitry.adamushko@gmail.com>
11  *
12  *  Group scheduling enhancements by Srivatsa Vaddagiri
13  *  Copyright IBM Corporation, 2007
14  *  Author: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>
15  *
16  *  Scaled math optimizations by Thomas Gleixner
17  *  Copyright (C) 2007, Thomas Gleixner <tglx@linutronix.de>
18  *
19  *  Adaptive scheduling granularity, math enhancements by Peter Zijlstra
20  *  Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
21  */
22 
23 #include <linux/latencytop.h>
24 #include <linux/sched.h>
25 #include <linux/cpumask.h>
26 #include <linux/slab.h>
27 #include <linux/profile.h>
28 #include <linux/interrupt.h>
29 #include <linux/mempolicy.h>
30 #include <linux/migrate.h>
31 #include <linux/task_work.h>
32 
33 #include <trace/events/sched.h>
34 
35 #include "sched.h"
36 
37 /*
38  * Targeted preemption latency for CPU-bound tasks:
39  * (default: 6ms * (1 + ilog(ncpus)), units: nanoseconds)
40  *
41  * NOTE: this latency value is not the same as the concept of
42  * 'timeslice length' - timeslices in CFS are of variable length
43  * and have no persistent notion like in traditional, time-slice
44  * based scheduling concepts.
45  *
46  * (to see the precise effective timeslice length of your workload,
47  *  run vmstat and monitor the context-switches (cs) field)
48  */
49 unsigned int sysctl_sched_latency = 6000000ULL;
50 unsigned int normalized_sysctl_sched_latency = 6000000ULL;
51 
52 /*
53  * The initial- and re-scaling of tunables is configurable
54  * (default: SCHED_TUNABLESCALING_LOG = *(1+ilog(ncpus)))
55  *
56  * Options are:
57  * SCHED_TUNABLESCALING_NONE - unscaled, always *1
58  * SCHED_TUNABLESCALING_LOG - scaled logarithmically, *(1+ilog(ncpus))
59  * SCHED_TUNABLESCALING_LINEAR - scaled linearly, *ncpus
60  */
61 enum sched_tunable_scaling sysctl_sched_tunable_scaling
62 	= SCHED_TUNABLESCALING_LOG;
63 
64 /*
65  * Minimal preemption granularity for CPU-bound tasks:
66  * (default: 0.75 msec * (1 + ilog(ncpus)), units: nanoseconds)
67  */
68 unsigned int sysctl_sched_min_granularity = 750000ULL;
69 unsigned int normalized_sysctl_sched_min_granularity = 750000ULL;
70 
71 /*
72  * sched_nr_latency is kept at sysctl_sched_latency / sysctl_sched_min_granularity
73  */
74 static unsigned int sched_nr_latency = 8;
75 
76 /*
77  * After fork, child runs first. If set to 0 (default) then
78  * parent will (try to) run first.
79  */
80 unsigned int sysctl_sched_child_runs_first __read_mostly;
81 
82 /*
83  * SCHED_OTHER wake-up granularity.
84  * (default: 1 msec * (1 + ilog(ncpus)), units: nanoseconds)
85  *
86  * This option delays the preemption effects of decoupled workloads
87  * and reduces their over-scheduling. Synchronous workloads will still
88  * have immediate wakeup/sleep latencies.
89  */
90 unsigned int sysctl_sched_wakeup_granularity = 1000000UL;
91 unsigned int normalized_sysctl_sched_wakeup_granularity = 1000000UL;
92 
93 const_debug unsigned int sysctl_sched_migration_cost = 500000UL;
94 
95 /*
96  * The exponential sliding window over which load is averaged for shares
97  * distribution.
98  * (default: 10msec)
99  */
100 unsigned int __read_mostly sysctl_sched_shares_window = 10000000UL;
101 
102 #ifdef CONFIG_CFS_BANDWIDTH
103 /*
104  * Amount of runtime to allocate from global (tg) to local (per-cfs_rq) pool
105  * each time a cfs_rq requests quota.
106  *
107  * Note: in the case that the slice exceeds the runtime remaining (either due
108  * to consumption or the quota being specified to be smaller than the slice)
109  * we will only issue the remaining available time.
110  *
111  * default: 5 msec, units: microseconds
112  */
113 unsigned int sysctl_sched_cfs_bandwidth_slice = 5000UL;
114 #endif
115 
116 static inline void update_load_add(struct load_weight *lw, unsigned long inc)
117 {
118 	lw->weight += inc;
119 	lw->inv_weight = 0;
120 }
121 
122 static inline void update_load_sub(struct load_weight *lw, unsigned long dec)
123 {
124 	lw->weight -= dec;
125 	lw->inv_weight = 0;
126 }
127 
128 static inline void update_load_set(struct load_weight *lw, unsigned long w)
129 {
130 	lw->weight = w;
131 	lw->inv_weight = 0;
132 }
133 
134 /*
135  * Increase the granularity value when there are more CPUs,
136  * because with more CPUs the 'effective latency' as visible
137  * to users decreases. But the relationship is not linear,
138  * so pick a second-best guess by going with the log2 of the
139  * number of CPUs.
140  *
141  * This idea comes from the SD scheduler of Con Kolivas:
142  */
143 static int get_update_sysctl_factor(void)
144 {
145 	unsigned int cpus = min_t(int, num_online_cpus(), 8);
146 	unsigned int factor;
147 
148 	switch (sysctl_sched_tunable_scaling) {
149 	case SCHED_TUNABLESCALING_NONE:
150 		factor = 1;
151 		break;
152 	case SCHED_TUNABLESCALING_LINEAR:
153 		factor = cpus;
154 		break;
155 	case SCHED_TUNABLESCALING_LOG:
156 	default:
157 		factor = 1 + ilog2(cpus);
158 		break;
159 	}
160 
161 	return factor;
162 }
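
/*
 * Worked example (editor's illustration, not in the original source):
 * num_online_cpus() is clamped to 8 above, so on an 8-CPU or larger
 * machine the factor is 1 for NONE, 8 for LINEAR and 1 + ilog2(8) = 4
 * for LOG.  With the LOG default, sysctl_sched_latency is scaled from
 * 6ms to 4 * 6ms = 24ms by update_sysctl() below.
 */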
163 
164 static void update_sysctl(void)
165 {
166 	unsigned int factor = get_update_sysctl_factor();
167 
168 #define SET_SYSCTL(name) \
169 	(sysctl_##name = (factor) * normalized_sysctl_##name)
170 	SET_SYSCTL(sched_min_granularity);
171 	SET_SYSCTL(sched_latency);
172 	SET_SYSCTL(sched_wakeup_granularity);
173 #undef SET_SYSCTL
174 }
175 
176 void sched_init_granularity(void)
177 {
178 	update_sysctl();
179 }
180 
181 #if BITS_PER_LONG == 32
182 # define WMULT_CONST	(~0UL)
183 #else
184 # define WMULT_CONST	(1UL << 32)
185 #endif
186 
187 #define WMULT_SHIFT	32
188 
189 /*
190  * Shift right and round:
191  */
192 #define SRR(x, y) (((x) + (1UL << ((y) - 1))) >> (y))
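
/*
 * Worked example (editor's illustration): SRR(5, 1) = (5 + 1) >> 1 = 3,
 * whereas a plain 5 >> 1 = 2 would truncate; the 1UL << ((y) - 1) term
 * implements round-to-nearest.
 */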
193 
194 /*
195  * delta *= weight / lw
196  */
197 static unsigned long
198 calc_delta_mine(unsigned long delta_exec, unsigned long weight,
199 		struct load_weight *lw)
200 {
201 	u64 tmp;
202 
203 	/*
204 	 * weight can be less than 2^SCHED_LOAD_RESOLUTION for task group sched
205 	 * entities since MIN_SHARES = 2. Treat weight as 1 if less than
206 	 * 2^SCHED_LOAD_RESOLUTION.
207 	 */
208 	if (likely(weight > (1UL << SCHED_LOAD_RESOLUTION)))
209 		tmp = (u64)delta_exec * scale_load_down(weight);
210 	else
211 		tmp = (u64)delta_exec;
212 
213 	if (!lw->inv_weight) {
214 		unsigned long w = scale_load_down(lw->weight);
215 
216 		if (BITS_PER_LONG > 32 && unlikely(w >= WMULT_CONST))
217 			lw->inv_weight = 1;
218 		else if (unlikely(!w))
219 			lw->inv_weight = WMULT_CONST;
220 		else
221 			lw->inv_weight = WMULT_CONST / w;
222 	}
223 
224 	/*
225 	 * Check whether we'd overflow the 64-bit multiplication:
226 	 */
227 	if (unlikely(tmp > WMULT_CONST))
228 		tmp = SRR(SRR(tmp, WMULT_SHIFT/2) * lw->inv_weight,
229 			WMULT_SHIFT/2);
230 	else
231 		tmp = SRR(tmp * lw->inv_weight, WMULT_SHIFT);
232 
233 	return (unsigned long)min(tmp, (u64)(unsigned long)LONG_MAX);
234 }
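
/*
 * Worked example (editor's illustration, with assumed default weights):
 * for two runnable nice-0 tasks, lw->weight scales down to 2 * 1024, so
 * inv_weight = WMULT_CONST / 2048 and calc_delta_mine(1000000,
 * NICE_0_LOAD, lw) comes out at roughly 500000: each task is credited
 * with about half of the elapsed wall-clock time, as expected.
 */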
235 
236 
237 const struct sched_class fair_sched_class;
238 
239 /**************************************************************
240  * CFS operations on generic schedulable entities:
241  */
242 
243 #ifdef CONFIG_FAIR_GROUP_SCHED
244 
245 /* cpu runqueue to which this cfs_rq is attached */
246 static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
247 {
248 	return cfs_rq->rq;
249 }
250 
251 /* An entity is a task if it doesn't "own" a runqueue */
252 #define entity_is_task(se)	(!se->my_q)
253 
254 static inline struct task_struct *task_of(struct sched_entity *se)
255 {
256 #ifdef CONFIG_SCHED_DEBUG
257 	WARN_ON_ONCE(!entity_is_task(se));
258 #endif
259 	return container_of(se, struct task_struct, se);
260 }
261 
262 /* Walk up scheduling entities hierarchy */
263 #define for_each_sched_entity(se) \
264 		for (; se; se = se->parent)
265 
266 static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
267 {
268 	return p->se.cfs_rq;
269 }
270 
271 /* runqueue on which this entity is (to be) queued */
272 static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
273 {
274 	return se->cfs_rq;
275 }
276 
277 /* runqueue "owned" by this group */
278 static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
279 {
280 	return grp->my_q;
281 }
282 
283 static void update_cfs_rq_blocked_load(struct cfs_rq *cfs_rq,
284 				       int force_update);
285 
286 static inline void list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq)
287 {
288 	if (!cfs_rq->on_list) {
289 		/*
290 		 * Ensure we either appear before our parent (if already
291 		 * enqueued) or force our parent to appear after us when it is
292 		 * enqueued.  The fact that we always enqueue bottom-up
293 		 * reduces this to two cases.
294 		 */
295 		if (cfs_rq->tg->parent &&
296 		    cfs_rq->tg->parent->cfs_rq[cpu_of(rq_of(cfs_rq))]->on_list) {
297 			list_add_rcu(&cfs_rq->leaf_cfs_rq_list,
298 				&rq_of(cfs_rq)->leaf_cfs_rq_list);
299 		} else {
300 			list_add_tail_rcu(&cfs_rq->leaf_cfs_rq_list,
301 				&rq_of(cfs_rq)->leaf_cfs_rq_list);
302 		}
303 
304 		cfs_rq->on_list = 1;
305 		/* We should have no load, but we need to update last_decay. */
306 		update_cfs_rq_blocked_load(cfs_rq, 0);
307 	}
308 }
309 
310 static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq)
311 {
312 	if (cfs_rq->on_list) {
313 		list_del_rcu(&cfs_rq->leaf_cfs_rq_list);
314 		cfs_rq->on_list = 0;
315 	}
316 }
317 
318 /* Iterate through all leaf cfs_rq's on a runqueue */
319 #define for_each_leaf_cfs_rq(rq, cfs_rq) \
320 	list_for_each_entry_rcu(cfs_rq, &rq->leaf_cfs_rq_list, leaf_cfs_rq_list)
321 
322 /* Do the two (enqueued) entities belong to the same group? */
323 static inline int
324 is_same_group(struct sched_entity *se, struct sched_entity *pse)
325 {
326 	if (se->cfs_rq == pse->cfs_rq)
327 		return 1;
328 
329 	return 0;
330 }
331 
332 static inline struct sched_entity *parent_entity(struct sched_entity *se)
333 {
334 	return se->parent;
335 }
336 
337 /* return depth at which a sched entity is present in the hierarchy */
338 static inline int depth_se(struct sched_entity *se)
339 {
340 	int depth = 0;
341 
342 	for_each_sched_entity(se)
343 		depth++;
344 
345 	return depth;
346 }
347 
348 static void
349 find_matching_se(struct sched_entity **se, struct sched_entity **pse)
350 {
351 	int se_depth, pse_depth;
352 
353 	/*
354 	 * A preemption test can only be made between sibling entities that
355 	 * are in the same cfs_rq, i.e. that have a common parent. Walk up
356 	 * the hierarchy of both tasks until we find ancestors that are
357 	 * siblings under a common parent.
358 	 */
359 
360 	/* First walk up until both entities are at same depth */
361 	se_depth = depth_se(*se);
362 	pse_depth = depth_se(*pse);
363 
364 	while (se_depth > pse_depth) {
365 		se_depth--;
366 		*se = parent_entity(*se);
367 	}
368 
369 	while (pse_depth > se_depth) {
370 		pse_depth--;
371 		*pse = parent_entity(*pse);
372 	}
373 
374 	while (!is_same_group(*se, *pse)) {
375 		*se = parent_entity(*se);
376 		*pse = parent_entity(*pse);
377 	}
378 }
379 
380 #else	/* !CONFIG_FAIR_GROUP_SCHED */
381 
382 static inline struct task_struct *task_of(struct sched_entity *se)
383 {
384 	return container_of(se, struct task_struct, se);
385 }
386 
387 static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
388 {
389 	return container_of(cfs_rq, struct rq, cfs);
390 }
391 
392 #define entity_is_task(se)	1
393 
394 #define for_each_sched_entity(se) \
395 		for (; se; se = NULL)
396 
397 static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
398 {
399 	return &task_rq(p)->cfs;
400 }
401 
402 static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
403 {
404 	struct task_struct *p = task_of(se);
405 	struct rq *rq = task_rq(p);
406 
407 	return &rq->cfs;
408 }
409 
410 /* runqueue "owned" by this group */
411 static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
412 {
413 	return NULL;
414 }
415 
416 static inline void list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq)
417 {
418 }
419 
420 static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq)
421 {
422 }
423 
424 #define for_each_leaf_cfs_rq(rq, cfs_rq) \
425 		for (cfs_rq = &rq->cfs; cfs_rq; cfs_rq = NULL)
426 
427 static inline int
428 is_same_group(struct sched_entity *se, struct sched_entity *pse)
429 {
430 	return 1;
431 }
432 
433 static inline struct sched_entity *parent_entity(struct sched_entity *se)
434 {
435 	return NULL;
436 }
437 
438 static inline void
439 find_matching_se(struct sched_entity **se, struct sched_entity **pse)
440 {
441 }
442 
443 #endif	/* CONFIG_FAIR_GROUP_SCHED */
444 
445 static __always_inline
446 void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, unsigned long delta_exec);
447 
448 /**************************************************************
449  * Scheduling class tree data structure manipulation methods:
450  */
451 
452 static inline u64 max_vruntime(u64 max_vruntime, u64 vruntime)
453 {
454 	s64 delta = (s64)(vruntime - max_vruntime);
455 	if (delta > 0)
456 		max_vruntime = vruntime;
457 
458 	return max_vruntime;
459 }
460 
461 static inline u64 min_vruntime(u64 min_vruntime, u64 vruntime)
462 {
463 	s64 delta = (s64)(vruntime - min_vruntime);
464 	if (delta < 0)
465 		min_vruntime = vruntime;
466 
467 	return min_vruntime;
468 }
469 
470 static inline int entity_before(struct sched_entity *a,
471 				struct sched_entity *b)
472 {
473 	return (s64)(a->vruntime - b->vruntime) < 0;
474 }
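
/*
 * Editor's note: the helpers above compare vruntimes via a signed delta
 * rather than comparing the u64 values directly, so they stay correct
 * across u64 wrap-around; e.g. max_vruntime(ULLONG_MAX - 10, 5) yields 5
 * because (s64)(5 - (ULLONG_MAX - 10)) = 16 > 0.
 */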
475 
476 static void update_min_vruntime(struct cfs_rq *cfs_rq)
477 {
478 	u64 vruntime = cfs_rq->min_vruntime;
479 
480 	if (cfs_rq->curr)
481 		vruntime = cfs_rq->curr->vruntime;
482 
483 	if (cfs_rq->rb_leftmost) {
484 		struct sched_entity *se = rb_entry(cfs_rq->rb_leftmost,
485 						   struct sched_entity,
486 						   run_node);
487 
488 		if (!cfs_rq->curr)
489 			vruntime = se->vruntime;
490 		else
491 			vruntime = min_vruntime(vruntime, se->vruntime);
492 	}
493 
494 	/* ensure we never gain time by being placed backwards. */
495 	cfs_rq->min_vruntime = max_vruntime(cfs_rq->min_vruntime, vruntime);
496 #ifndef CONFIG_64BIT
497 	smp_wmb();
498 	cfs_rq->min_vruntime_copy = cfs_rq->min_vruntime;
499 #endif
500 }
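
/*
 * Editor's note: on 32-bit a u64 cannot be loaded atomically, so the
 * min_vruntime_copy store and smp_wmb() above are meant to pair with an
 * smp_rmb() retry loop on the lockless reader side, letting it detect a
 * torn read of min_vruntime.
 */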
501 
502 /*
503  * Enqueue an entity into the rb-tree:
504  */
505 static void __enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
506 {
507 	struct rb_node **link = &cfs_rq->tasks_timeline.rb_node;
508 	struct rb_node *parent = NULL;
509 	struct sched_entity *entry;
510 	int leftmost = 1;
511 
512 	/*
513 	 * Find the right place in the rbtree:
514 	 */
515 	while (*link) {
516 		parent = *link;
517 		entry = rb_entry(parent, struct sched_entity, run_node);
518 		/*
519 		 * We don't care about collisions. Nodes with
520 		 * the same key stay together.
521 		 */
522 		if (entity_before(se, entry)) {
523 			link = &parent->rb_left;
524 		} else {
525 			link = &parent->rb_right;
526 			leftmost = 0;
527 		}
528 	}
529 
530 	/*
531 	 * Maintain a cache of leftmost tree entries (it is frequently
532 	 * used):
533 	 */
534 	if (leftmost)
535 		cfs_rq->rb_leftmost = &se->run_node;
536 
537 	rb_link_node(&se->run_node, parent, link);
538 	rb_insert_color(&se->run_node, &cfs_rq->tasks_timeline);
539 }
540 
541 static void __dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
542 {
543 	if (cfs_rq->rb_leftmost == &se->run_node) {
544 		struct rb_node *next_node;
545 
546 		next_node = rb_next(&se->run_node);
547 		cfs_rq->rb_leftmost = next_node;
548 	}
549 
550 	rb_erase(&se->run_node, &cfs_rq->tasks_timeline);
551 }
552 
553 struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq)
554 {
555 	struct rb_node *left = cfs_rq->rb_leftmost;
556 
557 	if (!left)
558 		return NULL;
559 
560 	return rb_entry(left, struct sched_entity, run_node);
561 }
562 
563 static struct sched_entity *__pick_next_entity(struct sched_entity *se)
564 {
565 	struct rb_node *next = rb_next(&se->run_node);
566 
567 	if (!next)
568 		return NULL;
569 
570 	return rb_entry(next, struct sched_entity, run_node);
571 }
572 
573 #ifdef CONFIG_SCHED_DEBUG
574 struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq)
575 {
576 	struct rb_node *last = rb_last(&cfs_rq->tasks_timeline);
577 
578 	if (!last)
579 		return NULL;
580 
581 	return rb_entry(last, struct sched_entity, run_node);
582 }
583 
584 /**************************************************************
585  * Scheduling class statistics methods:
586  */
587 
588 int sched_proc_update_handler(struct ctl_table *table, int write,
589 		void __user *buffer, size_t *lenp,
590 		loff_t *ppos)
591 {
592 	int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
593 	int factor = get_update_sysctl_factor();
594 
595 	if (ret || !write)
596 		return ret;
597 
598 	sched_nr_latency = DIV_ROUND_UP(sysctl_sched_latency,
599 					sysctl_sched_min_granularity);
600 
601 #define WRT_SYSCTL(name) \
602 	(normalized_sysctl_##name = sysctl_##name / (factor))
603 	WRT_SYSCTL(sched_min_granularity);
604 	WRT_SYSCTL(sched_latency);
605 	WRT_SYSCTL(sched_wakeup_granularity);
606 #undef WRT_SYSCTL
607 
608 	return 0;
609 }
610 #endif
611 
612 /*
613  * delta /= w
614  */
615 static inline unsigned long
616 calc_delta_fair(unsigned long delta, struct sched_entity *se)
617 {
618 	if (unlikely(se->load.weight != NICE_0_LOAD))
619 		delta = calc_delta_mine(delta, NICE_0_LOAD, &se->load);
620 
621 	return delta;
622 }
623 
624 /*
625  * The idea is to set a period in which each task runs once.
626  *
627  * When there are too many tasks (sched_nr_latency) we have to stretch
628  * this period because otherwise the slices get too small.
629  *
630  * p = (nr <= nl) ? l : l*nr/nl
631  */
632 static u64 __sched_period(unsigned long nr_running)
633 {
634 	u64 period = sysctl_sched_latency;
635 	unsigned long nr_latency = sched_nr_latency;
636 
637 	if (unlikely(nr_running > nr_latency)) {
638 		period = sysctl_sched_min_granularity;
639 		period *= nr_running;
640 	}
641 
642 	return period;
643 }
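
/*
 * Worked example (editor's illustration, unscaled defaults): with
 * l = 6ms and nl = 8, four runnable tasks share one 6ms period, while
 * sixteen tasks stretch it to 16 * 0.75ms = 12ms so that no slice
 * falls below the minimum granularity.
 */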
644 
645 /*
646  * We calculate the wall-time slice from the period by taking a part
647  * proportional to the weight.
648  *
649  * s = p*P[w/rw]
650  */
651 static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
652 {
653 	u64 slice = __sched_period(cfs_rq->nr_running + !se->on_rq);
654 
655 	for_each_sched_entity(se) {
656 		struct load_weight *load;
657 		struct load_weight lw;
658 
659 		cfs_rq = cfs_rq_of(se);
660 		load = &cfs_rq->load;
661 
662 		if (unlikely(!se->on_rq)) {
663 			lw = cfs_rq->load;
664 
665 			update_load_add(&lw, se->load.weight);
666 			load = &lw;
667 		}
668 		slice = calc_delta_mine(slice, se->load.weight, load);
669 	}
670 	return slice;
671 }
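
/*
 * Worked example (editor's illustration): a weight-1024 task sharing a
 * queue with a weight-2048 task gives rw = 3072; a 6ms period then
 * splits into s = 6ms * 1024/3072 = 2ms for the lighter task and 4ms
 * for the heavier one.
 */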
672 
673 /*
674  * We calculate the vruntime slice of a to-be-inserted task.
675  *
676  * vs = s/w
677  */
678 static u64 sched_vslice(struct cfs_rq *cfs_rq, struct sched_entity *se)
679 {
680 	return calc_delta_fair(sched_slice(cfs_rq, se), se);
681 }
682 
683 #ifdef CONFIG_SMP
684 static inline void __update_task_entity_contrib(struct sched_entity *se);
685 
686 /* Seed a new task's runnable averages so that its load appears heavy during its infancy */
687 void init_task_runnable_average(struct task_struct *p)
688 {
689 	u32 slice;
690 
691 	p->se.avg.decay_count = 0;
692 	slice = sched_slice(task_cfs_rq(p), &p->se) >> 10;
693 	p->se.avg.runnable_avg_sum = slice;
694 	p->se.avg.runnable_avg_period = slice;
695 	__update_task_entity_contrib(&p->se);
696 }
697 #else
698 void init_task_runnable_average(struct task_struct *p)
699 {
700 }
701 #endif
702 
703 /*
704  * Update the current task's runtime statistics. Skip current tasks that
705  * are not in our scheduling class.
706  */
707 static inline void
708 __update_curr(struct cfs_rq *cfs_rq, struct sched_entity *curr,
709 	      unsigned long delta_exec)
710 {
711 	unsigned long delta_exec_weighted;
712 
713 	schedstat_set(curr->statistics.exec_max,
714 		      max((u64)delta_exec, curr->statistics.exec_max));
715 
716 	curr->sum_exec_runtime += delta_exec;
717 	schedstat_add(cfs_rq, exec_clock, delta_exec);
718 	delta_exec_weighted = calc_delta_fair(delta_exec, curr);
719 
720 	curr->vruntime += delta_exec_weighted;
721 	update_min_vruntime(cfs_rq);
722 }
723 
724 static void update_curr(struct cfs_rq *cfs_rq)
725 {
726 	struct sched_entity *curr = cfs_rq->curr;
727 	u64 now = rq_clock_task(rq_of(cfs_rq));
728 	unsigned long delta_exec;
729 
730 	if (unlikely(!curr))
731 		return;
732 
733 	/*
734 	 * Get the amount of time the current task was running
735 	 * since the last time we changed load (this cannot
736 	 * overflow on 32 bits):
737 	 */
738 	delta_exec = (unsigned long)(now - curr->exec_start);
739 	if (!delta_exec)
740 		return;
741 
742 	__update_curr(cfs_rq, curr, delta_exec);
743 	curr->exec_start = now;
744 
745 	if (entity_is_task(curr)) {
746 		struct task_struct *curtask = task_of(curr);
747 
748 		trace_sched_stat_runtime(curtask, delta_exec, curr->vruntime);
749 		cpuacct_charge(curtask, delta_exec);
750 		account_group_exec_runtime(curtask, delta_exec);
751 	}
752 
753 	account_cfs_rq_runtime(cfs_rq, delta_exec);
754 }
755 
756 static inline void
757 update_stats_wait_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
758 {
759 	schedstat_set(se->statistics.wait_start, rq_clock(rq_of(cfs_rq)));
760 }
761 
762 /*
763  * Task is being enqueued - update stats:
764  */
765 static void update_stats_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
766 {
767 	/*
768 	 * Are we enqueueing a waiting task? (for current tasks
769 	 * a dequeue/enqueue event is a NOP)
770 	 */
771 	if (se != cfs_rq->curr)
772 		update_stats_wait_start(cfs_rq, se);
773 }
774 
775 static void
776 update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se)
777 {
778 	schedstat_set(se->statistics.wait_max, max(se->statistics.wait_max,
779 			rq_clock(rq_of(cfs_rq)) - se->statistics.wait_start));
780 	schedstat_set(se->statistics.wait_count, se->statistics.wait_count + 1);
781 	schedstat_set(se->statistics.wait_sum, se->statistics.wait_sum +
782 			rq_clock(rq_of(cfs_rq)) - se->statistics.wait_start);
783 #ifdef CONFIG_SCHEDSTATS
784 	if (entity_is_task(se)) {
785 		trace_sched_stat_wait(task_of(se),
786 			rq_clock(rq_of(cfs_rq)) - se->statistics.wait_start);
787 	}
788 #endif
789 	schedstat_set(se->statistics.wait_start, 0);
790 }
791 
792 static inline void
793 update_stats_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
794 {
795 	/*
796 	 * Mark the end of the wait period if dequeueing a
797 	 * waiting task:
798 	 */
799 	if (se != cfs_rq->curr)
800 		update_stats_wait_end(cfs_rq, se);
801 }
802 
803 /*
804  * We are picking a new current task - update its stats:
805  */
806 static inline void
807 update_stats_curr_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
808 {
809 	/*
810 	 * We are starting a new run period:
811 	 */
812 	se->exec_start = rq_clock_task(rq_of(cfs_rq));
813 }
814 
815 /**************************************************
816  * Scheduling class queueing methods:
817  */
818 
819 #ifdef CONFIG_NUMA_BALANCING
820 /*
821  * numa task sample period in ms
822  */
823 unsigned int sysctl_numa_balancing_scan_period_min = 100;
824 unsigned int sysctl_numa_balancing_scan_period_max = 100*50;
825 unsigned int sysctl_numa_balancing_scan_period_reset = 100*600;
826 
827 /* Portion of address space to scan in MB */
828 unsigned int sysctl_numa_balancing_scan_size = 256;
829 
830 /* Scan @scan_size MB every @scan_period after an initial @scan_delay in ms */
831 unsigned int sysctl_numa_balancing_scan_delay = 1000;
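
/*
 * Editor's note: with the values above, scanning starts 1000ms after a
 * task first runs, covers 256MB per pass, and backs off from the 100ms
 * minimum period up to 100*50 = 5000ms; the period is reset every
 * 100*600 = 60000ms in case the workload changes phase.
 */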
832 
833 static void task_numa_placement(struct task_struct *p)
834 {
835 	int seq;
836 
837 	if (!p->mm)	/* for example, ksmd faulting in a user's mm */
838 		return;
839 	seq = ACCESS_ONCE(p->mm->numa_scan_seq);
840 	if (p->numa_scan_seq == seq)
841 		return;
842 	p->numa_scan_seq = seq;
843 
844 	/* FIXME: Scheduling placement policy hints go here */
845 }
846 
847 /*
848  * Got a PROT_NONE fault for a page on @node.
849  */
850 void task_numa_fault(int node, int pages, bool migrated)
851 {
852 	struct task_struct *p = current;
853 
854 	if (!numabalancing_enabled)
855 		return;
856 
857 	/* FIXME: Allocate task-specific structure for placement policy here */
858 
859 	/*
860 	 * If pages are properly placed (did not migrate) then scan slower.
861 	 * This is reset periodically in case of phase changes.
862 	 */
863 	if (!migrated)
864 		p->numa_scan_period = min(sysctl_numa_balancing_scan_period_max,
865 			p->numa_scan_period + jiffies_to_msecs(10));
866 
867 	task_numa_placement(p);
868 }
869 
870 static void reset_ptenuma_scan(struct task_struct *p)
871 {
872 	ACCESS_ONCE(p->mm->numa_scan_seq)++;
873 	p->mm->numa_scan_offset = 0;
874 }
875 
876 /*
877  * The expensive part of numa migration is done from task_work context.
878  * Triggered from task_tick_numa().
879  */
880 void task_numa_work(struct callback_head *work)
881 {
882 	unsigned long migrate, next_scan, now = jiffies;
883 	struct task_struct *p = current;
884 	struct mm_struct *mm = p->mm;
885 	struct vm_area_struct *vma;
886 	unsigned long start, end;
887 	long pages;
888 
889 	WARN_ON_ONCE(p != container_of(work, struct task_struct, numa_work));
890 
891 	work->next = work; /* protect against double add */
892 	/*
893 	 * Who cares about NUMA placement when they're dying.
894 	 *
895 	 * NOTE: make sure not to dereference p->mm before this check,
896 	 * exit_task_work() happens _after_ exit_mm() so we could be called
897 	 * without p->mm even though we still had it when we enqueued this
898 	 * work.
899 	 */
900 	if (p->flags & PF_EXITING)
901 		return;
902 
903 	/*
904 	 * We do not care about task placement until a task runs on a node
905 	 * other than the first one used by the address space. This is
906 	 * largely because migrations are driven by what CPU the task
907 	 * is running on. If it's never scheduled on another node, it'll
908 	 * not migrate so why bother trapping the fault.
909 	 */
910 	if (mm->first_nid == NUMA_PTE_SCAN_INIT)
911 		mm->first_nid = numa_node_id();
912 	if (mm->first_nid != NUMA_PTE_SCAN_ACTIVE) {
913 		/* Are we running on a new node yet? */
914 		if (numa_node_id() == mm->first_nid &&
915 		    !sched_feat_numa(NUMA_FORCE))
916 			return;
917 
918 		mm->first_nid = NUMA_PTE_SCAN_ACTIVE;
919 	}
920 
921 	/*
922 	 * Reset the scan period if enough time has gone by. Objective is that
923 	 * scanning will be reduced if pages are properly placed. As tasks
924 	 * can enter different phases this needs to be re-examined. Lacking
925 	 * proper tracking of reference behaviour, this blunt hammer is used.
926 	 */
927 	migrate = mm->numa_next_reset;
928 	if (time_after(now, migrate)) {
929 		p->numa_scan_period = sysctl_numa_balancing_scan_period_min;
930 		next_scan = now + msecs_to_jiffies(sysctl_numa_balancing_scan_period_reset);
931 		xchg(&mm->numa_next_reset, next_scan);
932 	}
933 
934 	/*
935 	 * Enforce maximal scan/migration frequency.
936 	 */
937 	migrate = mm->numa_next_scan;
938 	if (time_before(now, migrate))
939 		return;
940 
941 	if (p->numa_scan_period == 0)
942 		p->numa_scan_period = sysctl_numa_balancing_scan_period_min;
943 
944 	next_scan = now + msecs_to_jiffies(p->numa_scan_period);
945 	if (cmpxchg(&mm->numa_next_scan, migrate, next_scan) != migrate)
946 		return;
947 
948 	/*
949 	 * Do not set pte_numa if the current running node is rate-limited.
950 	 * This loses statistics on the fault but if we are unwilling to
951 	 * migrate to this node, it is less likely we can do useful work
952 	 */
953 	if (migrate_ratelimited(numa_node_id()))
954 		return;
955 
956 	start = mm->numa_scan_offset;
957 	pages = sysctl_numa_balancing_scan_size;
958 	pages <<= 20 - PAGE_SHIFT; /* MB in pages */
959 	if (!pages)
960 		return;
961 
962 	down_read(&mm->mmap_sem);
963 	vma = find_vma(mm, start);
964 	if (!vma) {
965 		reset_ptenuma_scan(p);
966 		start = 0;
967 		vma = mm->mmap;
968 	}
969 	for (; vma; vma = vma->vm_next) {
970 		if (!vma_migratable(vma))
971 			continue;
972 
973 		/* Skip small VMAs. They are not likely to be of relevance */
974 		if (vma->vm_end - vma->vm_start < HPAGE_SIZE)
975 			continue;
976 
977 		do {
978 			start = max(start, vma->vm_start);
979 			end = ALIGN(start + (pages << PAGE_SHIFT), HPAGE_SIZE);
980 			end = min(end, vma->vm_end);
981 			pages -= change_prot_numa(vma, start, end);
982 
983 			start = end;
984 			if (pages <= 0)
985 				goto out;
986 		} while (end != vma->vm_end);
987 	}
988 
989 out:
990 	/*
991 	 * It is possible to reach the end of the VMA list but the last few VMAs
992 	 * are not guaranteed to be migratable. If they are not, we would find
993 	 * the !migratable VMA on the next scan but not reset the scanner to the
994 	 * start, so check it now.
995 	 */
996 	if (vma)
997 		mm->numa_scan_offset = start;
998 	else
999 		reset_ptenuma_scan(p);
1000 	up_read(&mm->mmap_sem);
1001 }
1002 
1003 /*
1004  * Drive the periodic memory faults.
1005  */
1006 void task_tick_numa(struct rq *rq, struct task_struct *curr)
1007 {
1008 	struct callback_head *work = &curr->numa_work;
1009 	u64 period, now;
1010 
1011 	/*
1012 	 * We don't care about NUMA placement if we don't have memory.
1013 	 */
1014 	if (!curr->mm || (curr->flags & PF_EXITING) || work->next != work)
1015 		return;
1016 
1017 	/*
1018 	 * Using runtime rather than walltime has the dual advantage that
1019 	 * we (mostly) drive the selection from busy threads and that the
1020 	 * task needs to have done some actual work before we bother with
1021 	 * NUMA placement.
1022 	 */
1023 	now = curr->se.sum_exec_runtime;
1024 	period = (u64)curr->numa_scan_period * NSEC_PER_MSEC;
1025 
1026 	if (now - curr->node_stamp > period) {
1027 		if (!curr->node_stamp)
1028 			curr->numa_scan_period = sysctl_numa_balancing_scan_period_min;
1029 		curr->node_stamp = now;
1030 
1031 		if (!time_before(jiffies, curr->mm->numa_next_scan)) {
1032 			init_task_work(work, task_numa_work); /* TODO: move this into sched_fork() */
1033 			task_work_add(curr, work, true);
1034 		}
1035 	}
1036 }
1037 #else
1038 static void task_tick_numa(struct rq *rq, struct task_struct *curr)
1039 {
1040 }
1041 #endif /* CONFIG_NUMA_BALANCING */
1042 
1043 static void
1044 account_entity_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
1045 {
1046 	update_load_add(&cfs_rq->load, se->load.weight);
1047 	if (!parent_entity(se))
1048 		update_load_add(&rq_of(cfs_rq)->load, se->load.weight);
1049 #ifdef CONFIG_SMP
1050 	if (entity_is_task(se))
1051 		list_add(&se->group_node, &rq_of(cfs_rq)->cfs_tasks);
1052 #endif
1053 	cfs_rq->nr_running++;
1054 }
1055 
1056 static void
1057 account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
1058 {
1059 	update_load_sub(&cfs_rq->load, se->load.weight);
1060 	if (!parent_entity(se))
1061 		update_load_sub(&rq_of(cfs_rq)->load, se->load.weight);
1062 	if (entity_is_task(se))
1063 		list_del_init(&se->group_node);
1064 	cfs_rq->nr_running--;
1065 }
1066 
1067 #ifdef CONFIG_FAIR_GROUP_SCHED
1068 # ifdef CONFIG_SMP
1069 static inline long calc_tg_weight(struct task_group *tg, struct cfs_rq *cfs_rq)
1070 {
1071 	long tg_weight;
1072 
1073 	/*
1074 	 * Use this CPU's actual weight instead of the last load_contribution
1075 	 * to gain a more accurate current total weight. See
1076 	 * update_cfs_rq_load_contribution().
1077 	 */
1078 	tg_weight = atomic_long_read(&tg->load_avg);
1079 	tg_weight -= cfs_rq->tg_load_contrib;
1080 	tg_weight += cfs_rq->load.weight;
1081 
1082 	return tg_weight;
1083 }
1084 
1085 static long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg)
1086 {
1087 	long tg_weight, load, shares;
1088 
1089 	tg_weight = calc_tg_weight(tg, cfs_rq);
1090 	load = cfs_rq->load.weight;
1091 
1092 	shares = (tg->shares * load);
1093 	if (tg_weight)
1094 		shares /= tg_weight;
1095 
1096 	if (shares < MIN_SHARES)
1097 		shares = MIN_SHARES;
1098 	if (shares > tg->shares)
1099 		shares = tg->shares;
1100 
1101 	return shares;
1102 }
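
/*
 * Worked example (editor's illustration): a group with tg->shares =
 * 1024 and a total weight of 2048, of which 512 sits on this cpu, gets
 * shares = 1024 * 512 / 2048 = 256 here, subject to the clamp to
 * [MIN_SHARES, tg->shares].
 */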
1103 # else /* CONFIG_SMP */
1104 static inline long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg)
1105 {
1106 	return tg->shares;
1107 }
1108 # endif /* CONFIG_SMP */
1109 static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
1110 			    unsigned long weight)
1111 {
1112 	if (se->on_rq) {
1113 		/* commit outstanding execution time */
1114 		if (cfs_rq->curr == se)
1115 			update_curr(cfs_rq);
1116 		account_entity_dequeue(cfs_rq, se);
1117 	}
1118 
1119 	update_load_set(&se->load, weight);
1120 
1121 	if (se->on_rq)
1122 		account_entity_enqueue(cfs_rq, se);
1123 }
1124 
1125 static inline int throttled_hierarchy(struct cfs_rq *cfs_rq);
1126 
1127 static void update_cfs_shares(struct cfs_rq *cfs_rq)
1128 {
1129 	struct task_group *tg;
1130 	struct sched_entity *se;
1131 	long shares;
1132 
1133 	tg = cfs_rq->tg;
1134 	se = tg->se[cpu_of(rq_of(cfs_rq))];
1135 	if (!se || throttled_hierarchy(cfs_rq))
1136 		return;
1137 #ifndef CONFIG_SMP
1138 	if (likely(se->load.weight == tg->shares))
1139 		return;
1140 #endif
1141 	shares = calc_cfs_shares(cfs_rq, tg);
1142 
1143 	reweight_entity(cfs_rq_of(se), se, shares);
1144 }
1145 #else /* CONFIG_FAIR_GROUP_SCHED */
1146 static inline void update_cfs_shares(struct cfs_rq *cfs_rq)
1147 {
1148 }
1149 #endif /* CONFIG_FAIR_GROUP_SCHED */
1150 
1151 #ifdef CONFIG_SMP
1152 /*
1153  * We choose a half-life close to 1 scheduling period.
1154  * Note: The tables below are dependent on this value.
1155  */
1156 #define LOAD_AVG_PERIOD 32
1157 #define LOAD_AVG_MAX 47742 /* maximum possible load avg */
1158 #define LOAD_AVG_MAX_N 345 /* number of full periods to produce LOAD_AVG_MAX */
1159 
1160 /* Precomputed fixed inverse multiplies for multiplication by y^n */
1161 static const u32 runnable_avg_yN_inv[] = {
1162 	0xffffffff, 0xfa83b2da, 0xf5257d14, 0xefe4b99a, 0xeac0c6e6, 0xe5b906e6,
1163 	0xe0ccdeeb, 0xdbfbb796, 0xd744fcc9, 0xd2a81d91, 0xce248c14, 0xc9b9bd85,
1164 	0xc5672a10, 0xc12c4cc9, 0xbd08a39e, 0xb8fbaf46, 0xb504f333, 0xb123f581,
1165 	0xad583ee9, 0xa9a15ab4, 0xa5fed6a9, 0xa2704302, 0x9ef5325f, 0x9b8d39b9,
1166 	0x9837f050, 0x94f4efa8, 0x91c3d373, 0x8ea4398a, 0x8b95c1e3, 0x88980e80,
1167 	0x85aac367, 0x82cd8698,
1168 };
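
/*
 * Editor's note: y = 0.5^(1/32) ~= 0.97857 and runnable_avg_yN_inv[n]
 * holds y^n scaled by 2^32; e.g. runnable_avg_yN_inv[1] = 0xfa83b2da
 * ~= 0.97857 * 2^32, so one decay step is (val * 0xfa83b2da) >> 32.
 */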
1169 
1170 /*
1171  * Precomputed \Sum y^k { 1<=k<=n }.  These are floor(true_value) to prevent
1172  * over-estimates when re-combining.
1173  */
1174 static const u32 runnable_avg_yN_sum[] = {
1175 	    0, 1002, 1982, 2941, 3880, 4798, 5697, 6576, 7437, 8279, 9103,
1176 	 9909,10698,11470,12226,12966,13690,14398,15091,15769,16433,17082,
1177 	17718,18340,18949,19545,20128,20698,21256,21802,22336,22859,23371,
1178 };
1179 
1180 /*
1181  * Approximate:
1182  *   val * y^n,    where y^32 ~= 0.5 (~1 scheduling period)
1183  */
1184 static __always_inline u64 decay_load(u64 val, u64 n)
1185 {
1186 	unsigned int local_n;
1187 
1188 	if (!n)
1189 		return val;
1190 	else if (unlikely(n > LOAD_AVG_PERIOD * 63))
1191 		return 0;
1192 
1193 	/* after bounds checking we can collapse to 32-bit */
1194 	local_n = n;
1195 
1196 	/*
1197 	 * As y^PERIOD = 1/2, we can combine
1198 	 *    y^n = 1/2^(n/PERIOD) * y^(n%PERIOD)
1199 	 * with a look-up table that covers y^n (n < PERIOD)
1200 	 *
1201 	 * to achieve a constant-time decay_load().
1202 	 */
1203 	if (unlikely(local_n >= LOAD_AVG_PERIOD)) {
1204 		val >>= local_n / LOAD_AVG_PERIOD;
1205 		local_n %= LOAD_AVG_PERIOD;
1206 	}
1207 
1208 	val *= runnable_avg_yN_inv[local_n];
1209 	/* We don't use SRR here since we always want to round down. */
1210 	return val >> 32;
1211 }
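
/*
 * Worked example (editor's illustration): decay_load(1024, 1) =
 * (1024 * 0xfa83b2da) >> 32 = 1002, while decay_load(1024, 32) first
 * halves val via the shift and then multiplies by
 * runnable_avg_yN_inv[0] = 0xffffffff, giving 511: a hair under the
 * exact 512 because the table is floored and, as noted above, we
 * always round down.
 */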
1212 
1213 /*
1214  * For updates fully spanning n periods, the contribution to runnable
1215  * average will be: \Sum 1024*y^n
1216  *
1217  * We can compute this reasonably efficiently by combining:
1218  *   y^PERIOD = 1/2 with precomputed \Sum 1024*y^n {for n < PERIOD}
1219  */
1220 static u32 __compute_runnable_contrib(u64 n)
1221 {
1222 	u32 contrib = 0;
1223 
1224 	if (likely(n <= LOAD_AVG_PERIOD))
1225 		return runnable_avg_yN_sum[n];
1226 	else if (unlikely(n >= LOAD_AVG_MAX_N))
1227 		return LOAD_AVG_MAX;
1228 
1229 	/* Compute \Sum y^n by combining precomputed values for y^i and \Sum y^j */
1230 	do {
1231 		contrib /= 2; /* y^LOAD_AVG_PERIOD = 1/2 */
1232 		contrib += runnable_avg_yN_sum[LOAD_AVG_PERIOD];
1233 
1234 		n -= LOAD_AVG_PERIOD;
1235 	} while (n > LOAD_AVG_PERIOD);
1236 
1237 	contrib = decay_load(contrib, n);
1238 	return contrib + runnable_avg_yN_sum[n];
1239 }
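
/*
 * Worked example (editor's illustration): for n = 40 the loop runs once
 * (contrib = 23371, n = 8), after which contrib is decayed by y^8
 * (~0.841) to roughly 19650 and runnable_avg_yN_sum[8] = 7437 is added,
 * giving about 27100 out of the LOAD_AVG_MAX of 47742.
 */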
1240 
1241 /*
1242  * We can represent the historical contribution to runnable average as the
1243  * coefficients of a geometric series.  To do this we sub-divide our runnable
1244  * history into segments of approximately 1ms (1024us); label the segment that
1245  * occurred N-ms ago p_N, with p_0 corresponding to the current period, e.g.
1246  *
1247  * [<- 1024us ->|<- 1024us ->|<- 1024us ->| ...
1248  *      p0            p1           p2
1249  *     (now)       (~1ms ago)  (~2ms ago)
1250  *
1251  * Let u_i denote the fraction of p_i that the entity was runnable.
1252  *
1253  * We then designate the fractions u_i as our co-efficients, yielding the
1254  * following representation of historical load:
1255  *   u_0 + u_1*y + u_2*y^2 + u_3*y^3 + ...
1256  *
1257  * We choose y based on the width of a reasonable scheduling period, fixing:
1258  *   y^32 = 0.5
1259  *
1260  * This means that the contribution to load ~32ms ago (u_32) will be weighted
1261  * approximately half as much as the contribution to load within the last ms
1262  * (u_0).
1263  *
1264  * When a period "rolls over" and we have new u_0`, multiplying the previous
1265  * sum again by y is sufficient to update:
1266  *   load_avg = u_0` + y*(u_0 + u_1*y + u_2*y^2 + ... )
1267  *            = u_0 + u_1*y + u_2*y^2 + ... [re-labeling u_i --> u_{i+1}]
1268  *            = u_0 + u_1*y + u_2*y^2 + ... [re-labeling u_i --> u_{i+1}]
 */
1269 static __always_inline int __update_entity_runnable_avg(u64 now,
1270 							struct sched_avg *sa,
1271 							int runnable)
1272 {
1273 	u64 delta, periods;
1274 	u32 runnable_contrib;
1275 	int delta_w, decayed = 0;
1276 
1277 	delta = now - sa->last_runnable_update;
1278 	/*
1279 	 * This should only happen when time goes backwards, which it
1280 	 * unfortunately does during sched clock init when we swap over to TSC.
1281 	 */
1282 	if ((s64)delta < 0) {
1283 		sa->last_runnable_update = now;
1284 		return 0;
1285 	}
1286 
1287 	/*
1288 	 * Use 1024ns as the unit of measurement since it's a reasonable
1289 	 * approximation of 1us and fast to compute.
1290 	 */
1291 	delta >>= 10;
1292 	if (!delta)
1293 		return 0;
1294 	sa->last_runnable_update = now;
1295 
1296 	/* delta_w is the amount already accumulated against our next period */
1297 	delta_w = sa->runnable_avg_period % 1024;
1298 	if (delta + delta_w >= 1024) {
1299 		/* period roll-over */
1300 		decayed = 1;
1301 
1302 		/*
1303 		 * Now that we know we're crossing a period boundary, figure
1304 		 * out how much from delta we need to complete the current
1305 		 * period and accrue it.
1306 		 */
1307 		delta_w = 1024 - delta_w;
1308 		if (runnable)
1309 			sa->runnable_avg_sum += delta_w;
1310 		sa->runnable_avg_period += delta_w;
1311 
1312 		delta -= delta_w;
1313 
1314 		/* Figure out how many additional periods this update spans */
1315 		periods = delta / 1024;
1316 		delta %= 1024;
1317 
1318 		sa->runnable_avg_sum = decay_load(sa->runnable_avg_sum,
1319 						  periods + 1);
1320 		sa->runnable_avg_period = decay_load(sa->runnable_avg_period,
1321 						     periods + 1);
1322 
1323 		/* Efficiently calculate \sum (1..n_period) 1024*y^i */
1324 		runnable_contrib = __compute_runnable_contrib(periods);
1325 		if (runnable)
1326 			sa->runnable_avg_sum += runnable_contrib;
1327 		sa->runnable_avg_period += runnable_contrib;
1328 	}
1329 
1330 	/* Remainder of delta accrued against u_0` */
1331 	if (runnable)
1332 		sa->runnable_avg_sum += delta;
1333 	sa->runnable_avg_period += delta;
1334 
1335 	return decayed;
1336 }
1337 
1338 /* Synchronize an entity's decay with its parenting cfs_rq. */
1339 static inline u64 __synchronize_entity_decay(struct sched_entity *se)
1340 {
1341 	struct cfs_rq *cfs_rq = cfs_rq_of(se);
1342 	u64 decays = atomic64_read(&cfs_rq->decay_counter);
1343 
1344 	decays -= se->avg.decay_count;
1345 	if (!decays)
1346 		return 0;
1347 
1348 	se->avg.load_avg_contrib = decay_load(se->avg.load_avg_contrib, decays);
1349 	se->avg.decay_count = 0;
1350 
1351 	return decays;
1352 }
1353 
1354 #ifdef CONFIG_FAIR_GROUP_SCHED
1355 static inline void __update_cfs_rq_tg_load_contrib(struct cfs_rq *cfs_rq,
1356 						 int force_update)
1357 {
1358 	struct task_group *tg = cfs_rq->tg;
1359 	long tg_contrib;
1360 
1361 	tg_contrib = cfs_rq->runnable_load_avg + cfs_rq->blocked_load_avg;
1362 	tg_contrib -= cfs_rq->tg_load_contrib;
1363 
1364 	if (force_update || abs(tg_contrib) > cfs_rq->tg_load_contrib / 8) {
1365 		atomic_long_add(tg_contrib, &tg->load_avg);
1366 		cfs_rq->tg_load_contrib += tg_contrib;
1367 	}
1368 }
1369 
1370 /*
1371  * Aggregate cfs_rq runnable averages into an equivalent task_group
1372  * representation for computing load contributions.
1373  */
1374 static inline void __update_tg_runnable_avg(struct sched_avg *sa,
1375 						  struct cfs_rq *cfs_rq)
1376 {
1377 	struct task_group *tg = cfs_rq->tg;
1378 	long contrib;
1379 
1380 	/* The fraction of a cpu used by this cfs_rq */
1381 	contrib = div_u64(sa->runnable_avg_sum << NICE_0_SHIFT,
1382 			  sa->runnable_avg_period + 1);
1383 	contrib -= cfs_rq->tg_runnable_contrib;
1384 
1385 	if (abs(contrib) > cfs_rq->tg_runnable_contrib / 64) {
1386 		atomic_add(contrib, &tg->runnable_avg);
1387 		cfs_rq->tg_runnable_contrib += contrib;
1388 	}
1389 }
1390 
1391 static inline void __update_group_entity_contrib(struct sched_entity *se)
1392 {
1393 	struct cfs_rq *cfs_rq = group_cfs_rq(se);
1394 	struct task_group *tg = cfs_rq->tg;
1395 	int runnable_avg;
1396 
1397 	u64 contrib;
1398 
1399 	contrib = cfs_rq->tg_load_contrib * tg->shares;
1400 	se->avg.load_avg_contrib = div_u64(contrib,
1401 				     atomic_long_read(&tg->load_avg) + 1);
1402 
1403 	/*
1404 	 * For group entities we need to compute a correction term in the case
1405 	 * that they are consuming <1 cpu so that we would contribute the same
1406 	 * load as a task of equal weight.
1407 	 *
1408 	 * Explicitly co-ordinating this measurement would be expensive, but
1409 	 * fortunately the sum of each cpu's contribution forms a usable
1410 	 * lower-bound on the true value.
1411 	 *
1412 	 * Consider the aggregate of 2 contributions.  Either they are disjoint
1413 	 * (and the sum represents the true value) or they overlap and we are
1414 	 * understating by the aggregate of their overlap.
1415 	 *
1416 	 * Extending this to N cpus, for a given overlap, the maximum amount we
1417 	 * understate is then n_i(n_i+1)/2 * w_i where n_i is the number of
1418 	 * cpus that overlap for this interval and w_i is the interval width.
1419 	 *
1420 	 * On a small machine, the first term is well-bounded, which bounds the
1421 	 * total error since w_i is a subset of the period.  Whereas on a
1422 	 * larger machine, while this first term can be larger, any w_i of
1423 	 * consequential size is guaranteed to see n_i*w_i quickly converge to
1424 	 * our upper bound of 1-cpu.
1425 	 */
1426 	runnable_avg = atomic_read(&tg->runnable_avg);
1427 	if (runnable_avg < NICE_0_LOAD) {
1428 		se->avg.load_avg_contrib *= runnable_avg;
1429 		se->avg.load_avg_contrib >>= NICE_0_SHIFT;
1430 	}
1431 }
1432 #else
1433 static inline void __update_cfs_rq_tg_load_contrib(struct cfs_rq *cfs_rq,
1434 						 int force_update) {}
1435 static inline void __update_tg_runnable_avg(struct sched_avg *sa,
1436 						  struct cfs_rq *cfs_rq) {}
1437 static inline void __update_group_entity_contrib(struct sched_entity *se) {}
1438 #endif
1439 
1440 static inline void __update_task_entity_contrib(struct sched_entity *se)
1441 {
1442 	u32 contrib;
1443 
1444 	/* avoid overflowing a 32-bit type w/ SCHED_LOAD_SCALE */
1445 	contrib = se->avg.runnable_avg_sum * scale_load_down(se->load.weight);
1446 	contrib /= (se->avg.runnable_avg_period + 1);
1447 	se->avg.load_avg_contrib = scale_load(contrib);
1448 }
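
/*
 * Worked example (editor's illustration): a nice-0 task that has been
 * runnable about half the time has runnable_avg_sum/runnable_avg_period
 * ~= 0.5, so its load_avg_contrib works out to roughly half its weight,
 * i.e. ~512 in scale_load_down() units before the final scale_load().
 */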
1449 
1450 /* Compute the current contribution to load_avg by se, return any delta */
1451 static long __update_entity_load_avg_contrib(struct sched_entity *se)
1452 {
1453 	long old_contrib = se->avg.load_avg_contrib;
1454 
1455 	if (entity_is_task(se)) {
1456 		__update_task_entity_contrib(se);
1457 	} else {
1458 		__update_tg_runnable_avg(&se->avg, group_cfs_rq(se));
1459 		__update_group_entity_contrib(se);
1460 	}
1461 
1462 	return se->avg.load_avg_contrib - old_contrib;
1463 }
1464 
1465 static inline void subtract_blocked_load_contrib(struct cfs_rq *cfs_rq,
1466 						 long load_contrib)
1467 {
1468 	if (likely(load_contrib < cfs_rq->blocked_load_avg))
1469 		cfs_rq->blocked_load_avg -= load_contrib;
1470 	else
1471 		cfs_rq->blocked_load_avg = 0;
1472 }
1473 
1474 static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq);
1475 
1476 /* Update a sched_entity's runnable average */
1477 static inline void update_entity_load_avg(struct sched_entity *se,
1478 					  int update_cfs_rq)
1479 {
1480 	struct cfs_rq *cfs_rq = cfs_rq_of(se);
1481 	long contrib_delta;
1482 	u64 now;
1483 
1484 	/*
1485 	 * For a group entity we need to use the cfs_rq_clock_task() of its
1486 	 * owned cfs_rq, in case it is the parent of a throttled hierarchy.
1487 	 */
1488 	if (entity_is_task(se))
1489 		now = cfs_rq_clock_task(cfs_rq);
1490 	else
1491 		now = cfs_rq_clock_task(group_cfs_rq(se));
1492 
1493 	if (!__update_entity_runnable_avg(now, &se->avg, se->on_rq))
1494 		return;
1495 
1496 	contrib_delta = __update_entity_load_avg_contrib(se);
1497 
1498 	if (!update_cfs_rq)
1499 		return;
1500 
1501 	if (se->on_rq)
1502 		cfs_rq->runnable_load_avg += contrib_delta;
1503 	else
1504 		subtract_blocked_load_contrib(cfs_rq, -contrib_delta);
1505 }
1506 
1507 /*
1508  * Decay the load contributed by all blocked children and account this so that
1509  * their contribution may be appropriately discounted when they wake up.
1510  */
1511 static void update_cfs_rq_blocked_load(struct cfs_rq *cfs_rq, int force_update)
1512 {
1513 	u64 now = cfs_rq_clock_task(cfs_rq) >> 20;
1514 	u64 decays;
1515 
1516 	decays = now - cfs_rq->last_decay;
1517 	if (!decays && !force_update)
1518 		return;
1519 
1520 	if (atomic_long_read(&cfs_rq->removed_load)) {
1521 		unsigned long removed_load;
1522 		removed_load = atomic_long_xchg(&cfs_rq->removed_load, 0);
1523 		subtract_blocked_load_contrib(cfs_rq, removed_load);
1524 	}
1525 
1526 	if (decays) {
1527 		cfs_rq->blocked_load_avg = decay_load(cfs_rq->blocked_load_avg,
1528 						      decays);
1529 		atomic64_add(decays, &cfs_rq->decay_counter);
1530 		cfs_rq->last_decay = now;
1531 	}
1532 
1533 	__update_cfs_rq_tg_load_contrib(cfs_rq, force_update);
1534 }
1535 
1536 static inline void update_rq_runnable_avg(struct rq *rq, int runnable)
1537 {
1538 	__update_entity_runnable_avg(rq_clock_task(rq), &rq->avg, runnable);
1539 	__update_tg_runnable_avg(&rq->avg, &rq->cfs);
1540 }
1541 
1542 /* Add the load generated by se into cfs_rq's child load-average */
1543 static inline void enqueue_entity_load_avg(struct cfs_rq *cfs_rq,
1544 						  struct sched_entity *se,
1545 						  int wakeup)
1546 {
1547 	/*
1548 	 * We track migrations using entity decay_count <= 0; on a wake-up
1549 	 * migration we use a negative decay count to track the remote decays
1550 	 * accumulated while sleeping.
1551 	 *
1552 	 * Newly forked tasks are enqueued with se->avg.decay_count == 0; they
1553 	 * are seen by enqueue_entity_load_avg() as a migration with an already
1554 	 * constructed load_avg_contrib.
1555 	 */
1556 	if (unlikely(se->avg.decay_count <= 0)) {
1557 		se->avg.last_runnable_update = rq_clock_task(rq_of(cfs_rq));
1558 		if (se->avg.decay_count) {
1559 			/*
1560 			 * In a wake-up migration we have to approximate the
1561 			 * time sleeping.  This is because we can't synchronize
1562 			 * clock_task between the two cpus, and it is not
1563 			 * guaranteed to be read-safe.  Instead, we can
1564 			 * approximate this using our carried decays, which are
1565 			 * explicitly atomically readable.
1566 			 */
1567 			se->avg.last_runnable_update -= (-se->avg.decay_count)
1568 							<< 20;
1569 			update_entity_load_avg(se, 0);
1570 			/* Indicate that we're now synchronized and on-rq */
1571 			se->avg.decay_count = 0;
1572 		}
1573 		wakeup = 0;
1574 	} else {
1575 		/*
1576 		 * Task re-woke on same cpu (or else migrate_task_rq_fair()
1577 		 * would have made count negative); we must be careful to avoid
1578 		 * double-accounting blocked time after synchronizing decays.
1579 		 */
1580 		se->avg.last_runnable_update += __synchronize_entity_decay(se)
1581 							<< 20;
1582 	}
1583 
1584 	/* migrated tasks did not contribute to our blocked load */
1585 	if (wakeup) {
1586 		subtract_blocked_load_contrib(cfs_rq, se->avg.load_avg_contrib);
1587 		update_entity_load_avg(se, 0);
1588 	}
1589 
1590 	cfs_rq->runnable_load_avg += se->avg.load_avg_contrib;
1591 	/* we force update consideration on load-balancer moves */
1592 	update_cfs_rq_blocked_load(cfs_rq, !wakeup);
1593 }
1594 
1595 /*
1596  * Remove se's load from this cfs_rq child load-average; if the entity is
1597  * transitioning to a blocked state we track its projected decay using
1598  * blocked_load_avg.
1599  */
1600 static inline void dequeue_entity_load_avg(struct cfs_rq *cfs_rq,
1601 						  struct sched_entity *se,
1602 						  int sleep)
1603 {
1604 	update_entity_load_avg(se, 1);
1605 	/* we force update consideration on load-balancer moves */
1606 	update_cfs_rq_blocked_load(cfs_rq, !sleep);
1607 
1608 	cfs_rq->runnable_load_avg -= se->avg.load_avg_contrib;
1609 	if (sleep) {
1610 		cfs_rq->blocked_load_avg += se->avg.load_avg_contrib;
1611 		se->avg.decay_count = atomic64_read(&cfs_rq->decay_counter);
1612 	} /* migrations, e.g. sleep=0 leave decay_count == 0 */
1613 }
1614 
1615 /*
1616  * Update the rq's load with the elapsed running time before entering
1617  * idle. If the last scheduled task is not a CFS task, idle_enter will
1618  * be the only way to update the runnable statistic.
1619  */
1620 void idle_enter_fair(struct rq *this_rq)
1621 {
1622 	update_rq_runnable_avg(this_rq, 1);
1623 }
1624 
1625 /*
1626  * Update the rq's load with the elapsed idle time before a task is
1627  * scheduled. If the newly scheduled task is not a CFS task, idle_exit will
1628  * be the only way to update the runnable statistic.
1629  */
1630 void idle_exit_fair(struct rq *this_rq)
1631 {
1632 	update_rq_runnable_avg(this_rq, 0);
1633 }
1634 
1635 #else
1636 static inline void update_entity_load_avg(struct sched_entity *se,
1637 					  int update_cfs_rq) {}
1638 static inline void update_rq_runnable_avg(struct rq *rq, int runnable) {}
1639 static inline void enqueue_entity_load_avg(struct cfs_rq *cfs_rq,
1640 					   struct sched_entity *se,
1641 					   int wakeup) {}
1642 static inline void dequeue_entity_load_avg(struct cfs_rq *cfs_rq,
1643 					   struct sched_entity *se,
1644 					   int sleep) {}
1645 static inline void update_cfs_rq_blocked_load(struct cfs_rq *cfs_rq,
1646 					      int force_update) {}
1647 #endif
1648 
1649 static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
1650 {
1651 #ifdef CONFIG_SCHEDSTATS
1652 	struct task_struct *tsk = NULL;
1653 
1654 	if (entity_is_task(se))
1655 		tsk = task_of(se);
1656 
1657 	if (se->statistics.sleep_start) {
1658 		u64 delta = rq_clock(rq_of(cfs_rq)) - se->statistics.sleep_start;
1659 
1660 		if ((s64)delta < 0)
1661 			delta = 0;
1662 
1663 		if (unlikely(delta > se->statistics.sleep_max))
1664 			se->statistics.sleep_max = delta;
1665 
1666 		se->statistics.sleep_start = 0;
1667 		se->statistics.sum_sleep_runtime += delta;
1668 
1669 		if (tsk) {
1670 			account_scheduler_latency(tsk, delta >> 10, 1);
1671 			trace_sched_stat_sleep(tsk, delta);
1672 		}
1673 	}
1674 	if (se->statistics.block_start) {
1675 		u64 delta = rq_clock(rq_of(cfs_rq)) - se->statistics.block_start;
1676 
1677 		if ((s64)delta < 0)
1678 			delta = 0;
1679 
1680 		if (unlikely(delta > se->statistics.block_max))
1681 			se->statistics.block_max = delta;
1682 
1683 		se->statistics.block_start = 0;
1684 		se->statistics.sum_sleep_runtime += delta;
1685 
1686 		if (tsk) {
1687 			if (tsk->in_iowait) {
1688 				se->statistics.iowait_sum += delta;
1689 				se->statistics.iowait_count++;
1690 				trace_sched_stat_iowait(tsk, delta);
1691 			}
1692 
1693 			trace_sched_stat_blocked(tsk, delta);
1694 
1695 			/*
1696 			 * Blocking time is in units of nanosecs, so shift by
1697 			 * 20 to get a milliseconds-range estimation of the
1698 			 * amount of time that the task spent sleeping:
1699 			 */
1700 			if (unlikely(prof_on == SLEEP_PROFILING)) {
1701 				profile_hits(SLEEP_PROFILING,
1702 						(void *)get_wchan(tsk),
1703 						delta >> 20);
1704 			}
1705 			account_scheduler_latency(tsk, delta >> 10, 0);
1706 		}
1707 	}
1708 #endif
1709 }
1710 
1711 static void check_spread(struct cfs_rq *cfs_rq, struct sched_entity *se)
1712 {
1713 #ifdef CONFIG_SCHED_DEBUG
1714 	s64 d = se->vruntime - cfs_rq->min_vruntime;
1715 
1716 	if (d < 0)
1717 		d = -d;
1718 
1719 	if (d > 3*sysctl_sched_latency)
1720 		schedstat_inc(cfs_rq, nr_spread_over);
1721 #endif
1722 }
1723 
1724 static void
1725 place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
1726 {
1727 	u64 vruntime = cfs_rq->min_vruntime;
1728 
1729 	/*
1730 	 * The 'current' period is already promised to the current tasks,
1731 	 * however the extra weight of the new task will slow them down a
1732 	 * little; place the new task so that it fits in the slot that
1733 	 * stays open at the end.
1734 	 */
1735 	if (initial && sched_feat(START_DEBIT))
1736 		vruntime += sched_vslice(cfs_rq, se);
1737 
1738 	/* sleeps up to a single latency don't count. */
1739 	if (!initial) {
1740 		unsigned long thresh = sysctl_sched_latency;
1741 
1742 		/*
1743 		 * Halve their sleep time's effect, to allow
1744 		 * for a gentler effect of sleepers:
1745 		 */
1746 		if (sched_feat(GENTLE_FAIR_SLEEPERS))
1747 			thresh >>= 1;
1748 
1749 		vruntime -= thresh;
1750 	}
1751 
1752 	/* ensure we never gain time by being placed backwards. */
1753 	se->vruntime = max_vruntime(se->vruntime, vruntime);
1754 }
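
/*
 * Worked example (editor's illustration, unscaled defaults): with a 6ms
 * sysctl_sched_latency and GENTLE_FAIR_SLEEPERS set, a waking task is
 * placed at min_vruntime - 3ms; the max_vruntime() above then ensures a
 * short sleep can never move the task behind its own pre-sleep
 * vruntime, i.e. sleeping never banks extra credit.
 */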
1755 
1756 static void check_enqueue_throttle(struct cfs_rq *cfs_rq);
1757 
1758 static void
1759 enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
1760 {
1761 	/*
1762 	 * Update the normalized vruntime before updating min_vruntime
1763 	 * by calling update_curr().
1764 	 */
1765 	if (!(flags & ENQUEUE_WAKEUP) || (flags & ENQUEUE_WAKING))
1766 		se->vruntime += cfs_rq->min_vruntime;
1767 
1768 	/*
1769 	 * Update run-time statistics of the 'current'.
1770 	 */
1771 	update_curr(cfs_rq);
1772 	enqueue_entity_load_avg(cfs_rq, se, flags & ENQUEUE_WAKEUP);
1773 	account_entity_enqueue(cfs_rq, se);
1774 	update_cfs_shares(cfs_rq);
1775 
1776 	if (flags & ENQUEUE_WAKEUP) {
1777 		place_entity(cfs_rq, se, 0);
1778 		enqueue_sleeper(cfs_rq, se);
1779 	}
1780 
1781 	update_stats_enqueue(cfs_rq, se);
1782 	check_spread(cfs_rq, se);
1783 	if (se != cfs_rq->curr)
1784 		__enqueue_entity(cfs_rq, se);
1785 	se->on_rq = 1;
1786 
1787 	if (cfs_rq->nr_running == 1) {
1788 		list_add_leaf_cfs_rq(cfs_rq);
1789 		check_enqueue_throttle(cfs_rq);
1790 	}
1791 }
1792 
1793 static void __clear_buddies_last(struct sched_entity *se)
1794 {
1795 	for_each_sched_entity(se) {
1796 		struct cfs_rq *cfs_rq = cfs_rq_of(se);
1797 		if (cfs_rq->last == se)
1798 			cfs_rq->last = NULL;
1799 		else
1800 			break;
1801 	}
1802 }
1803 
1804 static void __clear_buddies_next(struct sched_entity *se)
1805 {
1806 	for_each_sched_entity(se) {
1807 		struct cfs_rq *cfs_rq = cfs_rq_of(se);
1808 		if (cfs_rq->next == se)
1809 			cfs_rq->next = NULL;
1810 		else
1811 			break;
1812 	}
1813 }
1814 
1815 static void __clear_buddies_skip(struct sched_entity *se)
1816 {
1817 	for_each_sched_entity(se) {
1818 		struct cfs_rq *cfs_rq = cfs_rq_of(se);
1819 		if (cfs_rq->skip == se)
1820 			cfs_rq->skip = NULL;
1821 		else
1822 			break;
1823 	}
1824 }
1825 
1826 static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se)
1827 {
1828 	if (cfs_rq->last == se)
1829 		__clear_buddies_last(se);
1830 
1831 	if (cfs_rq->next == se)
1832 		__clear_buddies_next(se);
1833 
1834 	if (cfs_rq->skip == se)
1835 		__clear_buddies_skip(se);
1836 }
1837 
1838 static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq);
1839 
1840 static void
1841 dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
1842 {
1843 	/*
1844 	 * Update run-time statistics of the 'current'.
1845 	 */
1846 	update_curr(cfs_rq);
1847 	dequeue_entity_load_avg(cfs_rq, se, flags & DEQUEUE_SLEEP);
1848 
1849 	update_stats_dequeue(cfs_rq, se);
1850 	if (flags & DEQUEUE_SLEEP) {
1851 #ifdef CONFIG_SCHEDSTATS
1852 		if (entity_is_task(se)) {
1853 			struct task_struct *tsk = task_of(se);
1854 
1855 			if (tsk->state & TASK_INTERRUPTIBLE)
1856 				se->statistics.sleep_start = rq_clock(rq_of(cfs_rq));
1857 			if (tsk->state & TASK_UNINTERRUPTIBLE)
1858 				se->statistics.block_start = rq_clock(rq_of(cfs_rq));
1859 		}
1860 #endif
1861 	}
1862 
1863 	clear_buddies(cfs_rq, se);
1864 
1865 	if (se != cfs_rq->curr)
1866 		__dequeue_entity(cfs_rq, se);
1867 	se->on_rq = 0;
1868 	account_entity_dequeue(cfs_rq, se);
1869 
1870 	/*
1871 	 * Normalize the entity after updating the min_vruntime because the
1872 	 * update can refer to the ->curr item and we need to reflect this
1873 	 * movement in our normalized position.
1874 	 */
1875 	if (!(flags & DEQUEUE_SLEEP))
1876 		se->vruntime -= cfs_rq->min_vruntime;
1877 
1878 	/* return excess runtime on last dequeue */
1879 	return_cfs_rq_runtime(cfs_rq);
1880 
1881 	update_min_vruntime(cfs_rq);
1882 	update_cfs_shares(cfs_rq);
1883 }
1884 
1885 /*
1886  * Preempt the current task with a newly woken task if needed:
1887  */
1888 static void
1889 check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
1890 {
1891 	unsigned long ideal_runtime, delta_exec;
1892 	struct sched_entity *se;
1893 	s64 delta;
1894 
1895 	ideal_runtime = sched_slice(cfs_rq, curr);
1896 	delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
1897 	if (delta_exec > ideal_runtime) {
1898 		resched_task(rq_of(cfs_rq)->curr);
1899 		/*
1900 		 * The current task ran long enough, ensure it doesn't get
1901 		 * re-elected due to buddy favours.
1902 		 */
1903 		clear_buddies(cfs_rq, curr);
1904 		return;
1905 	}
1906 
1907 	/*
1908 	 * Ensure that a task that missed wakeup preemption by a
1909 	 * narrow margin doesn't have to wait for a full slice.
1910 	 * This also mitigates buddy induced latencies under load.
1911 	 */
1912 	if (delta_exec < sysctl_sched_min_granularity)
1913 		return;
1914 
1915 	se = __pick_first_entity(cfs_rq);
1916 	delta = curr->vruntime - se->vruntime;
1917 
1918 	if (delta < 0)
1919 		return;
1920 
1921 	if (delta > ideal_runtime)
1922 		resched_task(rq_of(cfs_rq)->curr);
1923 }
1924 
1925 static void
1926 set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
1927 {
1928 	/* 'current' is not kept within the tree. */
1929 	if (se->on_rq) {
1930 		/*
		 * Any task has to be enqueued before it gets to execute on
1932 		 * a CPU. So account for the time it spent waiting on the
1933 		 * runqueue.
1934 		 */
1935 		update_stats_wait_end(cfs_rq, se);
1936 		__dequeue_entity(cfs_rq, se);
1937 	}
1938 
1939 	update_stats_curr_start(cfs_rq, se);
1940 	cfs_rq->curr = se;
1941 #ifdef CONFIG_SCHEDSTATS
1942 	/*
1943 	 * Track our maximum slice length, if the CPU's load is at
	 * least twice that of our own weight (i.e. don't track it
1945 	 * when there are only lesser-weight tasks around):
1946 	 */
1947 	if (rq_of(cfs_rq)->load.weight >= 2*se->load.weight) {
1948 		se->statistics.slice_max = max(se->statistics.slice_max,
1949 			se->sum_exec_runtime - se->prev_sum_exec_runtime);
1950 	}
1951 #endif
1952 	se->prev_sum_exec_runtime = se->sum_exec_runtime;
1953 }
1954 
1955 static int
1956 wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se);
1957 
1958 /*
1959  * Pick the next process, keeping these things in mind, in this order:
1960  * 1) keep things fair between processes/task groups
1961  * 2) pick the "next" process, since someone really wants that to run
1962  * 3) pick the "last" process, for cache locality
1963  * 4) do not run the "skip" process, if something else is available
1964  */
1965 static struct sched_entity *pick_next_entity(struct cfs_rq *cfs_rq)
1966 {
1967 	struct sched_entity *se = __pick_first_entity(cfs_rq);
1968 	struct sched_entity *left = se;
1969 
1970 	/*
1971 	 * Avoid running the skip buddy, if running something else can
1972 	 * be done without getting too unfair.
1973 	 */
1974 	if (cfs_rq->skip == se) {
1975 		struct sched_entity *second = __pick_next_entity(se);
1976 		if (second && wakeup_preempt_entity(second, left) < 1)
1977 			se = second;
1978 	}
1979 
1980 	/*
1981 	 * Prefer last buddy, try to return the CPU to a preempted task.
1982 	 */
1983 	if (cfs_rq->last && wakeup_preempt_entity(cfs_rq->last, left) < 1)
1984 		se = cfs_rq->last;
1985 
1986 	/*
1987 	 * Someone really wants this to run. If it's not unfair, run it.
1988 	 */
1989 	if (cfs_rq->next && wakeup_preempt_entity(cfs_rq->next, left) < 1)
1990 		se = cfs_rq->next;
1991 
1992 	clear_buddies(cfs_rq, se);
1993 
1994 	return se;
1995 }
1996 
1997 static void check_cfs_rq_runtime(struct cfs_rq *cfs_rq);
1998 
1999 static void put_prev_entity(struct cfs_rq *cfs_rq, struct sched_entity *prev)
2000 {
2001 	/*
2002 	 * If still on the runqueue then deactivate_task()
2003 	 * was not called and update_curr() has to be done:
2004 	 */
2005 	if (prev->on_rq)
2006 		update_curr(cfs_rq);
2007 
2008 	/* throttle cfs_rqs exceeding runtime */
2009 	check_cfs_rq_runtime(cfs_rq);
2010 
2011 	check_spread(cfs_rq, prev);
2012 	if (prev->on_rq) {
2013 		update_stats_wait_start(cfs_rq, prev);
2014 		/* Put 'current' back into the tree. */
2015 		__enqueue_entity(cfs_rq, prev);
2016 		/* in !on_rq case, update occurred at dequeue */
2017 		update_entity_load_avg(prev, 1);
2018 	}
2019 	cfs_rq->curr = NULL;
2020 }
2021 
2022 static void
2023 entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued)
2024 {
2025 	/*
2026 	 * Update run-time statistics of the 'current'.
2027 	 */
2028 	update_curr(cfs_rq);
2029 
2030 	/*
2031 	 * Ensure that runnable average is periodically updated.
2032 	 */
2033 	update_entity_load_avg(curr, 1);
2034 	update_cfs_rq_blocked_load(cfs_rq, 1);
2035 	update_cfs_shares(cfs_rq);
2036 
2037 #ifdef CONFIG_SCHED_HRTICK
2038 	/*
2039 	 * queued ticks are scheduled to match the slice, so don't bother
2040 	 * validating it and just reschedule.
2041 	 */
2042 	if (queued) {
2043 		resched_task(rq_of(cfs_rq)->curr);
2044 		return;
2045 	}
2046 	/*
2047 	 * don't let the period tick interfere with the hrtick preemption
2048 	 */
2049 	if (!sched_feat(DOUBLE_TICK) &&
2050 			hrtimer_active(&rq_of(cfs_rq)->hrtick_timer))
2051 		return;
2052 #endif
2053 
2054 	if (cfs_rq->nr_running > 1)
2055 		check_preempt_tick(cfs_rq, curr);
2056 }
2057 
2058 
2059 /**************************************************
2060  * CFS bandwidth control machinery
2061  */
2062 
2063 #ifdef CONFIG_CFS_BANDWIDTH
2064 
2065 #ifdef HAVE_JUMP_LABEL
2066 static struct static_key __cfs_bandwidth_used;
2067 
2068 static inline bool cfs_bandwidth_used(void)
2069 {
2070 	return static_key_false(&__cfs_bandwidth_used);
2071 }
2072 
2073 void account_cfs_bandwidth_used(int enabled, int was_enabled)
2074 {
2075 	/* only need to count groups transitioning between enabled/!enabled */
2076 	if (enabled && !was_enabled)
2077 		static_key_slow_inc(&__cfs_bandwidth_used);
2078 	else if (!enabled && was_enabled)
2079 		static_key_slow_dec(&__cfs_bandwidth_used);
2080 }
2081 #else /* HAVE_JUMP_LABEL */
2082 static bool cfs_bandwidth_used(void)
2083 {
2084 	return true;
2085 }
2086 
2087 void account_cfs_bandwidth_used(int enabled, int was_enabled) {}
2088 #endif /* HAVE_JUMP_LABEL */
2089 
2090 /*
2091  * default period for cfs group bandwidth.
2092  * default: 0.1s, units: nanoseconds
2093  */
2094 static inline u64 default_cfs_period(void)
2095 {
2096 	return 100000000ULL;
2097 }
2098 
2099 static inline u64 sched_cfs_bandwidth_slice(void)
2100 {
2101 	return (u64)sysctl_sched_cfs_bandwidth_slice * NSEC_PER_USEC;
2102 }
2103 
2104 /*
2105  * Replenish runtime according to assigned quota and update expiration time.
2106  * We use sched_clock_cpu directly instead of rq->clock to avoid adding
2107  * additional synchronization around rq->lock.
2108  *
2109  * requires cfs_b->lock
2110  */
2111 void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b)
2112 {
2113 	u64 now;
2114 
2115 	if (cfs_b->quota == RUNTIME_INF)
2116 		return;
2117 
2118 	now = sched_clock_cpu(smp_processor_id());
2119 	cfs_b->runtime = cfs_b->quota;
2120 	cfs_b->runtime_expires = now + ktime_to_ns(cfs_b->period);
2121 }
2122 
2123 static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg)
2124 {
2125 	return &tg->cfs_bandwidth;
2126 }
2127 
/* rq->clock_task normalized against any time this cfs_rq has spent throttled */
2129 static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq)
2130 {
2131 	if (unlikely(cfs_rq->throttle_count))
2132 		return cfs_rq->throttled_clock_task;
2133 
2134 	return rq_clock_task(rq_of(cfs_rq)) - cfs_rq->throttled_clock_task_time;
2135 }
2136 
2137 /* returns 0 on failure to allocate runtime */
2138 static int assign_cfs_rq_runtime(struct cfs_rq *cfs_rq)
2139 {
2140 	struct task_group *tg = cfs_rq->tg;
2141 	struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(tg);
2142 	u64 amount = 0, min_amount, expires;
2143 
2144 	/* note: this is a positive sum as runtime_remaining <= 0 */
2145 	min_amount = sched_cfs_bandwidth_slice() - cfs_rq->runtime_remaining;
2146 
2147 	raw_spin_lock(&cfs_b->lock);
2148 	if (cfs_b->quota == RUNTIME_INF)
2149 		amount = min_amount;
2150 	else {
2151 		/*
2152 		 * If the bandwidth pool has become inactive, then at least one
2153 		 * period must have elapsed since the last consumption.
		 * Refresh the global state and ensure that the bandwidth
		 * timer becomes active.
2156 		 */
2157 		if (!cfs_b->timer_active) {
2158 			__refill_cfs_bandwidth_runtime(cfs_b);
2159 			__start_cfs_bandwidth(cfs_b);
2160 		}
2161 
2162 		if (cfs_b->runtime > 0) {
2163 			amount = min(cfs_b->runtime, min_amount);
2164 			cfs_b->runtime -= amount;
2165 			cfs_b->idle = 0;
2166 		}
2167 	}
2168 	expires = cfs_b->runtime_expires;
2169 	raw_spin_unlock(&cfs_b->lock);
2170 
2171 	cfs_rq->runtime_remaining += amount;
2172 	/*
2173 	 * we may have advanced our local expiration to account for allowed
2174 	 * spread between our sched_clock and the one on which runtime was
2175 	 * issued.
2176 	 */
2177 	if ((s64)(expires - cfs_rq->runtime_expires) > 0)
2178 		cfs_rq->runtime_expires = expires;
2179 
2180 	return cfs_rq->runtime_remaining > 0;
2181 }
2182 
2183 /*
2184  * Note: This depends on the synchronization provided by sched_clock and the
2185  * fact that rq->clock snapshots this value.
2186  */
2187 static void expire_cfs_rq_runtime(struct cfs_rq *cfs_rq)
2188 {
2189 	struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
2190 
2191 	/* if the deadline is ahead of our clock, nothing to do */
2192 	if (likely((s64)(rq_clock(rq_of(cfs_rq)) - cfs_rq->runtime_expires) < 0))
2193 		return;
2194 
2195 	if (cfs_rq->runtime_remaining < 0)
2196 		return;
2197 
2198 	/*
2199 	 * If the local deadline has passed we have to consider the
2200 	 * possibility that our sched_clock is 'fast' and the global deadline
2201 	 * has not truly expired.
2202 	 *
	 * Fortunately we can determine whether this is the case by checking
2204 	 * whether the global deadline has advanced.
2205 	 */
2206 
2207 	if ((s64)(cfs_rq->runtime_expires - cfs_b->runtime_expires) >= 0) {
2208 		/* extend local deadline, drift is bounded above by 2 ticks */
2209 		cfs_rq->runtime_expires += TICK_NSEC;
2210 	} else {
2211 		/* global deadline is ahead, expiration has passed */
2212 		cfs_rq->runtime_remaining = 0;
2213 	}
2214 }
2215 
2216 static void __account_cfs_rq_runtime(struct cfs_rq *cfs_rq,
2217 				     unsigned long delta_exec)
2218 {
2219 	/* dock delta_exec before expiring quota (as it could span periods) */
2220 	cfs_rq->runtime_remaining -= delta_exec;
2221 	expire_cfs_rq_runtime(cfs_rq);
2222 
2223 	if (likely(cfs_rq->runtime_remaining > 0))
2224 		return;
2225 
2226 	/*
2227 	 * if we're unable to extend our runtime we resched so that the active
2228 	 * hierarchy can be throttled
2229 	 */
2230 	if (!assign_cfs_rq_runtime(cfs_rq) && likely(cfs_rq->curr))
2231 		resched_task(rq_of(cfs_rq)->curr);
2232 }
2233 
2234 static __always_inline
2235 void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, unsigned long delta_exec)
2236 {
2237 	if (!cfs_bandwidth_used() || !cfs_rq->runtime_enabled)
2238 		return;
2239 
2240 	__account_cfs_rq_runtime(cfs_rq, delta_exec);
2241 }
2242 
2243 static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq)
2244 {
2245 	return cfs_bandwidth_used() && cfs_rq->throttled;
2246 }
2247 
2248 /* check whether cfs_rq, or any parent, is throttled */
2249 static inline int throttled_hierarchy(struct cfs_rq *cfs_rq)
2250 {
2251 	return cfs_bandwidth_used() && cfs_rq->throttle_count;
2252 }
2253 
2254 /*
2255  * Ensure that neither of the group entities corresponding to src_cpu or
2256  * dest_cpu are members of a throttled hierarchy when performing group
2257  * load-balance operations.
2258  */
2259 static inline int throttled_lb_pair(struct task_group *tg,
2260 				    int src_cpu, int dest_cpu)
2261 {
2262 	struct cfs_rq *src_cfs_rq, *dest_cfs_rq;
2263 
2264 	src_cfs_rq = tg->cfs_rq[src_cpu];
2265 	dest_cfs_rq = tg->cfs_rq[dest_cpu];
2266 
2267 	return throttled_hierarchy(src_cfs_rq) ||
2268 	       throttled_hierarchy(dest_cfs_rq);
2269 }
2270 
2271 /* updated child weight may affect parent so we have to do this bottom up */
2272 static int tg_unthrottle_up(struct task_group *tg, void *data)
2273 {
2274 	struct rq *rq = data;
2275 	struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];
2276 
2277 	cfs_rq->throttle_count--;
2278 #ifdef CONFIG_SMP
2279 	if (!cfs_rq->throttle_count) {
2280 		/* adjust cfs_rq_clock_task() */
2281 		cfs_rq->throttled_clock_task_time += rq_clock_task(rq) -
2282 					     cfs_rq->throttled_clock_task;
2283 	}
2284 #endif
2285 
2286 	return 0;
2287 }
2288 
2289 static int tg_throttle_down(struct task_group *tg, void *data)
2290 {
2291 	struct rq *rq = data;
2292 	struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];
2293 
2294 	/* group is entering throttled state, stop time */
2295 	if (!cfs_rq->throttle_count)
2296 		cfs_rq->throttled_clock_task = rq_clock_task(rq);
2297 	cfs_rq->throttle_count++;
2298 
2299 	return 0;
2300 }
2301 
2302 static void throttle_cfs_rq(struct cfs_rq *cfs_rq)
2303 {
2304 	struct rq *rq = rq_of(cfs_rq);
2305 	struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
2306 	struct sched_entity *se;
2307 	long task_delta, dequeue = 1;
2308 
2309 	se = cfs_rq->tg->se[cpu_of(rq_of(cfs_rq))];
2310 
2311 	/* freeze hierarchy runnable averages while throttled */
2312 	rcu_read_lock();
2313 	walk_tg_tree_from(cfs_rq->tg, tg_throttle_down, tg_nop, (void *)rq);
2314 	rcu_read_unlock();
2315 
2316 	task_delta = cfs_rq->h_nr_running;
2317 	for_each_sched_entity(se) {
2318 		struct cfs_rq *qcfs_rq = cfs_rq_of(se);
2319 		/* throttled entity or throttle-on-deactivate */
2320 		if (!se->on_rq)
2321 			break;
2322 
2323 		if (dequeue)
2324 			dequeue_entity(qcfs_rq, se, DEQUEUE_SLEEP);
2325 		qcfs_rq->h_nr_running -= task_delta;
2326 
2327 		if (qcfs_rq->load.weight)
2328 			dequeue = 0;
2329 	}
2330 
2331 	if (!se)
2332 		rq->nr_running -= task_delta;
2333 
2334 	cfs_rq->throttled = 1;
2335 	cfs_rq->throttled_clock = rq_clock(rq);
2336 	raw_spin_lock(&cfs_b->lock);
2337 	list_add_tail_rcu(&cfs_rq->throttled_list, &cfs_b->throttled_cfs_rq);
2338 	raw_spin_unlock(&cfs_b->lock);
2339 }
2340 
2341 void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
2342 {
2343 	struct rq *rq = rq_of(cfs_rq);
2344 	struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
2345 	struct sched_entity *se;
2346 	int enqueue = 1;
2347 	long task_delta;
2348 
2349 	se = cfs_rq->tg->se[cpu_of(rq)];
2350 
2351 	cfs_rq->throttled = 0;
2352 
2353 	update_rq_clock(rq);
2354 
2355 	raw_spin_lock(&cfs_b->lock);
2356 	cfs_b->throttled_time += rq_clock(rq) - cfs_rq->throttled_clock;
2357 	list_del_rcu(&cfs_rq->throttled_list);
2358 	raw_spin_unlock(&cfs_b->lock);
2359 
2360 	/* update hierarchical throttle state */
2361 	walk_tg_tree_from(cfs_rq->tg, tg_nop, tg_unthrottle_up, (void *)rq);
2362 
2363 	if (!cfs_rq->load.weight)
2364 		return;
2365 
2366 	task_delta = cfs_rq->h_nr_running;
2367 	for_each_sched_entity(se) {
2368 		if (se->on_rq)
2369 			enqueue = 0;
2370 
2371 		cfs_rq = cfs_rq_of(se);
2372 		if (enqueue)
2373 			enqueue_entity(cfs_rq, se, ENQUEUE_WAKEUP);
2374 		cfs_rq->h_nr_running += task_delta;
2375 
2376 		if (cfs_rq_throttled(cfs_rq))
2377 			break;
2378 	}
2379 
2380 	if (!se)
2381 		rq->nr_running += task_delta;
2382 
2383 	/* determine whether we need to wake up potentially idle cpu */
2384 	if (rq->curr == rq->idle && rq->cfs.nr_running)
2385 		resched_task(rq->curr);
2386 }
2387 
2388 static u64 distribute_cfs_runtime(struct cfs_bandwidth *cfs_b,
2389 		u64 remaining, u64 expires)
2390 {
2391 	struct cfs_rq *cfs_rq;
2392 	u64 runtime = remaining;
2393 
2394 	rcu_read_lock();
2395 	list_for_each_entry_rcu(cfs_rq, &cfs_b->throttled_cfs_rq,
2396 				throttled_list) {
2397 		struct rq *rq = rq_of(cfs_rq);
2398 
2399 		raw_spin_lock(&rq->lock);
2400 		if (!cfs_rq_throttled(cfs_rq))
2401 			goto next;
2402 
2403 		runtime = -cfs_rq->runtime_remaining + 1;
2404 		if (runtime > remaining)
2405 			runtime = remaining;
2406 		remaining -= runtime;
2407 
2408 		cfs_rq->runtime_remaining += runtime;
2409 		cfs_rq->runtime_expires = expires;
2410 
		/* we checked whether we were throttled above */
2412 		if (cfs_rq->runtime_remaining > 0)
2413 			unthrottle_cfs_rq(cfs_rq);
2414 
2415 next:
2416 		raw_spin_unlock(&rq->lock);
2417 
2418 		if (!remaining)
2419 			break;
2420 	}
2421 	rcu_read_unlock();
2422 
2423 	return remaining;
2424 }
2425 
2426 /*
2427  * Responsible for refilling a task_group's bandwidth and unthrottling its
2428  * cfs_rqs as appropriate. If there has been no activity within the last
2429  * period the timer is deactivated until scheduling resumes; cfs_b->idle is
2430  * used to track this state.
2431  */
2432 static int do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b, int overrun)
2433 {
2434 	u64 runtime, runtime_expires;
2435 	int idle = 1, throttled;
2436 
2437 	raw_spin_lock(&cfs_b->lock);
2438 	/* no need to continue the timer with no bandwidth constraint */
2439 	if (cfs_b->quota == RUNTIME_INF)
2440 		goto out_unlock;
2441 
2442 	throttled = !list_empty(&cfs_b->throttled_cfs_rq);
2443 	/* idle depends on !throttled (for the case of a large deficit) */
2444 	idle = cfs_b->idle && !throttled;
2445 	cfs_b->nr_periods += overrun;
2446 
2447 	/* if we're going inactive then everything else can be deferred */
2448 	if (idle)
2449 		goto out_unlock;
2450 
2451 	__refill_cfs_bandwidth_runtime(cfs_b);
2452 
2453 	if (!throttled) {
2454 		/* mark as potentially idle for the upcoming period */
2455 		cfs_b->idle = 1;
2456 		goto out_unlock;
2457 	}
2458 
2459 	/* account preceding periods in which throttling occurred */
2460 	cfs_b->nr_throttled += overrun;
2461 
2462 	/*
2463 	 * There are throttled entities so we must first use the new bandwidth
2464 	 * to unthrottle them before making it generally available.  This
2465 	 * ensures that all existing debts will be paid before a new cfs_rq is
2466 	 * allowed to run.
2467 	 */
2468 	runtime = cfs_b->runtime;
2469 	runtime_expires = cfs_b->runtime_expires;
2470 	cfs_b->runtime = 0;
2471 
2472 	/*
2473 	 * This check is repeated as we are holding onto the new bandwidth
2474 	 * while we unthrottle.  This can potentially race with an unthrottled
2475 	 * group trying to acquire new bandwidth from the global pool.
2476 	 */
2477 	while (throttled && runtime > 0) {
2478 		raw_spin_unlock(&cfs_b->lock);
2479 		/* we can't nest cfs_b->lock while distributing bandwidth */
2480 		runtime = distribute_cfs_runtime(cfs_b, runtime,
2481 						 runtime_expires);
2482 		raw_spin_lock(&cfs_b->lock);
2483 
2484 		throttled = !list_empty(&cfs_b->throttled_cfs_rq);
2485 	}
2486 
2487 	/* return (any) remaining runtime */
2488 	cfs_b->runtime = runtime;
2489 	/*
2490 	 * While we are ensured activity in the period following an
2491 	 * unthrottle, this also covers the case in which the new bandwidth is
2492 	 * insufficient to cover the existing bandwidth deficit.  (Forcing the
2493 	 * timer to remain active while there are any throttled entities.)
2494 	 */
2495 	cfs_b->idle = 0;
2496 out_unlock:
2497 	if (idle)
2498 		cfs_b->timer_active = 0;
2499 	raw_spin_unlock(&cfs_b->lock);
2500 
2501 	return idle;
2502 }
2503 
2504 /* a cfs_rq won't donate quota below this amount */
2505 static const u64 min_cfs_rq_runtime = 1 * NSEC_PER_MSEC;
2506 /* minimum remaining period time to redistribute slack quota */
2507 static const u64 min_bandwidth_expiration = 2 * NSEC_PER_MSEC;
2508 /* how long we wait to gather additional slack before distributing */
2509 static const u64 cfs_bandwidth_slack_period = 5 * NSEC_PER_MSEC;
2510 
2511 /* are we near the end of the current quota period? */
2512 static int runtime_refresh_within(struct cfs_bandwidth *cfs_b, u64 min_expire)
2513 {
2514 	struct hrtimer *refresh_timer = &cfs_b->period_timer;
2515 	u64 remaining;
2516 
	/* if the callback is running, a quota refresh is already occurring */
2518 	if (hrtimer_callback_running(refresh_timer))
2519 		return 1;
2520 
2521 	/* is a quota refresh about to occur? */
2522 	remaining = ktime_to_ns(hrtimer_expires_remaining(refresh_timer));
2523 	if (remaining < min_expire)
2524 		return 1;
2525 
2526 	return 0;
2527 }
2528 
2529 static void start_cfs_slack_bandwidth(struct cfs_bandwidth *cfs_b)
2530 {
2531 	u64 min_left = cfs_bandwidth_slack_period + min_bandwidth_expiration;
2532 
2533 	/* if there's a quota refresh soon don't bother with slack */
2534 	if (runtime_refresh_within(cfs_b, min_left))
2535 		return;
2536 
2537 	start_bandwidth_timer(&cfs_b->slack_timer,
2538 				ns_to_ktime(cfs_bandwidth_slack_period));
2539 }
2540 
2541 /* we know any runtime found here is valid as update_curr() precedes return */
2542 static void __return_cfs_rq_runtime(struct cfs_rq *cfs_rq)
2543 {
2544 	struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
2545 	s64 slack_runtime = cfs_rq->runtime_remaining - min_cfs_rq_runtime;
2546 
2547 	if (slack_runtime <= 0)
2548 		return;
2549 
2550 	raw_spin_lock(&cfs_b->lock);
2551 	if (cfs_b->quota != RUNTIME_INF &&
2552 	    cfs_rq->runtime_expires == cfs_b->runtime_expires) {
2553 		cfs_b->runtime += slack_runtime;
2554 
2555 		/* we are under rq->lock, defer unthrottling using a timer */
2556 		if (cfs_b->runtime > sched_cfs_bandwidth_slice() &&
2557 		    !list_empty(&cfs_b->throttled_cfs_rq))
2558 			start_cfs_slack_bandwidth(cfs_b);
2559 	}
2560 	raw_spin_unlock(&cfs_b->lock);
2561 
2562 	/* even if it's not valid for return we don't want to try again */
2563 	cfs_rq->runtime_remaining -= slack_runtime;
2564 }
2565 
2566 static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq)
2567 {
2568 	if (!cfs_bandwidth_used())
2569 		return;
2570 
2571 	if (!cfs_rq->runtime_enabled || cfs_rq->nr_running)
2572 		return;
2573 
2574 	__return_cfs_rq_runtime(cfs_rq);
2575 }
2576 
2577 /*
2578  * This is done with a timer (instead of inline with bandwidth return) since
2579  * it's necessary to juggle rq->locks to unthrottle their respective cfs_rqs.
2580  */
2581 static void do_sched_cfs_slack_timer(struct cfs_bandwidth *cfs_b)
2582 {
2583 	u64 runtime = 0, slice = sched_cfs_bandwidth_slice();
2584 	u64 expires;
2585 
2586 	/* confirm we're still not at a refresh boundary */
2587 	if (runtime_refresh_within(cfs_b, min_bandwidth_expiration))
2588 		return;
2589 
2590 	raw_spin_lock(&cfs_b->lock);
2591 	if (cfs_b->quota != RUNTIME_INF && cfs_b->runtime > slice) {
2592 		runtime = cfs_b->runtime;
2593 		cfs_b->runtime = 0;
2594 	}
2595 	expires = cfs_b->runtime_expires;
2596 	raw_spin_unlock(&cfs_b->lock);
2597 
2598 	if (!runtime)
2599 		return;
2600 
2601 	runtime = distribute_cfs_runtime(cfs_b, runtime, expires);
2602 
2603 	raw_spin_lock(&cfs_b->lock);
2604 	if (expires == cfs_b->runtime_expires)
2605 		cfs_b->runtime = runtime;
2606 	raw_spin_unlock(&cfs_b->lock);
2607 }
2608 
2609 /*
2610  * When a group wakes up we want to make sure that its quota is not already
2611  * expired/exceeded, otherwise it may be allowed to steal additional ticks of
 * runtime, as update_curr() throttling cannot trigger until it's on-rq.
2613  */
2614 static void check_enqueue_throttle(struct cfs_rq *cfs_rq)
2615 {
2616 	if (!cfs_bandwidth_used())
2617 		return;
2618 
2619 	/* an active group must be handled by the update_curr()->put() path */
2620 	if (!cfs_rq->runtime_enabled || cfs_rq->curr)
2621 		return;
2622 
2623 	/* ensure the group is not already throttled */
2624 	if (cfs_rq_throttled(cfs_rq))
2625 		return;
2626 
2627 	/* update runtime allocation */
2628 	account_cfs_rq_runtime(cfs_rq, 0);
2629 	if (cfs_rq->runtime_remaining <= 0)
2630 		throttle_cfs_rq(cfs_rq);
2631 }
2632 
2633 /* conditionally throttle active cfs_rq's from put_prev_entity() */
2634 static void check_cfs_rq_runtime(struct cfs_rq *cfs_rq)
2635 {
2636 	if (!cfs_bandwidth_used())
2637 		return;
2638 
2639 	if (likely(!cfs_rq->runtime_enabled || cfs_rq->runtime_remaining > 0))
2640 		return;
2641 
2642 	/*
2643 	 * it's possible for a throttled entity to be forced into a running
2644 	 * state (e.g. set_curr_task), in this case we're finished.
2645 	 */
2646 	if (cfs_rq_throttled(cfs_rq))
2647 		return;
2648 
2649 	throttle_cfs_rq(cfs_rq);
2650 }
2651 
2652 static enum hrtimer_restart sched_cfs_slack_timer(struct hrtimer *timer)
2653 {
2654 	struct cfs_bandwidth *cfs_b =
2655 		container_of(timer, struct cfs_bandwidth, slack_timer);
2656 	do_sched_cfs_slack_timer(cfs_b);
2657 
2658 	return HRTIMER_NORESTART;
2659 }
2660 
2661 static enum hrtimer_restart sched_cfs_period_timer(struct hrtimer *timer)
2662 {
2663 	struct cfs_bandwidth *cfs_b =
2664 		container_of(timer, struct cfs_bandwidth, period_timer);
2665 	ktime_t now;
2666 	int overrun;
2667 	int idle = 0;
2668 
2669 	for (;;) {
2670 		now = hrtimer_cb_get_time(timer);
2671 		overrun = hrtimer_forward(timer, now, cfs_b->period);
2672 
2673 		if (!overrun)
2674 			break;
2675 
2676 		idle = do_sched_cfs_period_timer(cfs_b, overrun);
2677 	}
2678 
2679 	return idle ? HRTIMER_NORESTART : HRTIMER_RESTART;
2680 }
2681 
2682 void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
2683 {
2684 	raw_spin_lock_init(&cfs_b->lock);
2685 	cfs_b->runtime = 0;
2686 	cfs_b->quota = RUNTIME_INF;
2687 	cfs_b->period = ns_to_ktime(default_cfs_period());
2688 
2689 	INIT_LIST_HEAD(&cfs_b->throttled_cfs_rq);
2690 	hrtimer_init(&cfs_b->period_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
2691 	cfs_b->period_timer.function = sched_cfs_period_timer;
2692 	hrtimer_init(&cfs_b->slack_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
2693 	cfs_b->slack_timer.function = sched_cfs_slack_timer;
2694 }
2695 
2696 static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq)
2697 {
2698 	cfs_rq->runtime_enabled = 0;
2699 	INIT_LIST_HEAD(&cfs_rq->throttled_list);
2700 }
2701 
2702 /* requires cfs_b->lock, may release to reprogram timer */
2703 void __start_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
2704 {
2705 	/*
2706 	 * The timer may be active because we're trying to set a new bandwidth
2707 	 * period or because we're racing with the tear-down path
2708 	 * (timer_active==0 becomes visible before the hrtimer call-back
	 * terminates).  In either case we ensure that it's re-programmed.
2710 	 */
2711 	while (unlikely(hrtimer_active(&cfs_b->period_timer))) {
2712 		raw_spin_unlock(&cfs_b->lock);
2713 		/* ensure cfs_b->lock is available while we wait */
2714 		hrtimer_cancel(&cfs_b->period_timer);
2715 
2716 		raw_spin_lock(&cfs_b->lock);
2717 		/* if someone else restarted the timer then we're done */
2718 		if (cfs_b->timer_active)
2719 			return;
2720 	}
2721 
2722 	cfs_b->timer_active = 1;
2723 	start_bandwidth_timer(&cfs_b->period_timer, cfs_b->period);
2724 }
2725 
2726 static void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
2727 {
2728 	hrtimer_cancel(&cfs_b->period_timer);
2729 	hrtimer_cancel(&cfs_b->slack_timer);
2730 }
2731 
2732 static void __maybe_unused unthrottle_offline_cfs_rqs(struct rq *rq)
2733 {
2734 	struct cfs_rq *cfs_rq;
2735 
2736 	for_each_leaf_cfs_rq(rq, cfs_rq) {
2737 		struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
2738 
2739 		if (!cfs_rq->runtime_enabled)
2740 			continue;
2741 
2742 		/*
		 * clock_task is not advancing, so we just need to make sure
		 * there's some valid quota amount
2745 		 */
2746 		cfs_rq->runtime_remaining = cfs_b->quota;
2747 		if (cfs_rq_throttled(cfs_rq))
2748 			unthrottle_cfs_rq(cfs_rq);
2749 	}
2750 }
2751 
2752 #else /* CONFIG_CFS_BANDWIDTH */
2753 static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq)
2754 {
2755 	return rq_clock_task(rq_of(cfs_rq));
2756 }
2757 
2758 static void account_cfs_rq_runtime(struct cfs_rq *cfs_rq,
2759 				     unsigned long delta_exec) {}
2760 static void check_cfs_rq_runtime(struct cfs_rq *cfs_rq) {}
2761 static void check_enqueue_throttle(struct cfs_rq *cfs_rq) {}
2762 static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq) {}
2763 
2764 static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq)
2765 {
2766 	return 0;
2767 }
2768 
2769 static inline int throttled_hierarchy(struct cfs_rq *cfs_rq)
2770 {
2771 	return 0;
2772 }
2773 
2774 static inline int throttled_lb_pair(struct task_group *tg,
2775 				    int src_cpu, int dest_cpu)
2776 {
2777 	return 0;
2778 }
2779 
2780 void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b) {}
2781 
2782 #ifdef CONFIG_FAIR_GROUP_SCHED
2783 static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq) {}
2784 #endif
2785 
2786 static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg)
2787 {
2788 	return NULL;
2789 }
2790 static inline void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b) {}
2791 static inline void unthrottle_offline_cfs_rqs(struct rq *rq) {}
2792 
2793 #endif /* CONFIG_CFS_BANDWIDTH */
2794 
2795 /**************************************************
2796  * CFS operations on tasks:
2797  */
2798 
2799 #ifdef CONFIG_SCHED_HRTICK
2800 static void hrtick_start_fair(struct rq *rq, struct task_struct *p)
2801 {
2802 	struct sched_entity *se = &p->se;
2803 	struct cfs_rq *cfs_rq = cfs_rq_of(se);
2804 
2805 	WARN_ON(task_rq(p) != rq);
2806 
2807 	if (cfs_rq->nr_running > 1) {
2808 		u64 slice = sched_slice(cfs_rq, se);
2809 		u64 ran = se->sum_exec_runtime - se->prev_sum_exec_runtime;
2810 		s64 delta = slice - ran;
2811 
2812 		if (delta < 0) {
2813 			if (rq->curr == p)
2814 				resched_task(p);
2815 			return;
2816 		}
2817 
2818 		/*
2819 		 * Don't schedule slices shorter than 10000ns, that just
2820 		 * doesn't make sense. Rely on vruntime for fairness.
2821 		 */
2822 		if (rq->curr != p)
2823 			delta = max_t(s64, 10000LL, delta);
2824 
2825 		hrtick_start(rq, delta);
2826 	}
2827 }
2828 
2829 /*
2830  * called from enqueue/dequeue and updates the hrtick when the
2831  * current task is from our class and nr_running is low enough
2832  * to matter.
2833  */
2834 static void hrtick_update(struct rq *rq)
2835 {
2836 	struct task_struct *curr = rq->curr;
2837 
2838 	if (!hrtick_enabled(rq) || curr->sched_class != &fair_sched_class)
2839 		return;
2840 
2841 	if (cfs_rq_of(&curr->se)->nr_running < sched_nr_latency)
2842 		hrtick_start_fair(rq, curr);
2843 }
2844 #else /* !CONFIG_SCHED_HRTICK */
2845 static inline void
2846 hrtick_start_fair(struct rq *rq, struct task_struct *p)
2847 {
2848 }
2849 
2850 static inline void hrtick_update(struct rq *rq)
2851 {
2852 }
2853 #endif
2854 
2855 /*
2856  * The enqueue_task method is called before nr_running is
2857  * increased. Here we update the fair scheduling stats and
2858  * then put the task into the rbtree:
2859  */
2860 static void
2861 enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
2862 {
2863 	struct cfs_rq *cfs_rq;
2864 	struct sched_entity *se = &p->se;
2865 
2866 	for_each_sched_entity(se) {
2867 		if (se->on_rq)
2868 			break;
2869 		cfs_rq = cfs_rq_of(se);
2870 		enqueue_entity(cfs_rq, se, flags);
2871 
2872 		/*
2873 		 * end evaluation on encountering a throttled cfs_rq
2874 		 *
2875 		 * note: in the case of encountering a throttled cfs_rq we will
2876 		 * post the final h_nr_running increment below.
2877 		*/
2878 		if (cfs_rq_throttled(cfs_rq))
2879 			break;
2880 		cfs_rq->h_nr_running++;
2881 
2882 		flags = ENQUEUE_WAKEUP;
2883 	}
2884 
2885 	for_each_sched_entity(se) {
2886 		cfs_rq = cfs_rq_of(se);
2887 		cfs_rq->h_nr_running++;
2888 
2889 		if (cfs_rq_throttled(cfs_rq))
2890 			break;
2891 
2892 		update_cfs_shares(cfs_rq);
2893 		update_entity_load_avg(se, 1);
2894 	}
2895 
2896 	if (!se) {
2897 		update_rq_runnable_avg(rq, rq->nr_running);
2898 		inc_nr_running(rq);
2899 	}
2900 	hrtick_update(rq);
2901 }
2902 
2903 static void set_next_buddy(struct sched_entity *se);
2904 
2905 /*
2906  * The dequeue_task method is called before nr_running is
2907  * decreased. We remove the task from the rbtree and
2908  * update the fair scheduling stats:
2909  */
2910 static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
2911 {
2912 	struct cfs_rq *cfs_rq;
2913 	struct sched_entity *se = &p->se;
2914 	int task_sleep = flags & DEQUEUE_SLEEP;
2915 
2916 	for_each_sched_entity(se) {
2917 		cfs_rq = cfs_rq_of(se);
2918 		dequeue_entity(cfs_rq, se, flags);
2919 
2920 		/*
2921 		 * end evaluation on encountering a throttled cfs_rq
2922 		 *
2923 		 * note: in the case of encountering a throttled cfs_rq we will
2924 		 * post the final h_nr_running decrement below.
2925 		*/
2926 		if (cfs_rq_throttled(cfs_rq))
2927 			break;
2928 		cfs_rq->h_nr_running--;
2929 
2930 		/* Don't dequeue parent if it has other entities besides us */
2931 		if (cfs_rq->load.weight) {
2932 			/*
2933 			 * Bias pick_next to pick a task from this cfs_rq, as
2934 			 * p is sleeping when it is within its sched_slice.
2935 			 */
2936 			if (task_sleep && parent_entity(se))
2937 				set_next_buddy(parent_entity(se));
2938 
2939 			/* avoid re-evaluating load for this entity */
2940 			se = parent_entity(se);
2941 			break;
2942 		}
2943 		flags |= DEQUEUE_SLEEP;
2944 	}
2945 
2946 	for_each_sched_entity(se) {
2947 		cfs_rq = cfs_rq_of(se);
2948 		cfs_rq->h_nr_running--;
2949 
2950 		if (cfs_rq_throttled(cfs_rq))
2951 			break;
2952 
2953 		update_cfs_shares(cfs_rq);
2954 		update_entity_load_avg(se, 1);
2955 	}
2956 
2957 	if (!se) {
2958 		dec_nr_running(rq);
2959 		update_rq_runnable_avg(rq, 1);
2960 	}
2961 	hrtick_update(rq);
2962 }
2963 
2964 #ifdef CONFIG_SMP
2965 /* Used instead of source_load when we know the type == 0 */
2966 static unsigned long weighted_cpuload(const int cpu)
2967 {
2968 	return cpu_rq(cpu)->cfs.runnable_load_avg;
2969 }
2970 
2971 /*
2972  * Return a low guess at the load of a migration-source cpu weighted
2973  * according to the scheduling class and "nice" value.
2974  *
2975  * We want to under-estimate the load of migration sources, to
2976  * balance conservatively.
2977  */
2978 static unsigned long source_load(int cpu, int type)
2979 {
2980 	struct rq *rq = cpu_rq(cpu);
2981 	unsigned long total = weighted_cpuload(cpu);
2982 
2983 	if (type == 0 || !sched_feat(LB_BIAS))
2984 		return total;
2985 
2986 	return min(rq->cpu_load[type-1], total);
2987 }
2988 
2989 /*
2990  * Return a high guess at the load of a migration-target cpu weighted
2991  * according to the scheduling class and "nice" value.
2992  */
2993 static unsigned long target_load(int cpu, int type)
2994 {
2995 	struct rq *rq = cpu_rq(cpu);
2996 	unsigned long total = weighted_cpuload(cpu);
2997 
2998 	if (type == 0 || !sched_feat(LB_BIAS))
2999 		return total;
3000 
3001 	return max(rq->cpu_load[type-1], total);
3002 }
3003 
3004 static unsigned long power_of(int cpu)
3005 {
3006 	return cpu_rq(cpu)->cpu_power;
3007 }
3008 
3009 static unsigned long cpu_avg_load_per_task(int cpu)
3010 {
3011 	struct rq *rq = cpu_rq(cpu);
3012 	unsigned long nr_running = ACCESS_ONCE(rq->nr_running);
3013 	unsigned long load_avg = rq->cfs.runnable_load_avg;
3014 
3015 	if (nr_running)
3016 		return load_avg / nr_running;
3017 
3018 	return 0;
3019 }
3020 
3021 
3022 static void task_waking_fair(struct task_struct *p)
3023 {
3024 	struct sched_entity *se = &p->se;
3025 	struct cfs_rq *cfs_rq = cfs_rq_of(se);
3026 	u64 min_vruntime;
3027 
3028 #ifndef CONFIG_64BIT
3029 	u64 min_vruntime_copy;
3030 
3031 	do {
3032 		min_vruntime_copy = cfs_rq->min_vruntime_copy;
3033 		smp_rmb();
3034 		min_vruntime = cfs_rq->min_vruntime;
3035 	} while (min_vruntime != min_vruntime_copy);
3036 #else
3037 	min_vruntime = cfs_rq->min_vruntime;
3038 #endif
3039 
3040 	se->vruntime -= min_vruntime;
3041 }
3042 
3043 #ifdef CONFIG_FAIR_GROUP_SCHED
3044 /*
3045  * effective_load() calculates the load change as seen from the root_task_group
3046  *
3047  * Adding load to a group doesn't make a group heavier, but can cause movement
3048  * of group shares between cpus. Assuming the shares were perfectly aligned one
3049  * can calculate the shift in shares.
3050  *
 * Calculate the effective load difference if @wl is added to (subtracted
 * from) @tg on this @cpu, resulting in a total addition (subtraction) of
 * @wg to the total group weight.
3054  *
3055  * Given a runqueue weight distribution (rw_i) we can compute a shares
3056  * distribution (s_i) using:
3057  *
3058  *   s_i = rw_i / \Sum rw_j						(1)
3059  *
3060  * Suppose we have 4 CPUs and our @tg is a direct child of the root group and
3061  * has 7 equal weight tasks, distributed as below (rw_i), with the resulting
3062  * shares distribution (s_i):
3063  *
3064  *   rw_i = {   2,   4,   1,   0 }
3065  *   s_i  = { 2/7, 4/7, 1/7,   0 }
3066  *
3067  * As per wake_affine() we're interested in the load of two CPUs (the CPU the
3068  * task used to run on and the CPU the waker is running on), we need to
3069  * compute the effect of waking a task on either CPU and, in case of a sync
3070  * wakeup, compute the effect of the current task going to sleep.
3071  *
3072  * So for a change of @wl to the local @cpu with an overall group weight change
 * of @wg we can compute the new shares distribution (s'_i) using:
3074  *
3075  *   s'_i = (rw_i + @wl) / (@wg + \Sum rw_j)				(2)
3076  *
3077  * Suppose we're interested in CPUs 0 and 1, and want to compute the load
3078  * differences in waking a task to CPU 0. The additional task changes the
3079  * weight and shares distributions like:
3080  *
3081  *   rw'_i = {   3,   4,   1,   0 }
3082  *   s'_i  = { 3/8, 4/8, 1/8,   0 }
3083  *
3084  * We can then compute the difference in effective weight by using:
3085  *
3086  *   dw_i = S * (s'_i - s_i)						(3)
3087  *
3088  * Where 'S' is the group weight as seen by its parent.
3089  *
3090  * Therefore the effective change in loads on CPU 0 would be 5/56 (3/8 - 2/7)
3091  * times the weight of the group. The effect on CPU 1 would be -4/56 (4/8 -
3092  * 4/7) times the weight of the group.
3093  */
3094 static long effective_load(struct task_group *tg, int cpu, long wl, long wg)
3095 {
3096 	struct sched_entity *se = tg->se[cpu];
3097 
3098 	if (!tg->parent)	/* the trivial, non-cgroup case */
3099 		return wl;
3100 
3101 	for_each_sched_entity(se) {
3102 		long w, W;
3103 
3104 		tg = se->my_q->tg;
3105 
3106 		/*
3107 		 * W = @wg + \Sum rw_j
3108 		 */
3109 		W = wg + calc_tg_weight(tg, se->my_q);
3110 
3111 		/*
3112 		 * w = rw_i + @wl
3113 		 */
3114 		w = se->my_q->load.weight + wl;
3115 
3116 		/*
3117 		 * wl = S * s'_i; see (2)
3118 		 */
3119 		if (W > 0 && w < W)
3120 			wl = (w * tg->shares) / W;
3121 		else
3122 			wl = tg->shares;
3123 
3124 		/*
3125 		 * Per the above, wl is the new se->load.weight value; since
3126 		 * those are clipped to [MIN_SHARES, ...) do so now. See
3127 		 * calc_cfs_shares().
3128 		 */
3129 		if (wl < MIN_SHARES)
3130 			wl = MIN_SHARES;
3131 
3132 		/*
3133 		 * wl = dw_i = S * (s'_i - s_i); see (3)
3134 		 */
3135 		wl -= se->load.weight;
3136 
3137 		/*
3138 		 * Recursively apply this logic to all parent groups to compute
3139 		 * the final effective load change on the root group. Since
3140 		 * only the @tg group gets extra weight, all parent groups can
3141 		 * only redistribute existing shares. @wl is the shift in shares
3142 		 * resulting from this level per the above.
3143 		 */
3144 		wg = 0;
3145 	}
3146 
3147 	return wl;
3148 }
3149 #else
3150 
3151 static inline unsigned long effective_load(struct task_group *tg, int cpu,
3152 		unsigned long wl, unsigned long wg)
3153 {
3154 	return wl;
3155 }
3156 
3157 #endif
3158 
3159 static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync)
3160 {
3161 	s64 this_load, load;
3162 	int idx, this_cpu, prev_cpu;
3163 	unsigned long tl_per_task;
3164 	struct task_group *tg;
3165 	unsigned long weight;
3166 	int balanced;
3167 
3168 	idx	  = sd->wake_idx;
3169 	this_cpu  = smp_processor_id();
3170 	prev_cpu  = task_cpu(p);
3171 	load	  = source_load(prev_cpu, idx);
3172 	this_load = target_load(this_cpu, idx);
3173 
3174 	/*
3175 	 * If sync wakeup then subtract the (maximum possible)
3176 	 * effect of the currently running task from the load
3177 	 * of the current CPU:
3178 	 */
3179 	if (sync) {
3180 		tg = task_group(current);
3181 		weight = current->se.load.weight;
3182 
3183 		this_load += effective_load(tg, this_cpu, -weight, -weight);
3184 		load += effective_load(tg, prev_cpu, 0, -weight);
3185 	}
3186 
3187 	tg = task_group(p);
3188 	weight = p->se.load.weight;
3189 
3190 	/*
3191 	 * In low-load situations, where prev_cpu is idle and this_cpu is idle
3192 	 * due to the sync cause above having dropped this_load to 0, we'll
3193 	 * always have an imbalance, but there's really nothing you can do
3194 	 * about that, so that's good too.
3195 	 *
	 * Otherwise check if either cpu is near enough in load to allow this
3197 	 * task to be woken on this_cpu.
3198 	 */
3199 	if (this_load > 0) {
3200 		s64 this_eff_load, prev_eff_load;
3201 
3202 		this_eff_load = 100;
3203 		this_eff_load *= power_of(prev_cpu);
3204 		this_eff_load *= this_load +
3205 			effective_load(tg, this_cpu, weight, weight);
3206 
3207 		prev_eff_load = 100 + (sd->imbalance_pct - 100) / 2;
3208 		prev_eff_load *= power_of(this_cpu);
3209 		prev_eff_load *= load + effective_load(tg, prev_cpu, 0, weight);
3210 
3211 		balanced = this_eff_load <= prev_eff_load;
3212 	} else
3213 		balanced = true;
3214 
3215 	/*
3216 	 * If the currently running task will sleep within
3217 	 * a reasonable amount of time then attract this newly
3218 	 * woken task:
3219 	 */
3220 	if (sync && balanced)
3221 		return 1;
3222 
3223 	schedstat_inc(p, se.statistics.nr_wakeups_affine_attempts);
3224 	tl_per_task = cpu_avg_load_per_task(this_cpu);
3225 
3226 	if (balanced ||
3227 	    (this_load <= load &&
3228 	     this_load + target_load(prev_cpu, idx) <= tl_per_task)) {
3229 		/*
3230 		 * This domain has SD_WAKE_AFFINE and
3231 		 * p is cache cold in this domain, and
3232 		 * there is no bad imbalance.
3233 		 */
3234 		schedstat_inc(sd, ttwu_move_affine);
3235 		schedstat_inc(p, se.statistics.nr_wakeups_affine);
3236 
3237 		return 1;
3238 	}
3239 	return 0;
3240 }
3241 
3242 /*
3243  * find_idlest_group finds and returns the least busy CPU group within the
3244  * domain.
3245  */
3246 static struct sched_group *
3247 find_idlest_group(struct sched_domain *sd, struct task_struct *p,
3248 		  int this_cpu, int load_idx)
3249 {
3250 	struct sched_group *idlest = NULL, *group = sd->groups;
3251 	unsigned long min_load = ULONG_MAX, this_load = 0;
3252 	int imbalance = 100 + (sd->imbalance_pct-100)/2;
3253 
3254 	do {
3255 		unsigned long load, avg_load;
3256 		int local_group;
3257 		int i;
3258 
3259 		/* Skip over this group if it has no CPUs allowed */
3260 		if (!cpumask_intersects(sched_group_cpus(group),
3261 					tsk_cpus_allowed(p)))
3262 			continue;
3263 
3264 		local_group = cpumask_test_cpu(this_cpu,
3265 					       sched_group_cpus(group));
3266 
3267 		/* Tally up the load of all CPUs in the group */
3268 		avg_load = 0;
3269 
3270 		for_each_cpu(i, sched_group_cpus(group)) {
3271 			/* Bias balancing toward cpus of our domain */
3272 			if (local_group)
3273 				load = source_load(i, load_idx);
3274 			else
3275 				load = target_load(i, load_idx);
3276 
3277 			avg_load += load;
3278 		}
3279 
3280 		/* Adjust by relative CPU power of the group */
3281 		avg_load = (avg_load * SCHED_POWER_SCALE) / group->sgp->power;
3282 
3283 		if (local_group) {
3284 			this_load = avg_load;
3285 		} else if (avg_load < min_load) {
3286 			min_load = avg_load;
3287 			idlest = group;
3288 		}
3289 	} while (group = group->next, group != sd->groups);
3290 
3291 	if (!idlest || 100*this_load < imbalance*min_load)
3292 		return NULL;
3293 	return idlest;
3294 }
3295 
3296 /*
3297  * find_idlest_cpu - find the idlest cpu among the cpus in group.
3298  */
3299 static int
3300 find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
3301 {
3302 	unsigned long load, min_load = ULONG_MAX;
3303 	int idlest = -1;
3304 	int i;
3305 
3306 	/* Traverse only the allowed CPUs */
3307 	for_each_cpu_and(i, sched_group_cpus(group), tsk_cpus_allowed(p)) {
3308 		load = weighted_cpuload(i);
3309 
3310 		if (load < min_load || (load == min_load && i == this_cpu)) {
3311 			min_load = load;
3312 			idlest = i;
3313 		}
3314 	}
3315 
3316 	return idlest;
3317 }
3318 
3319 /*
3320  * Try and locate an idle CPU in the sched_domain.
3321  */
3322 static int select_idle_sibling(struct task_struct *p, int target)
3323 {
3324 	struct sched_domain *sd;
3325 	struct sched_group *sg;
3326 	int i = task_cpu(p);
3327 
3328 	if (idle_cpu(target))
3329 		return target;
3330 
3331 	/*
	 * If the previous cpu is cache affine and idle, don't be stupid.
3333 	 */
3334 	if (i != target && cpus_share_cache(i, target) && idle_cpu(i))
3335 		return i;
3336 
3337 	/*
	 * Otherwise, iterate the domains and find an eligible idle cpu.
3339 	 */
3340 	sd = rcu_dereference(per_cpu(sd_llc, target));
3341 	for_each_lower_domain(sd) {
3342 		sg = sd->groups;
3343 		do {
3344 			if (!cpumask_intersects(sched_group_cpus(sg),
3345 						tsk_cpus_allowed(p)))
3346 				goto next;
3347 
3348 			for_each_cpu(i, sched_group_cpus(sg)) {
3349 				if (i == target || !idle_cpu(i))
3350 					goto next;
3351 			}
3352 
3353 			target = cpumask_first_and(sched_group_cpus(sg),
3354 					tsk_cpus_allowed(p));
3355 			goto done;
3356 next:
3357 			sg = sg->next;
3358 		} while (sg != sd->groups);
3359 	}
3360 done:
3361 	return target;
3362 }
3363 
3364 /*
 * select_task_rq_fair: balance the current task (running on cpu) in domains
 * that have the relevant sd_flag set. In practice, this is SD_BALANCE_WAKE,
 * SD_BALANCE_FORK, or SD_BALANCE_EXEC.
 *
 * Balance, i.e. select the least loaded group.
3370  *
3371  * Returns the target CPU number, or the same CPU if no balancing is needed.
3372  *
3373  * preempt must be disabled.
3374  */
3375 static int
3376 select_task_rq_fair(struct task_struct *p, int sd_flag, int wake_flags)
3377 {
3378 	struct sched_domain *tmp, *affine_sd = NULL, *sd = NULL;
3379 	int cpu = smp_processor_id();
3380 	int prev_cpu = task_cpu(p);
3381 	int new_cpu = cpu;
3382 	int want_affine = 0;
3383 	int sync = wake_flags & WF_SYNC;
3384 
3385 	if (p->nr_cpus_allowed == 1)
3386 		return prev_cpu;
3387 
3388 	if (sd_flag & SD_BALANCE_WAKE) {
3389 		if (cpumask_test_cpu(cpu, tsk_cpus_allowed(p)))
3390 			want_affine = 1;
3391 		new_cpu = prev_cpu;
3392 	}
3393 
3394 	rcu_read_lock();
3395 	for_each_domain(cpu, tmp) {
3396 		if (!(tmp->flags & SD_LOAD_BALANCE))
3397 			continue;
3398 
3399 		/*
3400 		 * If both cpu and prev_cpu are part of this domain,
3401 		 * cpu is a valid SD_WAKE_AFFINE target.
3402 		 */
3403 		if (want_affine && (tmp->flags & SD_WAKE_AFFINE) &&
3404 		    cpumask_test_cpu(prev_cpu, sched_domain_span(tmp))) {
3405 			affine_sd = tmp;
3406 			break;
3407 		}
3408 
3409 		if (tmp->flags & sd_flag)
3410 			sd = tmp;
3411 	}
3412 
3413 	if (affine_sd) {
3414 		if (cpu != prev_cpu && wake_affine(affine_sd, p, sync))
3415 			prev_cpu = cpu;
3416 
3417 		new_cpu = select_idle_sibling(p, prev_cpu);
3418 		goto unlock;
3419 	}
3420 
3421 	while (sd) {
3422 		int load_idx = sd->forkexec_idx;
3423 		struct sched_group *group;
3424 		int weight;
3425 
3426 		if (!(sd->flags & sd_flag)) {
3427 			sd = sd->child;
3428 			continue;
3429 		}
3430 
3431 		if (sd_flag & SD_BALANCE_WAKE)
3432 			load_idx = sd->wake_idx;
3433 
3434 		group = find_idlest_group(sd, p, cpu, load_idx);
3435 		if (!group) {
3436 			sd = sd->child;
3437 			continue;
3438 		}
3439 
3440 		new_cpu = find_idlest_cpu(group, p, cpu);
3441 		if (new_cpu == -1 || new_cpu == cpu) {
3442 			/* Now try balancing at a lower domain level of cpu */
3443 			sd = sd->child;
3444 			continue;
3445 		}
3446 
3447 		/* Now try balancing at a lower domain level of new_cpu */
3448 		cpu = new_cpu;
3449 		weight = sd->span_weight;
3450 		sd = NULL;
3451 		for_each_domain(cpu, tmp) {
3452 			if (weight <= tmp->span_weight)
3453 				break;
3454 			if (tmp->flags & sd_flag)
3455 				sd = tmp;
3456 		}
3457 		/* while loop will break here if sd == NULL */
3458 	}
3459 unlock:
3460 	rcu_read_unlock();
3461 
3462 	return new_cpu;
3463 }
3464 
3465 /*
3466  * Called immediately before a task is migrated to a new cpu; task_cpu(p) and
3467  * cfs_rq_of(p) references at time of call are still valid and identify the
3468  * previous cpu.  However, the caller only guarantees p->pi_lock is held; no
3469  * other assumptions, including the state of rq->lock, should be made.
3470  */
3471 static void
3472 migrate_task_rq_fair(struct task_struct *p, int next_cpu)
3473 {
3474 	struct sched_entity *se = &p->se;
3475 	struct cfs_rq *cfs_rq = cfs_rq_of(se);
3476 
3477 	/*
3478 	 * Load tracking: accumulate removed load so that it can be processed
3479 	 * when we next update owning cfs_rq under rq->lock.  Tasks contribute
3480 	 * to blocked load iff they have a positive decay-count.  It can never
3481 	 * be negative here since on-rq tasks have decay-count == 0.
3482 	 */
3483 	if (se->avg.decay_count) {
3484 		se->avg.decay_count = -__synchronize_entity_decay(se);
3485 		atomic_long_add(se->avg.load_avg_contrib,
3486 						&cfs_rq->removed_load);
3487 	}
3488 }
3489 #endif /* CONFIG_SMP */
3490 
3491 static unsigned long
3492 wakeup_gran(struct sched_entity *curr, struct sched_entity *se)
3493 {
3494 	unsigned long gran = sysctl_sched_wakeup_granularity;
3495 
3496 	/*
	 * Since it is curr that is running now, convert the gran from
	 * real-time to virtual-time in its units.
	 *
	 * By using 'se' instead of 'curr' we penalize light tasks, so
	 * they get preempted easier. That is, if 'se' < 'curr' then
	 * the resulting gran will be larger, therefore penalizing the
	 * lighter task; if OTOH 'se' > 'curr' then the resulting gran
	 * will be smaller, again penalizing the lighter task.
3505 	 *
3506 	 * This is especially important for buddies when the leftmost
3507 	 * task is higher priority than the buddy.
3508 	 */
3509 	return calc_delta_fair(gran, se);
3510 }
3511 
3512 /*
3513  * Should 'se' preempt 'curr'.
3514  *
3515  *             |s1
3516  *        |s2
3517  *   |s3
3518  *         g
3519  *      |<--->|c
3520  *
3521  *  w(c, s1) = -1
3522  *  w(c, s2) =  0
3523  *  w(c, s3) =  1
3524  *
3525  */
3526 static int
3527 wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se)
3528 {
3529 	s64 gran, vdiff = curr->vruntime - se->vruntime;
3530 
3531 	if (vdiff <= 0)
3532 		return -1;
3533 
3534 	gran = wakeup_gran(curr, se);
3535 	if (vdiff > gran)
3536 		return 1;
3537 
3538 	return 0;
3539 }
3540 
3541 static void set_last_buddy(struct sched_entity *se)
3542 {
3543 	if (entity_is_task(se) && unlikely(task_of(se)->policy == SCHED_IDLE))
3544 		return;
3545 
3546 	for_each_sched_entity(se)
3547 		cfs_rq_of(se)->last = se;
3548 }
3549 
3550 static void set_next_buddy(struct sched_entity *se)
3551 {
3552 	if (entity_is_task(se) && unlikely(task_of(se)->policy == SCHED_IDLE))
3553 		return;
3554 
3555 	for_each_sched_entity(se)
3556 		cfs_rq_of(se)->next = se;
3557 }
3558 
3559 static void set_skip_buddy(struct sched_entity *se)
3560 {
3561 	for_each_sched_entity(se)
3562 		cfs_rq_of(se)->skip = se;
3563 }
3564 
3565 /*
3566  * Preempt the current task with a newly woken task if needed:
3567  */
3568 static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_flags)
3569 {
3570 	struct task_struct *curr = rq->curr;
3571 	struct sched_entity *se = &curr->se, *pse = &p->se;
3572 	struct cfs_rq *cfs_rq = task_cfs_rq(curr);
3573 	int scale = cfs_rq->nr_running >= sched_nr_latency;
3574 	int next_buddy_marked = 0;
3575 
3576 	if (unlikely(se == pse))
3577 		return;
3578 
3579 	/*
3580 	 * This is possible from callers such as move_task(), in which we
	 * unconditionally check_preempt_curr() after an enqueue (which may have
	 * led to a throttle).  This both saves work and prevents false
3583 	 * next-buddy nomination below.
3584 	 */
3585 	if (unlikely(throttled_hierarchy(cfs_rq_of(pse))))
3586 		return;
3587 
3588 	if (sched_feat(NEXT_BUDDY) && scale && !(wake_flags & WF_FORK)) {
3589 		set_next_buddy(pse);
3590 		next_buddy_marked = 1;
3591 	}
3592 
3593 	/*
3594 	 * We can come here with TIF_NEED_RESCHED already set from new task
3595 	 * wake up path.
3596 	 *
3597 	 * Note: this also catches the edge-case of curr being in a throttled
3598 	 * group (e.g. via set_curr_task), since update_curr() (in the
3599 	 * enqueue of curr) will have resulted in resched being set.  This
3600 	 * prevents us from potentially nominating it as a false LAST_BUDDY
3601 	 * below.
3602 	 */
3603 	if (test_tsk_need_resched(curr))
3604 		return;
3605 
3606 	/* Idle tasks are by definition preempted by non-idle tasks. */
3607 	if (unlikely(curr->policy == SCHED_IDLE) &&
3608 	    likely(p->policy != SCHED_IDLE))
3609 		goto preempt;
3610 
3611 	/*
3612 	 * Batch and idle tasks do not preempt non-idle tasks (their preemption
3613 	 * is driven by the tick):
3614 	 */
3615 	if (unlikely(p->policy != SCHED_NORMAL) || !sched_feat(WAKEUP_PREEMPTION))
3616 		return;
3617 
3618 	find_matching_se(&se, &pse);
3619 	update_curr(cfs_rq_of(se));
3620 	BUG_ON(!pse);
3621 	if (wakeup_preempt_entity(se, pse) == 1) {
3622 		/*
3623 		 * Bias pick_next to pick the sched entity that is
3624 		 * triggering this preemption.
3625 		 */
3626 		if (!next_buddy_marked)
3627 			set_next_buddy(pse);
3628 		goto preempt;
3629 	}
3630 
3631 	return;
3632 
3633 preempt:
3634 	resched_task(curr);
3635 	/*
3636 	 * Only set the backward buddy when the current task is still
3637 	 * on the rq. This can happen when a wakeup gets interleaved
3638 	 * with schedule on the ->pre_schedule() or idle_balance()
	 * point, either of which can drop the rq lock.
3640 	 *
3641 	 * Also, during early boot the idle thread is in the fair class,
3642 	 * for obvious reasons its a bad idea to schedule back to it.
3643 	 */
3644 	if (unlikely(!se->on_rq || curr == rq->idle))
3645 		return;
3646 
3647 	if (sched_feat(LAST_BUDDY) && scale && entity_is_task(se))
3648 		set_last_buddy(se);
3649 }
3650 
3651 static struct task_struct *pick_next_task_fair(struct rq *rq)
3652 {
3653 	struct task_struct *p;
3654 	struct cfs_rq *cfs_rq = &rq->cfs;
3655 	struct sched_entity *se;
3656 
3657 	if (!cfs_rq->nr_running)
3658 		return NULL;
3659 
3660 	do {
3661 		se = pick_next_entity(cfs_rq);
3662 		set_next_entity(cfs_rq, se);
3663 		cfs_rq = group_cfs_rq(se);
3664 	} while (cfs_rq);
3665 
3666 	p = task_of(se);
3667 	if (hrtick_enabled(rq))
3668 		hrtick_start_fair(rq, p);
3669 
3670 	return p;
3671 }
3672 
3673 /*
3674  * Account for a descheduled task:
3675  */
3676 static void put_prev_task_fair(struct rq *rq, struct task_struct *prev)
3677 {
3678 	struct sched_entity *se = &prev->se;
3679 	struct cfs_rq *cfs_rq;
3680 
3681 	for_each_sched_entity(se) {
3682 		cfs_rq = cfs_rq_of(se);
3683 		put_prev_entity(cfs_rq, se);
3684 	}
3685 }
3686 
3687 /*
3688  * sched_yield() is very simple
3689  *
3690  * The magic of dealing with the ->skip buddy is in pick_next_entity.
3691  */
3692 static void yield_task_fair(struct rq *rq)
3693 {
3694 	struct task_struct *curr = rq->curr;
3695 	struct cfs_rq *cfs_rq = task_cfs_rq(curr);
3696 	struct sched_entity *se = &curr->se;
3697 
3698 	/*
3699 	 * Are we the only task in the tree?
3700 	 */
3701 	if (unlikely(rq->nr_running == 1))
3702 		return;
3703 
3704 	clear_buddies(cfs_rq, se);
3705 
3706 	if (curr->policy != SCHED_BATCH) {
3707 		update_rq_clock(rq);
3708 		/*
3709 		 * Update run-time statistics of the 'current'.
3710 		 */
3711 		update_curr(cfs_rq);
3712 		/*
3713 		 * Tell update_rq_clock() that we've just updated,
3714 		 * so we don't do a microscopic update in schedule()
3715 		 * and double the fastpath cost.
3716 		 */
3717 		rq->skip_clock_update = 1;
3718 	}
3719 
3720 	set_skip_buddy(se);
3721 }
3722 
3723 static bool yield_to_task_fair(struct rq *rq, struct task_struct *p, bool preempt)
3724 {
3725 	struct sched_entity *se = &p->se;
3726 
3727 	/* throttled hierarchies are not runnable */
3728 	if (!se->on_rq || throttled_hierarchy(cfs_rq_of(se)))
3729 		return false;
3730 
3731 	/* Tell the scheduler that we'd really like se to run next. */
3732 	set_next_buddy(se);
3733 
3734 	yield_task_fair(rq);
3735 
3736 	return true;
3737 }
3738 
3739 #ifdef CONFIG_SMP
3740 /**************************************************
3741  * Fair scheduling class load-balancing methods.
3742  *
3743  * BASICS
3744  *
3745  * The purpose of load-balancing is to achieve the same basic fairness the
3746  * per-cpu scheduler provides, namely provide a proportional amount of compute
3747  * time to each task. This is expressed in the following equation:
3748  *
3749  *   W_i,n/P_i == W_j,n/P_j for all i,j                               (1)
3750  *
3751  * Where W_i,n is the n-th weight average for cpu i. The instantaneous weight
3752  * W_i,0 is defined as:
3753  *
3754  *   W_i,0 = \Sum_j w_i,j                                             (2)
3755  *
3756  * Where w_i,j is the weight of the j-th runnable task on cpu i. This weight
3757  * is derived from the nice value as per prio_to_weight[].
3758  *
3759  * The weight average is an exponential decay average of the instantaneous
3760  * weight:
3761  *
3762  *   W'_i,n = (2^n - 1) / 2^n * W_i,n + 1 / 2^n * W_i,0               (3)
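 *
 * As an illustrative example with made-up numbers: for n = 2, a previous
 * average W_i,2 = 2048 and an instantaneous weight W_i,0 = 1024, (3)
 * yields W'_i,2 = 3/4 * 2048 + 1/4 * 1024 = 1792.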
3763  *
3764  * P_i is the cpu power (or compute capacity) of cpu i; typically it is the
3765  * fraction of 'recent' time available for SCHED_OTHER task execution. But it
3766  * can also include other factors [XXX].
3767  *
3768  * To achieve this balance we define a measure of imbalance which follows
3769  * directly from (1):
3770  *
3771  *   imb_i,j = max{ avg(W/P), W_i/P_i } - min{ avg(W/P), W_j/P_j }    (4)
3772  *
3773  * We them move tasks around to minimize the imbalance. In the continuous
3774  * function space it is obvious this converges, in the discrete case we get
3775  * a few fun cases generally called infeasible weight scenarios.
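 *
 * (A sketch of an infeasible case: two cpus and three runnable tasks of
 * equal weight w can only be split 2w vs. 1w, so the equality in (1)
 * cannot hold exactly.)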
3776  *
3777  * [XXX expand on:
3778  *     - infeasible weights;
3779  *     - local vs global optima in the discrete case. ]
3780  *
3781  *
3782  * SCHED DOMAINS
3783  *
3784  * In order to solve the imbalance equation (4), and avoid the obvious O(n^2)
3785  * for all i,j solution, we create a tree of cpus that follows the hardware
3786  * topology where each level pairs two lower groups (or better). This results
3787  * in O(log n) layers. Furthermore we reduce the number of cpus going up the
3788  * tree to only the first of the previous level and we decrease the frequency
3789  * of load-balance at each level in inverse proportion to the number of cpus in
3790  * the groups.
3791  *
3792  * This yields:
3793  *
3794  *     log_2 n     1     n
3795  *   \Sum       { --- * --- * 2^i } = O(n)                            (5)
3796  *     i = 0      2^i   2^i
3797  *                               `- size of each group
3798  *         |         |     `- number of cpus doing load-balance
3799  *         |         `- freq
3800  *         `- sum over all levels
3801  *
3802  * Coupled with a limit on how many tasks we can migrate every balance pass,
3803  * this makes (5) the runtime complexity of the balancer.
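 *
 * (E.g. for n = 4 the per-level terms are n/2^i = 4, 2, 1 for i = 0..2,
 * summing to 7 < 2n, i.e. O(n).)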
3804  *
3805  * An important property here is that each CPU is still (indirectly) connected
3806  * to every other cpu in at most O(log n) steps:
3807  *
3808  * The adjacency matrix of the resulting graph is given by:
3809  *
3810  *             log_2 n
3811  *   A_i,j = \Union     (i % 2^k == 0) && i / 2^(k+1) == j / 2^(k+1)  (6)
3812  *             k = 0
3813  *
3814  * And you'll find that:
3815  *
3816  *   A^(log_2 n)_i,j != 0  for all i,j                                (7)
3817  *
3818  * Showing there's indeed a path between every cpu in at most O(log n) steps.
3819  * The task movement gives a factor of O(m), giving a convergence complexity
3820  * of:
3821  *
3822  *   O(nm log n),  n := nr_cpus, m := nr_tasks                        (8)
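 *
 * (E.g. for n = 4: cpu 1 pairs with cpu 0 at the lowest level, and cpu 0
 * pairs with cpus 2 and 3 at the next, so cpu 1 reaches cpu 3 in two
 * steps, matching log_2 4.)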
3823  *
3824  *
3825  * WORK CONSERVING
3826  *
3827  * In order to avoid CPUs going idle while there's still work to do, new idle
3828  * balancing is more aggressive and has the newly idle cpu iterate up the domain
3829  * tree itself instead of relying on other CPUs to bring it work.
3830  *
3831  * This adds some complexity to both (5) and (8) but it reduces the total idle
3832  * time.
3833  *
3834  * [XXX more?]
3835  *
3836  *
3837  * CGROUPS
3838  *
3839  * Cgroups make a horror show out of (2); instead of a simple sum we get:
3840  *
3841  *                                s_k,i
3842  *   W_i,0 = \Sum_j \Prod_k w_k * -----                               (9)
3843  *                                 S_k
3844  *
3845  * Where
3846  *
3847  *   s_k,i = \Sum_j w_i,j,k  and  S_k = \Sum_i s_k,i                 (10)
3848  *
3849  * w_i,j,k is the weight of the j-th runnable task in the k-th cgroup on cpu i.
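 *
 * (E.g., for a single-level hierarchy: a cgroup k of weight w_k = 1024
 * whose runnable tasks split evenly across two cpus has s_k,i/S_k = 1/2
 * on each, so each cpu sees only 512 of the group's weight in (9).)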
3850  *
3851  * The big problem is S_k, it's a global sum needed to compute a local (W_i)
3852  * property.
3853  *
3854  * [XXX write more on how we solve this.. _after_ merging pjt's patches that
3855  *      rewrite all of this once again.]
3856  */
3857 
3858 static unsigned long __read_mostly max_load_balance_interval = HZ/10;
3859 
3860 #define LBF_ALL_PINNED	0x01
3861 #define LBF_NEED_BREAK	0x02
3862 #define LBF_SOME_PINNED 0x04
3863 
3864 struct lb_env {
3865 	struct sched_domain	*sd;
3866 
3867 	struct rq		*src_rq;
3868 	int			src_cpu;
3869 
3870 	int			dst_cpu;
3871 	struct rq		*dst_rq;
3872 
3873 	struct cpumask		*dst_grpmask;
3874 	int			new_dst_cpu;
3875 	enum cpu_idle_type	idle;
3876 	long			imbalance;
3877 	/* The set of CPUs under consideration for load-balancing */
3878 	struct cpumask		*cpus;
3879 
3880 	unsigned int		flags;
3881 
3882 	unsigned int		loop;
3883 	unsigned int		loop_break;
3884 	unsigned int		loop_max;
3885 };
3886 
3887 /*
3888  * move_task - move a task from one runqueue to another runqueue.
3889  * Both runqueues must be locked.
3890  */
3891 static void move_task(struct task_struct *p, struct lb_env *env)
3892 {
3893 	deactivate_task(env->src_rq, p, 0);
3894 	set_task_cpu(p, env->dst_cpu);
3895 	activate_task(env->dst_rq, p, 0);
3896 	check_preempt_curr(env->dst_rq, p, 0);
3897 }
3898 
3899 /*
3900  * Is this task likely cache-hot:
3901  */
3902 static int
3903 task_hot(struct task_struct *p, u64 now, struct sched_domain *sd)
3904 {
3905 	s64 delta;
3906 
3907 	if (p->sched_class != &fair_sched_class)
3908 		return 0;
3909 
3910 	if (unlikely(p->policy == SCHED_IDLE))
3911 		return 0;
3912 
3913 	/*
3914 	 * Buddy candidates are cache hot:
3915 	 */
3916 	if (sched_feat(CACHE_HOT_BUDDY) && this_rq()->nr_running &&
3917 			(&p->se == cfs_rq_of(&p->se)->next ||
3918 			 &p->se == cfs_rq_of(&p->se)->last))
3919 		return 1;
3920 
3921 	if (sysctl_sched_migration_cost == -1)
3922 		return 1;
3923 	if (sysctl_sched_migration_cost == 0)
3924 		return 0;
3925 
3926 	delta = now - p->se.exec_start;
3927 
3928 	return delta < (s64)sysctl_sched_migration_cost;
3929 }
3930 
3931 /*
3932  * can_migrate_task - may task p from runqueue rq be migrated to this_cpu?
3933  */
3934 static
3935 int can_migrate_task(struct task_struct *p, struct lb_env *env)
3936 {
3937 	int tsk_cache_hot = 0;
3938 	/*
3939 	 * We do not migrate tasks that are:
3940 	 * 1) throttled_lb_pair, or
3941 	 * 2) cannot be migrated to this CPU due to cpus_allowed, or
3942 	 * 3) running (obviously), or
3943 	 * 4) cache-hot on their current CPU.
3944 	 */
3945 	if (throttled_lb_pair(task_group(p), env->src_cpu, env->dst_cpu))
3946 		return 0;
3947 
3948 	if (!cpumask_test_cpu(env->dst_cpu, tsk_cpus_allowed(p))) {
3949 		int cpu;
3950 
3951 		schedstat_inc(p, se.statistics.nr_failed_migrations_affine);
3952 
3953 		/*
3954 		 * Remember if this task can be migrated to any other cpu in
3955 		 * our sched_group. We may want to revisit it if we couldn't
3956 		 * meet load balance goals by pulling other tasks on src_cpu.
3957 		 *
3958 		 * Also avoid computing new_dst_cpu if we have already computed
3959 		 * one in the current iteration.
3960 		 */
3961 		if (!env->dst_grpmask || (env->flags & LBF_SOME_PINNED))
3962 			return 0;
3963 
3964 		/* Prevent re-selecting dst_cpu via env's cpus */
3965 		for_each_cpu_and(cpu, env->dst_grpmask, env->cpus) {
3966 			if (cpumask_test_cpu(cpu, tsk_cpus_allowed(p))) {
3967 				env->flags |= LBF_SOME_PINNED;
3968 				env->new_dst_cpu = cpu;
3969 				break;
3970 			}
3971 		}
3972 
3973 		return 0;
3974 	}
3975 
3976 	/* Record that we found at least one task that could run on dst_cpu */
3977 	env->flags &= ~LBF_ALL_PINNED;
3978 
3979 	if (task_running(env->src_rq, p)) {
3980 		schedstat_inc(p, se.statistics.nr_failed_migrations_running);
3981 		return 0;
3982 	}
3983 
3984 	/*
3985 	 * Aggressive migration if:
3986 	 * 1) task is cache cold, or
3987 	 * 2) too many balance attempts have failed.
3988 	 */
3989 
3990 	tsk_cache_hot = task_hot(p, rq_clock_task(env->src_rq), env->sd);
3991 	if (!tsk_cache_hot ||
3992 		env->sd->nr_balance_failed > env->sd->cache_nice_tries) {
3993 
3994 		if (tsk_cache_hot) {
3995 			schedstat_inc(env->sd, lb_hot_gained[env->idle]);
3996 			schedstat_inc(p, se.statistics.nr_forced_migrations);
3997 		}
3998 
3999 		return 1;
4000 	}
4001 
4002 	schedstat_inc(p, se.statistics.nr_failed_migrations_hot);
4003 	return 0;
4004 }
4005 
4006 /*
4007  * move_one_task tries to move exactly one task from busiest to this_rq, as
4008  * part of active balancing operations within "domain".
4009  * Returns 1 if successful and 0 otherwise.
4010  *
4011  * Called with both runqueues locked.
4012  */
4013 static int move_one_task(struct lb_env *env)
4014 {
4015 	struct task_struct *p, *n;
4016 
4017 	list_for_each_entry_safe(p, n, &env->src_rq->cfs_tasks, se.group_node) {
4018 		if (!can_migrate_task(p, env))
4019 			continue;
4020 
4021 		move_task(p, env);
4022 		/*
4023 		 * Right now, this is only the second place move_task()
4024 		 * is called, so we can safely collect move_task()
4025 		 * stats here rather than inside move_task().
4026 		 */
4027 		schedstat_inc(env->sd, lb_gained[env->idle]);
4028 		return 1;
4029 	}
4030 	return 0;
4031 }
4032 
4033 static unsigned long task_h_load(struct task_struct *p);
4034 
4035 static const unsigned int sched_nr_migrate_break = 32;
4036 
4037 /*
4038  * move_tasks tries to move up to imbalance weighted load from busiest to
4039  * this_rq, as part of a balancing operation within domain "sd".
4040  * Returns 1 if successful and 0 otherwise.
4041  *
4042  * Called with both runqueues locked.
4043  */
4044 static int move_tasks(struct lb_env *env)
4045 {
4046 	struct list_head *tasks = &env->src_rq->cfs_tasks;
4047 	struct task_struct *p;
4048 	unsigned long load;
4049 	int pulled = 0;
4050 
4051 	if (env->imbalance <= 0)
4052 		return 0;
4053 
4054 	while (!list_empty(tasks)) {
4055 		p = list_first_entry(tasks, struct task_struct, se.group_node);
4056 
4057 		env->loop++;
4058 		/* We've more or less seen every task there is, call it quits */
4059 		if (env->loop > env->loop_max)
4060 			break;
4061 
4062 		/* take a breather every nr_migrate tasks */
4063 		if (env->loop > env->loop_break) {
4064 			env->loop_break += sched_nr_migrate_break;
4065 			env->flags |= LBF_NEED_BREAK;
4066 			break;
4067 		}
4068 
4069 		if (!can_migrate_task(p, env))
4070 			goto next;
4071 
4072 		load = task_h_load(p);
4073 
4074 		if (sched_feat(LB_MIN) && load < 16 && !env->sd->nr_balance_failed)
4075 			goto next;
4076 
4077 		if ((load / 2) > env->imbalance)
4078 			goto next;
4079 
4080 		move_task(p, env);
4081 		pulled++;
4082 		env->imbalance -= load;
4083 
4084 #ifdef CONFIG_PREEMPT
4085 		/*
4086 		 * NEWIDLE balancing is a source of latency, so preemptible
4087 		 * kernels will stop after the first task is pulled to minimize
4088 		 * the critical section.
4089 		 */
4090 		if (env->idle == CPU_NEWLY_IDLE)
4091 			break;
4092 #endif
4093 
4094 		/*
4095 		 * We only want to steal up to the prescribed amount of
4096 		 * weighted load.
4097 		 */
4098 		if (env->imbalance <= 0)
4099 			break;
4100 
4101 		continue;
4102 next:
4103 		list_move_tail(&p->se.group_node, tasks);
4104 	}
4105 
4106 	/*
4107 	 * Right now, this is one of only two places move_task() is called,
4108 	 * so we can safely collect move_task() stats here rather than
4109 	 * inside move_task().
4110 	 */
4111 	schedstat_add(env->sd, lb_gained[env->idle], pulled);
4112 
4113 	return pulled;
4114 }
4115 
4116 #ifdef CONFIG_FAIR_GROUP_SCHED
4117 /*
4118  * update tg->load_weight by folding this cpu's load_avg
4119  */
4120 static void __update_blocked_averages_cpu(struct task_group *tg, int cpu)
4121 {
4122 	struct sched_entity *se = tg->se[cpu];
4123 	struct cfs_rq *cfs_rq = tg->cfs_rq[cpu];
4124 
4125 	/* throttled entities do not contribute to load */
4126 	if (throttled_hierarchy(cfs_rq))
4127 		return;
4128 
4129 	update_cfs_rq_blocked_load(cfs_rq, 1);
4130 
4131 	if (se) {
4132 		update_entity_load_avg(se, 1);
4133 		/*
4134 		 * We pivot on our runnable average having decayed to zero for
4135 		 * list removal.  This generally implies that all our children
4136 		 * have also been removed (modulo rounding error or bandwidth
4137 		 * control); however, such cases are rare and we can fix these
4138 		 * at enqueue.
4139 		 *
4140 		 * TODO: fix up out-of-order children on enqueue.
4141 		 */
4142 		if (!se->avg.runnable_avg_sum && !cfs_rq->nr_running)
4143 			list_del_leaf_cfs_rq(cfs_rq);
4144 	} else {
4145 		struct rq *rq = rq_of(cfs_rq);
4146 		update_rq_runnable_avg(rq, rq->nr_running);
4147 	}
4148 }
4149 
4150 static void update_blocked_averages(int cpu)
4151 {
4152 	struct rq *rq = cpu_rq(cpu);
4153 	struct cfs_rq *cfs_rq;
4154 	unsigned long flags;
4155 
4156 	raw_spin_lock_irqsave(&rq->lock, flags);
4157 	update_rq_clock(rq);
4158 	/*
4159 	 * Iterates the task_group tree in a bottom up fashion, see
4160 	 * list_add_leaf_cfs_rq() for details.
4161 	 */
4162 	for_each_leaf_cfs_rq(rq, cfs_rq) {
4163 		/*
4164 		 * Note: We may want to consider periodically releasing
4165 		 * rq->lock around these updates so that creating many task
4166 		 * groups does not result in continually extending hold time.
4167 		 */
4168 		__update_blocked_averages_cpu(cfs_rq->tg, rq->cpu);
4169 	}
4170 
4171 	raw_spin_unlock_irqrestore(&rq->lock, flags);
4172 }
4173 
4174 /*
4175  * Compute the cpu's hierarchical load factor for each task group.
4176  * This needs to be done in a top-down fashion because the load of a child
4177  * group is a fraction of its parent's load.
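 *
 * E.g. (illustrative numbers): if the parent cfs_rq on this cpu has
 * h_load = 2048 and runnable_load_avg = 4096, a child entity contributing
 * load_avg_contrib = 1024 inherits h_load = 2048 * 1024 / 4097 ~= 512,
 * i.e. roughly half of the parent's hierarchical load.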
4178  */
4179 static int tg_load_down(struct task_group *tg, void *data)
4180 {
4181 	unsigned long load;
4182 	long cpu = (long)data;
4183 
4184 	if (!tg->parent) {
4185 		load = cpu_rq(cpu)->avg.load_avg_contrib;
4186 	} else {
4187 		load = tg->parent->cfs_rq[cpu]->h_load;
4188 		load = div64_ul(load * tg->se[cpu]->avg.load_avg_contrib,
4189 				tg->parent->cfs_rq[cpu]->runnable_load_avg + 1);
4190 	}
4191 
4192 	tg->cfs_rq[cpu]->h_load = load;
4193 
4194 	return 0;
4195 }
4196 
4197 static void update_h_load(long cpu)
4198 {
4199 	struct rq *rq = cpu_rq(cpu);
4200 	unsigned long now = jiffies;
4201 
4202 	if (rq->h_load_throttle == now)
4203 		return;
4204 
4205 	rq->h_load_throttle = now;
4206 
4207 	rcu_read_lock();
4208 	walk_tg_tree(tg_load_down, tg_nop, (void *)cpu);
4209 	rcu_read_unlock();
4210 }
4211 
4212 static unsigned long task_h_load(struct task_struct *p)
4213 {
4214 	struct cfs_rq *cfs_rq = task_cfs_rq(p);
4215 
4216 	return div64_ul(p->se.avg.load_avg_contrib * cfs_rq->h_load,
4217 			cfs_rq->runnable_load_avg + 1);
4218 }
4219 #else
4220 static inline void update_blocked_averages(int cpu)
4221 {
4222 }
4223 
4224 static inline void update_h_load(long cpu)
4225 {
4226 }
4227 
4228 static unsigned long task_h_load(struct task_struct *p)
4229 {
4230 	return p->se.avg.load_avg_contrib;
4231 }
4232 #endif
4233 
4234 /********** Helpers for find_busiest_group ************************/
4235 /*
4236  * sd_lb_stats - Structure to store the statistics of a sched_domain
4237  * 		during load balancing.
4238  */
4239 struct sd_lb_stats {
4240 	struct sched_group *busiest; /* Busiest group in this sd */
4241 	struct sched_group *this;  /* Local group in this sd */
4242 	unsigned long total_load;  /* Total load of all groups in sd */
4243 	unsigned long total_pwr;   /* Total power of all groups in sd */
4244 	unsigned long avg_load;	   /* Average load across all groups in sd */
4245 
4246 	/** Statistics of this group */
4247 	unsigned long this_load;
4248 	unsigned long this_load_per_task;
4249 	unsigned long this_nr_running;
4250 	unsigned long this_has_capacity;
4251 	unsigned int  this_idle_cpus;
4252 
4253 	/* Statistics of the busiest group */
4254 	unsigned int  busiest_idle_cpus;
4255 	unsigned long max_load;
4256 	unsigned long busiest_load_per_task;
4257 	unsigned long busiest_nr_running;
4258 	unsigned long busiest_group_capacity;
4259 	unsigned long busiest_has_capacity;
4260 	unsigned int  busiest_group_weight;
4261 
4262 	int group_imb; /* Is there imbalance in this sd */
4263 };
4264 
4265 /*
4266  * sg_lb_stats - stats of a sched_group required for load_balancing
4267  */
4268 struct sg_lb_stats {
4269 	unsigned long avg_load; /* Avg load across the CPUs of the group */
4270 	unsigned long group_load; /* Total load over the CPUs of the group */
4271 	unsigned long sum_nr_running; /* Nr tasks running in the group */
4272 	unsigned long sum_weighted_load; /* Weighted load of group's tasks */
4273 	unsigned long group_capacity;
4274 	unsigned long idle_cpus;
4275 	unsigned long group_weight;
4276 	int group_imb; /* Is there an imbalance in the group? */
4277 	int group_has_capacity; /* Is there extra capacity in the group? */
4278 };
4279 
4280 /**
4281  * get_sd_load_idx - Obtain the load index for a given sched domain.
4282  * @sd: The sched_domain whose load_idx is to be obtained.
4283  * @idle: The idle status of the CPU for which the sd load_idx is obtained.
4284  *
4285  * Return: The load index.
4286  */
4287 static inline int get_sd_load_idx(struct sched_domain *sd,
4288 					enum cpu_idle_type idle)
4289 {
4290 	int load_idx;
4291 
4292 	switch (idle) {
4293 	case CPU_NOT_IDLE:
4294 		load_idx = sd->busy_idx;
4295 		break;
4296 
4297 	case CPU_NEWLY_IDLE:
4298 		load_idx = sd->newidle_idx;
4299 		break;
4300 	default:
4301 		load_idx = sd->idle_idx;
4302 		break;
4303 	}
4304 
4305 	return load_idx;
4306 }
4307 
4308 static unsigned long default_scale_freq_power(struct sched_domain *sd, int cpu)
4309 {
4310 	return SCHED_POWER_SCALE;
4311 }
4312 
4313 unsigned long __weak arch_scale_freq_power(struct sched_domain *sd, int cpu)
4314 {
4315 	return default_scale_freq_power(sd, cpu);
4316 }
4317 
4318 static unsigned long default_scale_smt_power(struct sched_domain *sd, int cpu)
4319 {
4320 	unsigned long weight = sd->span_weight;
4321 	unsigned long smt_gain = sd->smt_gain;
4322 
4323 	smt_gain /= weight;
4324 
4325 	return smt_gain;
4326 }
4327 
4328 unsigned long __weak arch_scale_smt_power(struct sched_domain *sd, int cpu)
4329 {
4330 	return default_scale_smt_power(sd, cpu);
4331 }
4332 
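/*
 * Fraction of SCHED_POWER_SCALE left over after rt activity, e.g.
 * (illustrative): if rt_avg accounts for ~25% of the averaging window,
 * roughly 768 of 1024 remains available to CFS tasks.
 */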
4333 static unsigned long scale_rt_power(int cpu)
4334 {
4335 	struct rq *rq = cpu_rq(cpu);
4336 	u64 total, available, age_stamp, avg;
4337 
4338 	/*
4339 	 * Since we're reading these variables without serialization, make sure
4340 	 * we read them once before doing sanity checks on them.
4341 	 */
4342 	age_stamp = ACCESS_ONCE(rq->age_stamp);
4343 	avg = ACCESS_ONCE(rq->rt_avg);
4344 
4345 	total = sched_avg_period() + (rq_clock(rq) - age_stamp);
4346 
4347 	if (unlikely(total < avg)) {
4348 		/* Ensures that power won't end up being negative */
4349 		available = 0;
4350 	} else {
4351 		available = total - avg;
4352 	}
4353 
4354 	if (unlikely((s64)total < SCHED_POWER_SCALE))
4355 		total = SCHED_POWER_SCALE;
4356 
4357 	total >>= SCHED_POWER_SHIFT;
4358 
4359 	return div_u64(available, total);
4360 }
4361 
4362 static void update_cpu_power(struct sched_domain *sd, int cpu)
4363 {
4364 	unsigned long weight = sd->span_weight;
4365 	unsigned long power = SCHED_POWER_SCALE;
4366 	struct sched_group *sdg = sd->groups;
4367 
4368 	if ((sd->flags & SD_SHARE_CPUPOWER) && weight > 1) {
4369 		if (sched_feat(ARCH_POWER))
4370 			power *= arch_scale_smt_power(sd, cpu);
4371 		else
4372 			power *= default_scale_smt_power(sd, cpu);
4373 
4374 		power >>= SCHED_POWER_SHIFT;
4375 	}
4376 
4377 	sdg->sgp->power_orig = power;
4378 
4379 	if (sched_feat(ARCH_POWER))
4380 		power *= arch_scale_freq_power(sd, cpu);
4381 	else
4382 		power *= default_scale_freq_power(sd, cpu);
4383 
4384 	power >>= SCHED_POWER_SHIFT;
4385 
4386 	power *= scale_rt_power(cpu);
4387 	power >>= SCHED_POWER_SHIFT;
4388 
4389 	if (!power)
4390 		power = 1;
4391 
4392 	cpu_rq(cpu)->cpu_power = power;
4393 	sdg->sgp->power = power;
4394 }
4395 
4396 void update_group_power(struct sched_domain *sd, int cpu)
4397 {
4398 	struct sched_domain *child = sd->child;
4399 	struct sched_group *group, *sdg = sd->groups;
4400 	unsigned long power;
4401 	unsigned long interval;
4402 
4403 	interval = msecs_to_jiffies(sd->balance_interval);
4404 	interval = clamp(interval, 1UL, max_load_balance_interval);
4405 	sdg->sgp->next_update = jiffies + interval;
4406 
4407 	if (!child) {
4408 		update_cpu_power(sd, cpu);
4409 		return;
4410 	}
4411 
4412 	power = 0;
4413 
4414 	if (child->flags & SD_OVERLAP) {
4415 		/*
4416 		 * SD_OVERLAP domains cannot assume that child groups
4417 		 * span the current group.
4418 		 */
4419 
4420 		for_each_cpu(cpu, sched_group_cpus(sdg))
4421 			power += power_of(cpu);
4422 	} else  {
4423 		/*
4424 		 * !SD_OVERLAP domains can assume that child groups
4425 		 * span the current group.
4426 		 */
4427 
4428 		group = child->groups;
4429 		do {
4430 			power += group->sgp->power;
4431 			group = group->next;
4432 		} while (group != child->groups);
4433 	}
4434 
4435 	sdg->sgp->power_orig = sdg->sgp->power = power;
4436 }
4437 
4438 /*
4439  * Try to fix up capacity for tiny siblings; this is needed when
4440  * things like SD_ASYM_PACKING need f_b_g to select another sibling
4441  * which on its own isn't powerful enough.
4442  *
4443  * See update_sd_pick_busiest() and check_asym_packing().
4444  */
4445 static inline int
4446 fix_small_capacity(struct sched_domain *sd, struct sched_group *group)
4447 {
4448 	/*
4449 	 * Only siblings can have significantly less than SCHED_POWER_SCALE
4450 	 */
4451 	if (!(sd->flags & SD_SHARE_CPUPOWER))
4452 		return 0;
4453 
4454 	/*
4455 	 * If ~90% of the cpu_power is still there, we're good.
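	 * (power * 32 > power_orig * 29 is a division-free check for
	 * power/power_orig > 29/32 ~= 90.6%.)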
4456 	 */
4457 	if (group->sgp->power * 32 > group->sgp->power_orig * 29)
4458 		return 1;
4459 
4460 	return 0;
4461 }
4462 
4463 /**
4464  * update_sg_lb_stats - Update sched_group's statistics for load balancing.
4465  * @env: The load balancing environment.
4466  * @group: sched_group whose statistics are to be updated.
4467  * @load_idx: Load index of sched_domain of this_cpu for load calc.
4468  * @local_group: Does group contain this_cpu.
4469  * @balance: Should we balance.
4470  * @sgs: variable to hold the statistics for this group.
4471  */
4472 static inline void update_sg_lb_stats(struct lb_env *env,
4473 			struct sched_group *group, int load_idx,
4474 			int local_group, int *balance, struct sg_lb_stats *sgs)
4475 {
4476 	unsigned long nr_running, max_nr_running, min_nr_running;
4477 	unsigned long load, max_cpu_load, min_cpu_load;
4478 	unsigned int balance_cpu = -1, first_idle_cpu = 0;
4479 	unsigned long avg_load_per_task = 0;
4480 	int i;
4481 
4482 	if (local_group)
4483 		balance_cpu = group_balance_cpu(group);
4484 
4485 	/* Tally up the load of all CPUs in the group */
4486 	max_cpu_load = 0;
4487 	min_cpu_load = ~0UL;
4488 	max_nr_running = 0;
4489 	min_nr_running = ~0UL;
4490 
4491 	for_each_cpu_and(i, sched_group_cpus(group), env->cpus) {
4492 		struct rq *rq = cpu_rq(i);
4493 
4494 		nr_running = rq->nr_running;
4495 
4496 		/* Bias balancing toward cpus of our domain */
4497 		if (local_group) {
4498 			if (idle_cpu(i) && !first_idle_cpu &&
4499 					cpumask_test_cpu(i, sched_group_mask(group))) {
4500 				first_idle_cpu = 1;
4501 				balance_cpu = i;
4502 			}
4503 
4504 			load = target_load(i, load_idx);
4505 		} else {
4506 			load = source_load(i, load_idx);
4507 			if (load > max_cpu_load)
4508 				max_cpu_load = load;
4509 			if (min_cpu_load > load)
4510 				min_cpu_load = load;
4511 
4512 			if (nr_running > max_nr_running)
4513 				max_nr_running = nr_running;
4514 			if (min_nr_running > nr_running)
4515 				min_nr_running = nr_running;
4516 		}
4517 
4518 		sgs->group_load += load;
4519 		sgs->sum_nr_running += nr_running;
4520 		sgs->sum_weighted_load += weighted_cpuload(i);
4521 		if (idle_cpu(i))
4522 			sgs->idle_cpus++;
4523 	}
4524 
4525 	/*
4526 	 * First idle cpu or the first cpu (busiest) in this sched group
4527 	 * is eligible for doing load balancing at this and above
4528 	 * domains. In the newly idle case, we will allow all the cpus
4529 	 * to do the newly idle load balance.
4530 	 */
4531 	if (local_group) {
4532 		if (env->idle != CPU_NEWLY_IDLE) {
4533 			if (balance_cpu != env->dst_cpu) {
4534 				*balance = 0;
4535 				return;
4536 			}
4537 			update_group_power(env->sd, env->dst_cpu);
4538 		} else if (time_after_eq(jiffies, group->sgp->next_update))
4539 			update_group_power(env->sd, env->dst_cpu);
4540 	}
4541 
4542 	/* Adjust by relative CPU power of the group */
4543 	sgs->avg_load = (sgs->group_load*SCHED_POWER_SCALE) / group->sgp->power;
4544 
4545 	/*
4546 	 * Consider the group unbalanced when the imbalance is larger
4547 	 * than the average weight of a task.
4548 	 *
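	 * E.g. (illustrative): per-cpu loads of 3072 and 1024 with an average
	 * task weight of 1024 give a spread of 2048 >= one task, so a single
	 * migration can meaningfully narrow the gap; group_imb is set below
	 * when the nr_running spread also exceeds one.
	 *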
4549 	 * APZ: with cgroup the avg task weight can vary wildly and
4550 	 *      might not be a suitable number - should we keep a
4551 	 *      normalized nr_running number somewhere that negates
4552 	 *      the hierarchy?
4553 	 */
4554 	if (sgs->sum_nr_running)
4555 		avg_load_per_task = sgs->sum_weighted_load / sgs->sum_nr_running;
4556 
4557 	if ((max_cpu_load - min_cpu_load) >= avg_load_per_task &&
4558 	    (max_nr_running - min_nr_running) > 1)
4559 		sgs->group_imb = 1;
4560 
4561 	sgs->group_capacity = DIV_ROUND_CLOSEST(group->sgp->power,
4562 						SCHED_POWER_SCALE);
4563 	if (!sgs->group_capacity)
4564 		sgs->group_capacity = fix_small_capacity(env->sd, group);
4565 	sgs->group_weight = group->group_weight;
4566 
4567 	if (sgs->group_capacity > sgs->sum_nr_running)
4568 		sgs->group_has_capacity = 1;
4569 }
4570 
4571 /**
4572  * update_sd_pick_busiest - return 1 on busiest group
4573  * @env: The load balancing environment.
4574  * @sds: sched_domain statistics
4575  * @sg: sched_group candidate to be checked for being the busiest
4576  * @sgs: sched_group statistics
4577  *
4578  * Determine if @sg is a busier group than the previously selected
4579  * busiest group.
4580  *
4581  * Return: %true if @sg is a busier group than the previously selected
4582  * busiest group. %false otherwise.
4583  */
4584 static bool update_sd_pick_busiest(struct lb_env *env,
4585 				   struct sd_lb_stats *sds,
4586 				   struct sched_group *sg,
4587 				   struct sg_lb_stats *sgs)
4588 {
4589 	if (sgs->avg_load <= sds->max_load)
4590 		return false;
4591 
4592 	if (sgs->sum_nr_running > sgs->group_capacity)
4593 		return true;
4594 
4595 	if (sgs->group_imb)
4596 		return true;
4597 
4598 	/*
4599 	 * ASYM_PACKING needs to move all the work to the lowest
4600 	 * numbered CPUs in the group, therefore mark all groups
4601 	 * higher than ourselves as busy.
4602 	 */
4603 	if ((env->sd->flags & SD_ASYM_PACKING) && sgs->sum_nr_running &&
4604 	    env->dst_cpu < group_first_cpu(sg)) {
4605 		if (!sds->busiest)
4606 			return true;
4607 
4608 		if (group_first_cpu(sds->busiest) > group_first_cpu(sg))
4609 			return true;
4610 	}
4611 
4612 	return false;
4613 }
4614 
4615 /**
4616  * update_sd_lb_stats - Update sched_domain's statistics for load balancing.
4617  * @env: The load balancing environment.
4618  * @balance: Should we balance.
4619  * @sds: variable to hold the statistics for this sched_domain.
4620  */
4621 static inline void update_sd_lb_stats(struct lb_env *env,
4622 					int *balance, struct sd_lb_stats *sds)
4623 {
4624 	struct sched_domain *child = env->sd->child;
4625 	struct sched_group *sg = env->sd->groups;
4626 	struct sg_lb_stats sgs;
4627 	int load_idx, prefer_sibling = 0;
4628 
4629 	if (child && child->flags & SD_PREFER_SIBLING)
4630 		prefer_sibling = 1;
4631 
4632 	load_idx = get_sd_load_idx(env->sd, env->idle);
4633 
4634 	do {
4635 		int local_group;
4636 
4637 		local_group = cpumask_test_cpu(env->dst_cpu, sched_group_cpus(sg));
4638 		memset(&sgs, 0, sizeof(sgs));
4639 		update_sg_lb_stats(env, sg, load_idx, local_group, balance, &sgs);
4640 
4641 		if (local_group && !(*balance))
4642 			return;
4643 
4644 		sds->total_load += sgs.group_load;
4645 		sds->total_pwr += sg->sgp->power;
4646 
4647 		/*
4648 		 * In case the child domain prefers tasks go to siblings
4649 		 * first, lower the sg capacity to one so that we'll try
4650 		 * and move all the excess tasks away. We lower the capacity
4651 		 * of a group only if the local group has the capacity to fit
4652 		 * these excess tasks, i.e. nr_running < group_capacity. The
4653 		 * extra check prevents the case where you always pull from the
4654 		 * heaviest group when it is already under-utilized (possible
4655 		 * when a single large-weight task outweighs the tasks on the system).
4656 		 */
4657 		if (prefer_sibling && !local_group && sds->this_has_capacity)
4658 			sgs.group_capacity = min(sgs.group_capacity, 1UL);
4659 
4660 		if (local_group) {
4661 			sds->this_load = sgs.avg_load;
4662 			sds->this = sg;
4663 			sds->this_nr_running = sgs.sum_nr_running;
4664 			sds->this_load_per_task = sgs.sum_weighted_load;
4665 			sds->this_has_capacity = sgs.group_has_capacity;
4666 			sds->this_idle_cpus = sgs.idle_cpus;
4667 		} else if (update_sd_pick_busiest(env, sds, sg, &sgs)) {
4668 			sds->max_load = sgs.avg_load;
4669 			sds->busiest = sg;
4670 			sds->busiest_nr_running = sgs.sum_nr_running;
4671 			sds->busiest_idle_cpus = sgs.idle_cpus;
4672 			sds->busiest_group_capacity = sgs.group_capacity;
4673 			sds->busiest_load_per_task = sgs.sum_weighted_load;
4674 			sds->busiest_has_capacity = sgs.group_has_capacity;
4675 			sds->busiest_group_weight = sgs.group_weight;
4676 			sds->group_imb = sgs.group_imb;
4677 		}
4678 
4679 		sg = sg->next;
4680 	} while (sg != env->sd->groups);
4681 }
4682 
4683 /**
4684  * check_asym_packing - Check to see if the group is packed into the
4685  *			sched domain.
4686  *
4687  * This is primarily intended to be used at the sibling level.  Some
4688  * cores like POWER7 prefer to use lower numbered SMT threads.  In the
4689  * case of POWER7, it can move to lower SMT modes only when higher
4690  * threads are idle.  When in lower SMT modes, the threads will
4691  * perform better since they share less core resources.  Hence when we
4692  * have idle threads, we want them to be the higher ones.
4693  *
4694  * This packing function is run on idle threads.  It checks to see if
4695  * the busiest CPU in this domain (core in the P7 case) has a higher
4696  * CPU number than the packing function is being run on.  Here we are
4697  * assuming a lower CPU number will be equivalent to a lower SMT thread
4698  * number.
4699  *
4700  * Return: 1 when packing is required and a task should be moved to
4701  * this CPU.  The amount of the imbalance is returned in env->imbalance.
4702  *
4703  * @env: The load balancing environment.
4704  * @sds: Statistics of the sched_domain which is to be packed
4705  */
4706 static int check_asym_packing(struct lb_env *env, struct sd_lb_stats *sds)
4707 {
4708 	int busiest_cpu;
4709 
4710 	if (!(env->sd->flags & SD_ASYM_PACKING))
4711 		return 0;
4712 
4713 	if (!sds->busiest)
4714 		return 0;
4715 
4716 	busiest_cpu = group_first_cpu(sds->busiest);
4717 	if (env->dst_cpu > busiest_cpu)
4718 		return 0;
4719 
4720 	env->imbalance = DIV_ROUND_CLOSEST(
4721 		sds->max_load * sds->busiest->sgp->power, SCHED_POWER_SCALE);
4722 
4723 	return 1;
4724 }
4725 
4726 /**
4727  * fix_small_imbalance - Calculate the minor imbalance that exists
4728  *			amongst the groups of a sched_domain, during
4729  *			load balancing.
4730  * @env: The load balancing environment.
4731  * @sds: Statistics of the sched_domain whose imbalance is to be calculated.
4732  */
4733 static inline
4734 void fix_small_imbalance(struct lb_env *env, struct sd_lb_stats *sds)
4735 {
4736 	unsigned long tmp, pwr_now = 0, pwr_move = 0;
4737 	unsigned int imbn = 2;
4738 	unsigned long scaled_busy_load_per_task;
4739 
4740 	if (sds->this_nr_running) {
4741 		sds->this_load_per_task /= sds->this_nr_running;
4742 		if (sds->busiest_load_per_task >
4743 				sds->this_load_per_task)
4744 			imbn = 1;
4745 	} else {
4746 		sds->this_load_per_task =
4747 			cpu_avg_load_per_task(env->dst_cpu);
4748 	}
4749 
4750 	scaled_busy_load_per_task = sds->busiest_load_per_task
4751 					 * SCHED_POWER_SCALE;
4752 	scaled_busy_load_per_task /= sds->busiest->sgp->power;
4753 
4754 	if (sds->max_load - sds->this_load + scaled_busy_load_per_task >=
4755 			(scaled_busy_load_per_task * imbn)) {
4756 		env->imbalance = sds->busiest_load_per_task;
4757 		return;
4758 	}
4759 
4760 	/*
4761 	 * OK, we don't have enough imbalance to justify moving tasks;
4762 	 * however, we may be able to increase the total CPU power used by
4763 	 * moving them.
4764 	 */
4765 
4766 	pwr_now += sds->busiest->sgp->power *
4767 			min(sds->busiest_load_per_task, sds->max_load);
4768 	pwr_now += sds->this->sgp->power *
4769 			min(sds->this_load_per_task, sds->this_load);
4770 	pwr_now /= SCHED_POWER_SCALE;
4771 
4772 	/* Amount of load we'd subtract */
4773 	tmp = (sds->busiest_load_per_task * SCHED_POWER_SCALE) /
4774 		sds->busiest->sgp->power;
4775 	if (sds->max_load > tmp)
4776 		pwr_move += sds->busiest->sgp->power *
4777 			min(sds->busiest_load_per_task, sds->max_load - tmp);
4778 
4779 	/* Amount of load we'd add */
4780 	if (sds->max_load * sds->busiest->sgp->power <
4781 		sds->busiest_load_per_task * SCHED_POWER_SCALE)
4782 		tmp = (sds->max_load * sds->busiest->sgp->power) /
4783 			sds->this->sgp->power;
4784 	else
4785 		tmp = (sds->busiest_load_per_task * SCHED_POWER_SCALE) /
4786 			sds->this->sgp->power;
4787 	pwr_move += sds->this->sgp->power *
4788 			min(sds->this_load_per_task, sds->this_load + tmp);
4789 	pwr_move /= SCHED_POWER_SCALE;
4790 
4791 	/* Move if we gain throughput */
4792 	if (pwr_move > pwr_now)
4793 		env->imbalance = sds->busiest_load_per_task;
4794 }
4795 
4796 /**
4797  * calculate_imbalance - Calculate the amount of imbalance present within the
4798  *			 groups of a given sched_domain during load balance.
4799  * @env: load balance environment
4800  * @sds: statistics of the sched_domain whose imbalance is to be calculated.
4801  */
4802 static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *sds)
4803 {
4804 	unsigned long max_pull, load_above_capacity = ~0UL;
4805 
4806 	sds->busiest_load_per_task /= sds->busiest_nr_running;
4807 	if (sds->group_imb) {
4808 		sds->busiest_load_per_task =
4809 			min(sds->busiest_load_per_task, sds->avg_load);
4810 	}
4811 
4812 	/*
4813 	 * In the presence of smp nice balancing, certain scenarios can have
4814 	 * max load less than avg load (as we skip the groups at or below
4815 	 * their cpu_power while calculating max_load).
4816 	 */
4817 	if (sds->max_load < sds->avg_load) {
4818 		env->imbalance = 0;
4819 		return fix_small_imbalance(env, sds);
4820 	}
4821 
4822 	if (!sds->group_imb) {
4823 		/*
4824 		 * Don't want to pull so many tasks that a group would go idle.
4825 		 */
4826 		load_above_capacity = (sds->busiest_nr_running -
4827 						sds->busiest_group_capacity);
4828 
4829 		load_above_capacity *= (SCHED_LOAD_SCALE * SCHED_POWER_SCALE);
4830 
4831 		load_above_capacity /= sds->busiest->sgp->power;
4832 	}
4833 
4834 	/*
4835 	 * We're trying to get all the cpus to the average_load, so we don't
4836 	 * want to push ourselves above the average load, nor do we wish to
4837 	 * reduce the max loaded cpu below the average load. At the same time,
4838 	 * we also don't want to reduce the group load below the group capacity
4839 	 * (so that we can implement power-savings policies etc). Thus we look
4840 	 * for the minimum possible imbalance.
4841 	 * Be careful of negative numbers as they'll appear as very large values
4842 	 * with unsigned longs.
4843 	 */
4844 	max_pull = min(sds->max_load - sds->avg_load, load_above_capacity);
4845 
4846 	/* How much load to actually move to equalise the imbalance */
4847 	env->imbalance = min(max_pull * sds->busiest->sgp->power,
4848 		(sds->avg_load - sds->this_load) * sds->this->sgp->power)
4849 			/ SCHED_POWER_SCALE;
4850 
4851 	/*
4852 	 * If env->imbalance is less than the average load per runnable task,
4853 	 * there is no guarantee that any tasks will be moved, so consider
4854 	 * bumping its value to force at least one task to be
4855 	 * moved.
4856 	 */
4857 	if (env->imbalance < sds->busiest_load_per_task)
4858 		return fix_small_imbalance(env, sds);
4859 
4860 }
4861 
4862 /******* find_busiest_group() helpers end here *********************/
4863 
4864 /**
4865  * find_busiest_group - Returns the busiest group within the sched_domain
4866  * if there is an imbalance. If there isn't an imbalance, and
4867  * the user has opted for power-savings, it returns a group whose
4868  * CPUs can be put to idle by rebalancing those tasks elsewhere, if
4869  * such a group exists.
4870  *
4871  * Also calculates the amount of weighted load which should be moved
4872  * to restore balance.
4873  *
4874  * @env: The load balancing environment.
4875  * @balance: Pointer to a variable indicating if this_cpu
4876  *	is the appropriate cpu to perform load balancing at this_level.
4877  *
4878  * Return:	- The busiest group if imbalance exists.
4879  *		- If no imbalance and user has opted for power-savings balance,
4880  *		   return the least loaded group whose CPUs can be
4881  *		   put to idle by rebalancing its tasks onto our group.
4882  */
4883 static struct sched_group *
4884 find_busiest_group(struct lb_env *env, int *balance)
4885 {
4886 	struct sd_lb_stats sds;
4887 
4888 	memset(&sds, 0, sizeof(sds));
4889 
4890 	/*
4891 	 * Compute the various statistics relevant for load balancing at
4892 	 * this level.
4893 	 */
4894 	update_sd_lb_stats(env, balance, &sds);
4895 
4896 	/*
4897 	 * this_cpu is not the appropriate cpu to perform load balancing at
4898 	 * this level.
4899 	 */
4900 	if (!(*balance))
4901 		goto ret;
4902 
4903 	if ((env->idle == CPU_IDLE || env->idle == CPU_NEWLY_IDLE) &&
4904 	    check_asym_packing(env, &sds))
4905 		return sds.busiest;
4906 
4907 	/* There is no busy sibling group to pull tasks from */
4908 	if (!sds.busiest || sds.busiest_nr_running == 0)
4909 		goto out_balanced;
4910 
4911 	sds.avg_load = (SCHED_POWER_SCALE * sds.total_load) / sds.total_pwr;
4912 
4913 	/*
4914 	 * If the busiest group is imbalanced the below checks don't
4915 	 * work because they assume all things are equal, which typically
4916 	 * isn't true due to cpus_allowed constraints and the like.
4917 	 */
4918 	if (sds.group_imb)
4919 		goto force_balance;
4920 
4921 	/* SD_BALANCE_NEWIDLE trumps SMP nice when underutilized */
4922 	if (env->idle == CPU_NEWLY_IDLE && sds.this_has_capacity &&
4923 			!sds.busiest_has_capacity)
4924 		goto force_balance;
4925 
4926 	/*
4927 	 * If the local group is more busy than the selected busiest group
4928 	 * don't try and pull any tasks.
4929 	 */
4930 	if (sds.this_load >= sds.max_load)
4931 		goto out_balanced;
4932 
4933 	/*
4934 	 * Don't pull any tasks if this group is already above the domain
4935 	 * average load.
4936 	 */
4937 	if (sds.this_load >= sds.avg_load)
4938 		goto out_balanced;
4939 
4940 	if (env->idle == CPU_IDLE) {
4941 		/*
4942 		 * This cpu is idle. If the busiest group doesn't have
4943 		 * more tasks than the number of available cpus and
4944 		 * there is no imbalance between this and the busiest group
4945 		 * wrt idle cpus, it is balanced.
4946 		 */
4947 		if ((sds.this_idle_cpus <= sds.busiest_idle_cpus + 1) &&
4948 		    sds.busiest_nr_running <= sds.busiest_group_weight)
4949 			goto out_balanced;
4950 	} else {
4951 		/*
4952 		 * In the CPU_NEWLY_IDLE, CPU_NOT_IDLE cases, use
4953 		 * imbalance_pct to be conservative.
4954 		 */
4955 		if (100 * sds.max_load <= env->sd->imbalance_pct * sds.this_load)
4956 			goto out_balanced;
4957 	}
4958 
4959 force_balance:
4960 	/* Looks like there is an imbalance. Compute it */
4961 	calculate_imbalance(env, &sds);
4962 	return sds.busiest;
4963 
4964 out_balanced:
4965 ret:
4966 	env->imbalance = 0;
4967 	return NULL;
4968 }
4969 
4970 /*
4971  * find_busiest_queue - find the busiest runqueue among the cpus in group.
4972  */
4973 static struct rq *find_busiest_queue(struct lb_env *env,
4974 				     struct sched_group *group)
4975 {
4976 	struct rq *busiest = NULL, *rq;
4977 	unsigned long max_load = 0;
4978 	int i;
4979 
4980 	for_each_cpu(i, sched_group_cpus(group)) {
4981 		unsigned long power = power_of(i);
4982 		unsigned long capacity = DIV_ROUND_CLOSEST(power,
4983 							   SCHED_POWER_SCALE);
4984 		unsigned long wl;
4985 
4986 		if (!capacity)
4987 			capacity = fix_small_capacity(env->sd, group);
4988 
4989 		if (!cpumask_test_cpu(i, env->cpus))
4990 			continue;
4991 
4992 		rq = cpu_rq(i);
4993 		wl = weighted_cpuload(i);
4994 
4995 		/*
4996 		 * When comparing with imbalance, use weighted_cpuload()
4997 		 * which is not scaled with the cpu power.
4998 		 */
4999 		if (capacity && rq->nr_running == 1 && wl > env->imbalance)
5000 			continue;
5001 
5002 		/*
5003 		 * For the load comparisons with the other cpus, consider
5004 		 * the weighted_cpuload() scaled with the cpu power, so that
5005 		 * the load can be moved away from the cpu that is potentially
5006 		 * running at a lower capacity.
5007 		 */
5008 		wl = (wl * SCHED_POWER_SCALE) / power;
5009 
5010 		if (wl > max_load) {
5011 			max_load = wl;
5012 			busiest = rq;
5013 		}
5014 	}
5015 
5016 	return busiest;
5017 }
5018 
5019 /*
5020  * Max backoff if we encounter pinned tasks. Pretty arbitrary value; the
5021  * exact number doesn't matter, so long as it is large enough.
5022  */
5023 #define MAX_PINNED_INTERVAL	512
5024 
5025 /* Working cpumask for load_balance and load_balance_newidle. */
5026 DEFINE_PER_CPU(cpumask_var_t, load_balance_mask);
5027 
5028 static int need_active_balance(struct lb_env *env)
5029 {
5030 	struct sched_domain *sd = env->sd;
5031 
5032 	if (env->idle == CPU_NEWLY_IDLE) {
5033 
5034 		/*
5035 		 * ASYM_PACKING needs to force migrate tasks from busy but
5036 		 * higher numbered CPUs in order to pack all tasks in the
5037 		 * lowest numbered CPUs.
5038 		 */
5039 		if ((sd->flags & SD_ASYM_PACKING) && env->src_cpu > env->dst_cpu)
5040 			return 1;
5041 	}
5042 
5043 	return unlikely(sd->nr_balance_failed > sd->cache_nice_tries+2);
5044 }
5045 
5046 static int active_load_balance_cpu_stop(void *data);
5047 
5048 /*
5049  * Check this_cpu to ensure it is balanced within domain. Attempt to move
5050  * tasks if there is an imbalance.
5051  */
5052 static int load_balance(int this_cpu, struct rq *this_rq,
5053 			struct sched_domain *sd, enum cpu_idle_type idle,
5054 			int *balance)
5055 {
5056 	int ld_moved, cur_ld_moved, active_balance = 0;
5057 	struct sched_group *group;
5058 	struct rq *busiest;
5059 	unsigned long flags;
5060 	struct cpumask *cpus = __get_cpu_var(load_balance_mask);
5061 
5062 	struct lb_env env = {
5063 		.sd		= sd,
5064 		.dst_cpu	= this_cpu,
5065 		.dst_rq		= this_rq,
5066 		.dst_grpmask    = sched_group_cpus(sd->groups),
5067 		.idle		= idle,
5068 		.loop_break	= sched_nr_migrate_break,
5069 		.cpus		= cpus,
5070 	};
5071 
5072 	/*
5073 	 * For NEWLY_IDLE load_balancing, we don't need to consider
5074 	 * other cpus in our group
5075 	 */
5076 	if (idle == CPU_NEWLY_IDLE)
5077 		env.dst_grpmask = NULL;
5078 
5079 	cpumask_copy(cpus, cpu_active_mask);
5080 
5081 	schedstat_inc(sd, lb_count[idle]);
5082 
5083 redo:
5084 	group = find_busiest_group(&env, balance);
5085 
5086 	if (*balance == 0)
5087 		goto out_balanced;
5088 
5089 	if (!group) {
5090 		schedstat_inc(sd, lb_nobusyg[idle]);
5091 		goto out_balanced;
5092 	}
5093 
5094 	busiest = find_busiest_queue(&env, group);
5095 	if (!busiest) {
5096 		schedstat_inc(sd, lb_nobusyq[idle]);
5097 		goto out_balanced;
5098 	}
5099 
5100 	BUG_ON(busiest == env.dst_rq);
5101 
5102 	schedstat_add(sd, lb_imbalance[idle], env.imbalance);
5103 
5104 	ld_moved = 0;
5105 	if (busiest->nr_running > 1) {
5106 		/*
5107 		 * Attempt to move tasks. If find_busiest_group has found
5108 		 * an imbalance but busiest->nr_running <= 1, the group is
5109 		 * still unbalanced. ld_moved simply stays zero, so it is
5110 		 * correctly treated as an imbalance.
5111 		 */
5112 		env.flags |= LBF_ALL_PINNED;
5113 		env.src_cpu   = busiest->cpu;
5114 		env.src_rq    = busiest;
5115 		env.loop_max  = min(sysctl_sched_nr_migrate, busiest->nr_running);
5116 
5117 		update_h_load(env.src_cpu);
5118 more_balance:
5119 		local_irq_save(flags);
5120 		double_rq_lock(env.dst_rq, busiest);
5121 
5122 		/*
5123 		 * cur_ld_moved - load moved in current iteration
5124 		 * ld_moved     - cumulative load moved across iterations
5125 		 */
5126 		cur_ld_moved = move_tasks(&env);
5127 		ld_moved += cur_ld_moved;
5128 		double_rq_unlock(env.dst_rq, busiest);
5129 		local_irq_restore(flags);
5130 
5131 		/*
5132 		 * some other cpu did the load balance for us.
5133 		 */
5134 		if (cur_ld_moved && env.dst_cpu != smp_processor_id())
5135 			resched_cpu(env.dst_cpu);
5136 
5137 		if (env.flags & LBF_NEED_BREAK) {
5138 			env.flags &= ~LBF_NEED_BREAK;
5139 			goto more_balance;
5140 		}
5141 
5142 		/*
5143 		 * Revisit (affine) tasks on src_cpu that couldn't be moved to
5144 		 * us and move them to an alternate dst_cpu in our sched_group
5145 		 * where they can run. The upper limit on how many times we
5146 		 * iterate on the same src_cpu depends on the number of cpus in our
5147 		 * sched_group.
5148 		 *
5149 		 * This changes load balance semantics a bit on who can move
5150 		 * load to a given_cpu. In addition to the given_cpu itself
5151 		 * (or an ilb_cpu acting on its behalf where given_cpu is
5152 		 * nohz-idle), we now have balance_cpu in a position to move
5153 		 * load to given_cpu. In rare situations, this may cause
5154 		 * conflicts (balance_cpu and given_cpu/ilb_cpu deciding
5155 		 * _independently_ and at _same_ time to move some load to
5156 		 * given_cpu) causing excess load to be moved to given_cpu.
5157 		 * This however should not happen so much in practice and
5158 		 * moreover subsequent load balance cycles should correct the
5159 		 * excess load moved.
5160 		 */
5161 		if ((env.flags & LBF_SOME_PINNED) && env.imbalance > 0) {
5162 
5163 			env.dst_rq	 = cpu_rq(env.new_dst_cpu);
5164 			env.dst_cpu	 = env.new_dst_cpu;
5165 			env.flags	&= ~LBF_SOME_PINNED;
5166 			env.loop	 = 0;
5167 			env.loop_break	 = sched_nr_migrate_break;
5168 
5169 			/* Prevent to re-select dst_cpu via env's cpus */
5170 			cpumask_clear_cpu(env.dst_cpu, env.cpus);
5171 
5172 			/*
5173 			 * Go back to "more_balance" rather than "redo" since we
5174 			 * need to continue with same src_cpu.
5175 			 */
5176 			goto more_balance;
5177 		}
5178 
5179 		/* All tasks on this runqueue were pinned by CPU affinity */
5180 		if (unlikely(env.flags & LBF_ALL_PINNED)) {
5181 			cpumask_clear_cpu(cpu_of(busiest), cpus);
5182 			if (!cpumask_empty(cpus)) {
5183 				env.loop = 0;
5184 				env.loop_break = sched_nr_migrate_break;
5185 				goto redo;
5186 			}
5187 			goto out_balanced;
5188 		}
5189 	}
5190 
5191 	if (!ld_moved) {
5192 		schedstat_inc(sd, lb_failed[idle]);
5193 		/*
5194 		 * Increment the failure counter only on periodic balance.
5195 		 * We do not want newidle balance, which can be very
5196 		 * frequent, to pollute the failure counter, causing
5197 		 * excessive cache_hot migrations and active balances.
5198 		 */
5199 		if (idle != CPU_NEWLY_IDLE)
5200 			sd->nr_balance_failed++;
5201 
5202 		if (need_active_balance(&env)) {
5203 			raw_spin_lock_irqsave(&busiest->lock, flags);
5204 
5205 			/* don't kick the active_load_balance_cpu_stop
5206 			 * if the curr task on busiest cpu can't be
5207 			 * moved to this_cpu
5208 			 */
5209 			if (!cpumask_test_cpu(this_cpu,
5210 					tsk_cpus_allowed(busiest->curr))) {
5211 				raw_spin_unlock_irqrestore(&busiest->lock,
5212 							    flags);
5213 				env.flags |= LBF_ALL_PINNED;
5214 				goto out_one_pinned;
5215 			}
5216 
5217 			/*
5218 			 * ->active_balance synchronizes accesses to
5219 			 * ->active_balance_work.  Once set, it's cleared
5220 			 * only after active load balance is finished.
5221 			 */
5222 			if (!busiest->active_balance) {
5223 				busiest->active_balance = 1;
5224 				busiest->push_cpu = this_cpu;
5225 				active_balance = 1;
5226 			}
5227 			raw_spin_unlock_irqrestore(&busiest->lock, flags);
5228 
5229 			if (active_balance) {
5230 				stop_one_cpu_nowait(cpu_of(busiest),
5231 					active_load_balance_cpu_stop, busiest,
5232 					&busiest->active_balance_work);
5233 			}
5234 
5235 			/*
5236 			 * We've kicked active balancing, reset the failure
5237 			 * counter.
5238 			 */
5239 			sd->nr_balance_failed = sd->cache_nice_tries+1;
5240 		}
5241 	} else
5242 		sd->nr_balance_failed = 0;
5243 
5244 	if (likely(!active_balance)) {
5245 		/* We were unbalanced, so reset the balancing interval */
5246 		sd->balance_interval = sd->min_interval;
5247 	} else {
5248 		/*
5249 		 * If we've begun active balancing, start to back off. This
5250 		 * case may not be covered by the all_pinned logic if there
5251 		 * is only 1 task on the busy runqueue (because we don't call
5252 		 * move_tasks).
5253 		 */
5254 		if (sd->balance_interval < sd->max_interval)
5255 			sd->balance_interval *= 2;
5256 	}
5257 
5258 	goto out;
5259 
5260 out_balanced:
5261 	schedstat_inc(sd, lb_balanced[idle]);
5262 
5263 	sd->nr_balance_failed = 0;
5264 
5265 out_one_pinned:
5266 	/* tune up the balancing interval */
5267 	if (((env.flags & LBF_ALL_PINNED) &&
5268 			sd->balance_interval < MAX_PINNED_INTERVAL) ||
5269 			(sd->balance_interval < sd->max_interval))
5270 		sd->balance_interval *= 2;
5271 
5272 	ld_moved = 0;
5273 out:
5274 	return ld_moved;
5275 }
5276 
5277 /*
5278  * idle_balance is called by schedule() if this_cpu is about to become
5279  * idle. Attempts to pull tasks from other CPUs.
5280  */
5281 void idle_balance(int this_cpu, struct rq *this_rq)
5282 {
5283 	struct sched_domain *sd;
5284 	int pulled_task = 0;
5285 	unsigned long next_balance = jiffies + HZ;
5286 
5287 	this_rq->idle_stamp = rq_clock(this_rq);
5288 
5289 	if (this_rq->avg_idle < sysctl_sched_migration_cost)
5290 		return;
5291 
5292 	/*
5293 	 * Drop the rq->lock, but keep IRQ/preempt disabled.
5294 	 */
5295 	raw_spin_unlock(&this_rq->lock);
5296 
5297 	update_blocked_averages(this_cpu);
5298 	rcu_read_lock();
5299 	for_each_domain(this_cpu, sd) {
5300 		unsigned long interval;
5301 		int balance = 1;
5302 
5303 		if (!(sd->flags & SD_LOAD_BALANCE))
5304 			continue;
5305 
5306 		if (sd->flags & SD_BALANCE_NEWIDLE) {
5307 			/* If we've pulled tasks over, stop searching: */
5308 			pulled_task = load_balance(this_cpu, this_rq,
5309 						   sd, CPU_NEWLY_IDLE, &balance);
5310 		}
5311 
5312 		interval = msecs_to_jiffies(sd->balance_interval);
5313 		if (time_after(next_balance, sd->last_balance + interval))
5314 			next_balance = sd->last_balance + interval;
5315 		if (pulled_task) {
5316 			this_rq->idle_stamp = 0;
5317 			break;
5318 		}
5319 	}
5320 	rcu_read_unlock();
5321 
5322 	raw_spin_lock(&this_rq->lock);
5323 
5324 	if (pulled_task || time_after(jiffies, this_rq->next_balance)) {
5325 		/*
5326 		 * We are going idle. next_balance may be set based on
5327 		 * a busy processor. So reset next_balance.
5328 		 */
5329 		this_rq->next_balance = next_balance;
5330 	}
5331 }
5332 
5333 /*
5334  * active_load_balance_cpu_stop is run by cpu stopper. It pushes
5335  * running tasks off the busiest CPU onto idle CPUs. It requires at
5336  * least 1 task to be running on each physical CPU where possible, and
5337  * avoids physical / logical imbalances.
5338  */
5339 static int active_load_balance_cpu_stop(void *data)
5340 {
5341 	struct rq *busiest_rq = data;
5342 	int busiest_cpu = cpu_of(busiest_rq);
5343 	int target_cpu = busiest_rq->push_cpu;
5344 	struct rq *target_rq = cpu_rq(target_cpu);
5345 	struct sched_domain *sd;
5346 
5347 	raw_spin_lock_irq(&busiest_rq->lock);
5348 
5349 	/* make sure the requested cpu hasn't gone down in the meantime */
5350 	if (unlikely(busiest_cpu != smp_processor_id() ||
5351 		     !busiest_rq->active_balance))
5352 		goto out_unlock;
5353 
5354 	/* Is there any task to move? */
5355 	if (busiest_rq->nr_running <= 1)
5356 		goto out_unlock;
5357 
5358 	/*
5359 	 * This condition is "impossible"; if it occurs
5360 	 * we need to fix it. Originally reported by
5361 	 * Bjorn Helgaas on a 128-cpu setup.
5362 	 */
5363 	BUG_ON(busiest_rq == target_rq);
5364 
5365 	/* move a task from busiest_rq to target_rq */
5366 	double_lock_balance(busiest_rq, target_rq);
5367 
5368 	/* Search for an sd spanning us and the target CPU. */
5369 	rcu_read_lock();
5370 	for_each_domain(target_cpu, sd) {
5371 		if ((sd->flags & SD_LOAD_BALANCE) &&
5372 		    cpumask_test_cpu(busiest_cpu, sched_domain_span(sd)))
5373 				break;
5374 	}
5375 
5376 	if (likely(sd)) {
5377 		struct lb_env env = {
5378 			.sd		= sd,
5379 			.dst_cpu	= target_cpu,
5380 			.dst_rq		= target_rq,
5381 			.src_cpu	= busiest_rq->cpu,
5382 			.src_rq		= busiest_rq,
5383 			.idle		= CPU_IDLE,
5384 		};
5385 
5386 		schedstat_inc(sd, alb_count);
5387 
5388 		if (move_one_task(&env))
5389 			schedstat_inc(sd, alb_pushed);
5390 		else
5391 			schedstat_inc(sd, alb_failed);
5392 	}
5393 	rcu_read_unlock();
5394 	double_unlock_balance(busiest_rq, target_rq);
5395 out_unlock:
5396 	busiest_rq->active_balance = 0;
5397 	raw_spin_unlock_irq(&busiest_rq->lock);
5398 	return 0;
5399 }
5400 
5401 #ifdef CONFIG_NO_HZ_COMMON
5402 /*
5403  * idle load balancing details
5404  * - When one of the busy CPUs notices that there may be an idle rebalancing
5405  *   needed, it will kick the idle load balancer, which then does idle
5406  *   load balancing for all the idle CPUs.
5407  */
5408 static struct {
5409 	cpumask_var_t idle_cpus_mask;
5410 	atomic_t nr_cpus;
5411 	unsigned long next_balance;     /* in jiffy units */
5412 } nohz ____cacheline_aligned;
5413 
5414 static inline int find_new_ilb(int call_cpu)
5415 {
5416 	int ilb = cpumask_first(nohz.idle_cpus_mask);
5417 
5418 	if (ilb < nr_cpu_ids && idle_cpu(ilb))
5419 		return ilb;
5420 
5421 	return nr_cpu_ids;
5422 }
5423 
5424 /*
5425  * Kick a CPU to do the nohz balancing, if it is time for it. We pick the
5426  * nohz_load_balancer CPU (if there is one), otherwise fall back to any idle
5427  * CPU (if there is one).
5428  */
5429 static void nohz_balancer_kick(int cpu)
5430 {
5431 	int ilb_cpu;
5432 
5433 	nohz.next_balance++;
5434 
5435 	ilb_cpu = find_new_ilb(cpu);
5436 
5437 	if (ilb_cpu >= nr_cpu_ids)
5438 		return;
5439 
5440 	if (test_and_set_bit(NOHZ_BALANCE_KICK, nohz_flags(ilb_cpu)))
5441 		return;
5442 	/*
5443 	 * Use smp_send_reschedule() instead of resched_cpu().
5444 	 * This way we generate a sched IPI on the target cpu which
5445 	 * is idle. And the softirq performing nohz idle load balance
5446 	 * will be run before returning from the IPI.
5447 	 */
5448 	smp_send_reschedule(ilb_cpu);
5449 	return;
5450 }
5451 
5452 static inline void nohz_balance_exit_idle(int cpu)
5453 {
5454 	if (unlikely(test_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu)))) {
5455 		cpumask_clear_cpu(cpu, nohz.idle_cpus_mask);
5456 		atomic_dec(&nohz.nr_cpus);
5457 		clear_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu));
5458 	}
5459 }
5460 
5461 static inline void set_cpu_sd_state_busy(void)
5462 {
5463 	struct sched_domain *sd;
5464 
5465 	rcu_read_lock();
5466 	sd = rcu_dereference_check_sched_domain(this_rq()->sd);
5467 
5468 	if (!sd || !sd->nohz_idle)
5469 		goto unlock;
5470 	sd->nohz_idle = 0;
5471 
5472 	for (; sd; sd = sd->parent)
5473 		atomic_inc(&sd->groups->sgp->nr_busy_cpus);
5474 unlock:
5475 	rcu_read_unlock();
5476 }
5477 
5478 void set_cpu_sd_state_idle(void)
5479 {
5480 	struct sched_domain *sd;
5481 
5482 	rcu_read_lock();
5483 	sd = rcu_dereference_check_sched_domain(this_rq()->sd);
5484 
5485 	if (!sd || sd->nohz_idle)
5486 		goto unlock;
5487 	sd->nohz_idle = 1;
5488 
5489 	for (; sd; sd = sd->parent)
5490 		atomic_dec(&sd->groups->sgp->nr_busy_cpus);
5491 unlock:
5492 	rcu_read_unlock();
5493 }
5494 
5495 /*
5496  * This routine records that the cpu is going idle with its tick stopped.
5497  * This info will be used later when performing idle load balancing.
5498  */
5499 void nohz_balance_enter_idle(int cpu)
5500 {
5501 	/*
5502 	 * If this cpu is going down, then nothing needs to be done.
5503 	 */
5504 	if (!cpu_active(cpu))
5505 		return;
5506 
5507 	if (test_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu)))
5508 		return;
5509 
5510 	cpumask_set_cpu(cpu, nohz.idle_cpus_mask);
5511 	atomic_inc(&nohz.nr_cpus);
5512 	set_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu));
5513 }
5514 
5515 static int sched_ilb_notifier(struct notifier_block *nfb,
5516 					unsigned long action, void *hcpu)
5517 {
5518 	switch (action & ~CPU_TASKS_FROZEN) {
5519 	case CPU_DYING:
5520 		nohz_balance_exit_idle(smp_processor_id());
5521 		return NOTIFY_OK;
5522 	default:
5523 		return NOTIFY_DONE;
5524 	}
5525 }
5526 #endif
5527 
5528 static DEFINE_SPINLOCK(balancing);
5529 
5530 /*
5531  * Scale the max load_balance interval with the number of CPUs in the system.
5532  * This trades load-balance latency on larger machines for less cross talk.
5533  */
5534 void update_max_interval(void)
5535 {
5536 	max_load_balance_interval = HZ*num_online_cpus()/10;
5537 }
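
/*
 * Worked example, assuming HZ=250: with 16 CPUs online the ceiling is
 * 250 * 16 / 10 = 400 jiffies (1.6s); with 4 CPUs it is 100 jiffies
 * (0.4s). rebalance_domains() clamps every domain's interval to this.
 */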
5538 
5539 /*
5540  * Check each scheduling domain of this cpu to see if it is due to be
5541  * balanced, and initiate a balancing operation if so.
5542  *
5543  * Balancing parameters are set up in init_sched_domains.
5544  */
5545 static void rebalance_domains(int cpu, enum cpu_idle_type idle)
5546 {
5547 	int balance = 1;
5548 	struct rq *rq = cpu_rq(cpu);
5549 	unsigned long interval;
5550 	struct sched_domain *sd;
5551 	/* Earliest time when we have to do rebalance again */
5552 	unsigned long next_balance = jiffies + 60*HZ;
5553 	int update_next_balance = 0;
5554 	int need_serialize;
5555 
5556 	update_blocked_averages(cpu);
5557 
5558 	rcu_read_lock();
5559 	for_each_domain(cpu, sd) {
5560 		if (!(sd->flags & SD_LOAD_BALANCE))
5561 			continue;
5562 
5563 		interval = sd->balance_interval;
5564 		if (idle != CPU_IDLE)
5565 			interval *= sd->busy_factor;
5566 
5567 		/* scale ms to jiffies */
5568 		interval = msecs_to_jiffies(interval);
5569 		interval = clamp(interval, 1UL, max_load_balance_interval);
5570 
5571 		need_serialize = sd->flags & SD_SERIALIZE;
5572 
5573 		if (need_serialize) {
5574 			if (!spin_trylock(&balancing))
5575 				goto out;
5576 		}
5577 
5578 		if (time_after_eq(jiffies, sd->last_balance + interval)) {
5579 			if (load_balance(cpu, rq, sd, idle, &balance)) {
5580 				/*
5581 				 * The LBF_SOME_PINNED logic could have changed
5582 				 * env->dst_cpu, so we can't know our idle
5583 				 * state even if we migrated tasks. Update it.
5584 				 */
5585 				idle = idle_cpu(cpu) ? CPU_IDLE : CPU_NOT_IDLE;
5586 			}
5587 			sd->last_balance = jiffies;
5588 		}
5589 		if (need_serialize)
5590 			spin_unlock(&balancing);
5591 out:
5592 		if (time_after(next_balance, sd->last_balance + interval)) {
5593 			next_balance = sd->last_balance + interval;
5594 			update_next_balance = 1;
5595 		}
5596 
5597 		/*
5598 		 * Stop the load balance at this level. There is another
5599 		 * CPU in our sched group which is doing load balancing more
5600 		 * actively.
5601 		 */
5602 		if (!balance)
5603 			break;
5604 	}
5605 	rcu_read_unlock();
5606 
5607 	/*
5608 	 * next_balance will be updated only when there is a need.
5609 	 * When the cpu is attached to a null domain, for example, it will
5610 	 * not be updated.
5611 	 */
5612 	if (likely(update_next_balance))
5613 		rq->next_balance = next_balance;
5614 }
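
/*
 * Interval arithmetic above, with illustrative numbers: for a domain
 * with balance_interval = 8 (ms) and busy_factor = 32, a busy CPU
 * rebalances at most every msecs_to_jiffies(8 * 32) = 256ms worth of
 * jiffies, clamped to [1, max_load_balance_interval]; an idle CPU skips
 * the busy_factor and retries roughly every 8ms.
 */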
5615 
5616 #ifdef CONFIG_NO_HZ_COMMON
5617 /*
5618  * In the CONFIG_NO_HZ_COMMON case, the kicked idle load balancer does
5619  * the rebalancing for all the cpus whose scheduler ticks are stopped.
5620  */
5621 static void nohz_idle_balance(int this_cpu, enum cpu_idle_type idle)
5622 {
5623 	struct rq *this_rq = cpu_rq(this_cpu);
5624 	struct rq *rq;
5625 	int balance_cpu;
5626 
5627 	if (idle != CPU_IDLE ||
5628 	    !test_bit(NOHZ_BALANCE_KICK, nohz_flags(this_cpu)))
5629 		goto end;
5630 
5631 	for_each_cpu(balance_cpu, nohz.idle_cpus_mask) {
5632 		if (balance_cpu == this_cpu || !idle_cpu(balance_cpu))
5633 			continue;
5634 
5635 		/*
5636 		 * If this cpu gets work to do, stop the load balancing
5637 		 * work being done for other cpus. Next load
5638 		 * balancing owner will pick it up.
5639 		 */
5640 		if (need_resched())
5641 			break;
5642 
5643 		rq = cpu_rq(balance_cpu);
5644 
5645 		raw_spin_lock_irq(&rq->lock);
5646 		update_rq_clock(rq);
5647 		update_idle_cpu_load(rq);
5648 		raw_spin_unlock_irq(&rq->lock);
5649 
5650 		rebalance_domains(balance_cpu, CPU_IDLE);
5651 
5652 		if (time_after(this_rq->next_balance, rq->next_balance))
5653 			this_rq->next_balance = rq->next_balance;
5654 	}
5655 	nohz.next_balance = this_rq->next_balance;
5656 end:
5657 	clear_bit(NOHZ_BALANCE_KICK, nohz_flags(this_cpu));
5658 }
5659 
5660 /*
5661  * Current heuristic for kicking the idle load balancer in the presence
5662  * of an idle cpu in the system.
5663  *   - This rq has more than one task.
5664  *   - At any scheduler domain level, this cpu's scheduler group has multiple
5665  *     busy cpus exceeding the group's power.
5666  *   - For SD_ASYM_PACKING, if the lower numbered cpus in the scheduler
5667  *     domain span are idle.
5668  */
5669 static inline int nohz_kick_needed(struct rq *rq, int cpu)
5670 {
5671 	unsigned long now = jiffies;
5672 	struct sched_domain *sd;
5673 
5674 	if (unlikely(idle_cpu(cpu)))
5675 		return 0;
5676 
5677 	/*
5678 	 * We may recently have been idle, ticked or tickless. At the first
5679 	 * busy tick after returning from idle, we will update the busy stats.
5680 	 */
5681 	set_cpu_sd_state_busy();
5682 	nohz_balance_exit_idle(cpu);
5683 
5684 	/*
5685 	 * None are in tickless mode and hence no need for NOHZ idle load
5686 	 * balancing.
5687 	 */
5688 	if (likely(!atomic_read(&nohz.nr_cpus)))
5689 		return 0;
5690 
5691 	if (time_before(now, nohz.next_balance))
5692 		return 0;
5693 
5694 	if (rq->nr_running >= 2)
5695 		goto need_kick;
5696 
5697 	rcu_read_lock();
5698 	for_each_domain(cpu, sd) {
5699 		struct sched_group *sg = sd->groups;
5700 		struct sched_group_power *sgp = sg->sgp;
5701 		int nr_busy = atomic_read(&sgp->nr_busy_cpus);
5702 
5703 		if (sd->flags & SD_SHARE_PKG_RESOURCES && nr_busy > 1)
5704 			goto need_kick_unlock;
5705 
5706 		if (sd->flags & SD_ASYM_PACKING && nr_busy != sg->group_weight
5707 		    && (cpumask_first_and(nohz.idle_cpus_mask,
5708 					  sched_domain_span(sd)) < cpu))
5709 			goto need_kick_unlock;
5710 
5711 		if (!(sd->flags & (SD_SHARE_PKG_RESOURCES | SD_ASYM_PACKING)))
5712 			break;
5713 	}
5714 	rcu_read_unlock();
5715 	return 0;
5716 
5717 need_kick_unlock:
5718 	rcu_read_unlock();
5719 need_kick:
5720 	return 1;
5721 }
5722 #else
5723 static void nohz_idle_balance(int this_cpu, enum cpu_idle_type idle) { }
5724 #endif
5725 
5726 /*
5727  * run_rebalance_domains is triggered when needed from the scheduler tick.
5728  * Also triggered for the nohz idle balancing (with NOHZ_BALANCE_KICK set).
5729  */
5730 static void run_rebalance_domains(struct softirq_action *h)
5731 {
5732 	int this_cpu = smp_processor_id();
5733 	struct rq *this_rq = cpu_rq(this_cpu);
5734 	enum cpu_idle_type idle = this_rq->idle_balance ?
5735 						CPU_IDLE : CPU_NOT_IDLE;
5736 
5737 	rebalance_domains(this_cpu, idle);
5738 
5739 	/*
5740 	 * If this cpu has a pending nohz_balance_kick, then do the
5741 	 * balancing on behalf of the other idle cpus whose ticks are
5742 	 * stopped.
5743 	 */
5744 	nohz_idle_balance(this_cpu, idle);
5745 }
5746 
5747 static inline int on_null_domain(int cpu)
5748 {
5749 	return !rcu_dereference_sched(cpu_rq(cpu)->sd);
5750 }
5751 
5752 /*
5753  * Trigger the SCHED_SOFTIRQ if it is time to do periodic load balancing.
5754  */
5755 void trigger_load_balance(struct rq *rq, int cpu)
5756 {
5757 	/* Don't need to rebalance while attached to NULL domain */
5758 	if (time_after_eq(jiffies, rq->next_balance) &&
5759 	    likely(!on_null_domain(cpu)))
5760 		raise_softirq(SCHED_SOFTIRQ);
5761 #ifdef CONFIG_NO_HZ_COMMON
5762 	if (nohz_kick_needed(rq, cpu) && likely(!on_null_domain(cpu)))
5763 		nohz_balancer_kick(cpu);
5764 #endif
5765 }
5766 
5767 static void rq_online_fair(struct rq *rq)
5768 {
5769 	update_sysctl();
5770 }
5771 
5772 static void rq_offline_fair(struct rq *rq)
5773 {
5774 	update_sysctl();
5775 
5776 	/* Ensure any throttled groups are reachable by pick_next_task */
5777 	unthrottle_offline_cfs_rqs(rq);
5778 }
5779 
5780 #endif /* CONFIG_SMP */
5781 
5782 /*
5783  * scheduler tick hitting a task of our scheduling class:
5784  */
5785 static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued)
5786 {
5787 	struct cfs_rq *cfs_rq;
5788 	struct sched_entity *se = &curr->se;
5789 
5790 	for_each_sched_entity(se) {
5791 		cfs_rq = cfs_rq_of(se);
5792 		entity_tick(cfs_rq, se, queued);
5793 	}
5794 
5795 	if (numabalancing_enabled)
5796 		task_tick_numa(rq, curr);
5797 
5798 	update_rq_runnable_avg(rq, 1);
5799 }
5800 
5801 /*
5802  * called on fork with the child task as argument from the parent's context
5803  *  - child not yet on the tasklist
5804  *  - preemption disabled
5805  */
5806 static void task_fork_fair(struct task_struct *p)
5807 {
5808 	struct cfs_rq *cfs_rq;
5809 	struct sched_entity *se = &p->se, *curr;
5810 	int this_cpu = smp_processor_id();
5811 	struct rq *rq = this_rq();
5812 	unsigned long flags;
5813 
5814 	raw_spin_lock_irqsave(&rq->lock, flags);
5815 
5816 	update_rq_clock(rq);
5817 
5818 	cfs_rq = task_cfs_rq(current);
5819 	curr = cfs_rq->curr;
5820 
5821 	if (unlikely(task_cpu(p) != this_cpu)) {
5822 		rcu_read_lock();
5823 		__set_task_cpu(p, this_cpu);
5824 		rcu_read_unlock();
5825 	}
5826 
5827 	update_curr(cfs_rq);
5828 
5829 	if (curr)
5830 		se->vruntime = curr->vruntime;
5831 	place_entity(cfs_rq, se, 1);
5832 
5833 	if (sysctl_sched_child_runs_first && curr && entity_before(curr, se)) {
5834 		/*
5835 		 * Upon rescheduling, sched_class::put_prev_task() will place
5836 		 * 'current' within the tree based on its new key value.
5837 		 */
5838 		swap(curr->vruntime, se->vruntime);
5839 		resched_task(rq->curr);
5840 	}
5841 
5842 	se->vruntime -= cfs_rq->min_vruntime;
5843 
5844 	raw_spin_unlock_irqrestore(&rq->lock, flags);
5845 }
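
/*
 * The child-runs-first policy above is runtime-tunable; assuming the
 * usual sysctl path, it can be flipped from userspace with:
 *
 *	echo 1 > /proc/sys/kernel/sched_child_runs_first
 *
 * When set, the vruntime swap guarantees the child's key sorts before
 * the parent's, so the child wins the next pick_next_task_fair().
 */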
5846 
5847 /*
5848  * Priority of the task has changed. Check to see if we preempt
5849  * the current task.
5850  */
5851 static void
5852 prio_changed_fair(struct rq *rq, struct task_struct *p, int oldprio)
5853 {
5854 	if (!p->se.on_rq)
5855 		return;
5856 
5857 	/*
5858 	 * Reschedule if we are currently running on this runqueue and
5859 	 * our priority decreased, or if we are not currently running on
5860 	 * this runqueue and our priority is higher than the current's
5861 	 */
5862 	if (rq->curr == p) {
5863 		if (p->prio > oldprio)
5864 			resched_task(rq->curr);
5865 	} else
5866 		check_preempt_curr(rq, p, 0);
5867 }
5868 
5869 static void switched_from_fair(struct rq *rq, struct task_struct *p)
5870 {
5871 	struct sched_entity *se = &p->se;
5872 	struct cfs_rq *cfs_rq = cfs_rq_of(se);
5873 
5874 	/*
5875 	 * Ensure the task's vruntime is normalized, so that when it is
5876 	 * switched back to the fair class the enqueue_entity(.flags=0) will
5877 	 * do the right thing.
5878 	 *
5879 	 * If it was on_rq, then the dequeue_entity(.flags=0) will already
5880 	 * have normalized the vruntime, if it was !on_rq, then only when
5881 	 * the task is sleeping will it still have non-normalized vruntime.
5882 	 */
5883 	if (!se->on_rq && p->state != TASK_RUNNING) {
5884 		/*
5885 		 * Fix up our vruntime so that the current sleep doesn't
5886 		 * cause 'unlimited' sleep bonus.
5887 		 */
5888 		place_entity(cfs_rq, se, 0);
5889 		se->vruntime -= cfs_rq->min_vruntime;
5890 	}
5891 
5892 #ifdef CONFIG_SMP
5893 	/*
5894 	 * Remove our load contribution when we leave sched_fair
5895 	 * and ensure we don't carry in an old decay_count if we
5896 	 * switch back.
5897 	 */
5898 	if (p->se.avg.decay_count) {
5899 		struct cfs_rq *cfs_rq = cfs_rq_of(&p->se);
5900 		__synchronize_entity_decay(&p->se);
5901 		subtract_blocked_load_contrib(cfs_rq,
5902 				p->se.avg.load_avg_contrib);
5903 	}
5904 #endif
5905 }
5906 
5907 /*
5908  * We switched to the sched_fair class.
5909  */
5910 static void switched_to_fair(struct rq *rq, struct task_struct *p)
5911 {
5912 	if (!p->se.on_rq)
5913 		return;
5914 
5915 	/*
5916 	 * We were most likely switched from sched_rt, so
5917 	 * kick off a reschedule if running, otherwise just see
5918 	 * if we can still preempt the current task.
5919 	 */
5920 	if (rq->curr == p)
5921 		resched_task(rq->curr);
5922 	else
5923 		check_preempt_curr(rq, p, 0);
5924 }
5925 
5926 /* Account for a task changing its policy or group.
5927  *
5928  * This routine is mostly called to set cfs_rq->curr field when a task
5929  * migrates between groups/classes.
5930  */
5931 static void set_curr_task_fair(struct rq *rq)
5932 {
5933 	struct sched_entity *se = &rq->curr->se;
5934 
5935 	for_each_sched_entity(se) {
5936 		struct cfs_rq *cfs_rq = cfs_rq_of(se);
5937 
5938 		set_next_entity(cfs_rq, se);
5939 		/* ensure bandwidth has been allocated on our new cfs_rq */
5940 		account_cfs_rq_runtime(cfs_rq, 0);
5941 	}
5942 }
5943 
5944 void init_cfs_rq(struct cfs_rq *cfs_rq)
5945 {
5946 	cfs_rq->tasks_timeline = RB_ROOT;
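	/*
	 * Start min_vruntime just short of the u64 wrap point (1<<20 ns,
	 * about 1ms of nice-0 runtime away) so that bugs in the
	 * wraparound-safe vruntime comparisons surface within the first
	 * milliseconds of uptime instead of after weeks.
	 */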
5947 	cfs_rq->min_vruntime = (u64)(-(1LL << 20));
5948 #ifndef CONFIG_64BIT
5949 	cfs_rq->min_vruntime_copy = cfs_rq->min_vruntime;
5950 #endif
5951 #ifdef CONFIG_SMP
5952 	atomic64_set(&cfs_rq->decay_counter, 1);
5953 	atomic_long_set(&cfs_rq->removed_load, 0);
5954 #endif
5955 }
5956 
5957 #ifdef CONFIG_FAIR_GROUP_SCHED
5958 static void task_move_group_fair(struct task_struct *p, int on_rq)
5959 {
5960 	struct cfs_rq *cfs_rq;
5961 	/*
5962 	 * If the task was not on the rq at the time of this cgroup movement
5963 	 * it must have been asleep, sleeping tasks keep their ->vruntime
5964 	 * absolute on their old rq until wakeup (needed for the fair sleeper
5965 	 * bonus in place_entity()).
5966 	 *
5967 	 * If it was on the rq, we've just 'preempted' it, which does convert
5968 	 * ->vruntime to a relative base.
5969 	 *
5970 	 * Make sure both cases convert their relative position when migrating
5971 	 * to another cgroup's rq. This does somewhat interfere with the
5972 	 * fair sleeper stuff for the first placement, but who cares.
5973 	 */
5974 	/*
5975 	 * When !on_rq, vruntime of the task has usually NOT been normalized.
5976 	 * But there are some cases where it has already been normalized:
5977 	 *
5978 	 * - Moving a forked child which is waiting for being woken up by
5979 	 *   wake_up_new_task().
5980 	 * - Moving a task which has been woken up by try_to_wake_up() and
5981 	 *   waiting for actually being woken up by sched_ttwu_pending().
5982 	 *
5983 	 * To prevent boost or penalty in the new cfs_rq caused by delta
5984 	 * min_vruntime between the two cfs_rqs, we skip vruntime adjustment.
5985 	 */
5986 	if (!on_rq && (!p->se.sum_exec_runtime || p->state == TASK_WAKING))
5987 		on_rq = 1;
5988 
5989 	if (!on_rq)
5990 		p->se.vruntime -= cfs_rq_of(&p->se)->min_vruntime;
5991 	set_task_rq(p, task_cpu(p));
5992 	if (!on_rq) {
5993 		cfs_rq = cfs_rq_of(&p->se);
5994 		p->se.vruntime += cfs_rq->min_vruntime;
5995 #ifdef CONFIG_SMP
5996 		/*
5997 		 * migrate_task_rq_fair() will have removed our previous
5998 		 * contribution, but we must synchronize for ongoing future
5999 		 * decay.
6000 		 */
6001 		p->se.avg.decay_count = atomic64_read(&cfs_rq->decay_counter);
6002 		cfs_rq->blocked_load_avg += p->se.avg.load_avg_contrib;
6003 #endif
6004 	}
6005 }
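
/*
 * Concrete illustration of the rebasing above, with made-up numbers: a
 * task sleeping in group A holds absolute vruntime 105ms while A's
 * min_vruntime is 100ms. Moving it to group B whose min_vruntime is
 * 1000ms yields 105 - 100 + 1000 = 1005ms: the 5ms relative position is
 * preserved instead of turning into a ~900ms boost.
 */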
6006 
6007 void free_fair_sched_group(struct task_group *tg)
6008 {
6009 	int i;
6010 
6011 	destroy_cfs_bandwidth(tg_cfs_bandwidth(tg));
6012 
6013 	for_each_possible_cpu(i) {
6014 		if (tg->cfs_rq)
6015 			kfree(tg->cfs_rq[i]);
6016 		if (tg->se)
6017 			kfree(tg->se[i]);
6018 	}
6019 
6020 	kfree(tg->cfs_rq);
6021 	kfree(tg->se);
6022 }
6023 
6024 int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
6025 {
6026 	struct cfs_rq *cfs_rq;
6027 	struct sched_entity *se;
6028 	int i;
6029 
6030 	tg->cfs_rq = kzalloc(sizeof(cfs_rq) * nr_cpu_ids, GFP_KERNEL);
6031 	if (!tg->cfs_rq)
6032 		goto err;
6033 	tg->se = kzalloc(sizeof(se) * nr_cpu_ids, GFP_KERNEL);
6034 	if (!tg->se)
6035 		goto err;
6036 
6037 	tg->shares = NICE_0_LOAD;
6038 
6039 	init_cfs_bandwidth(tg_cfs_bandwidth(tg));
6040 
6041 	for_each_possible_cpu(i) {
6042 		cfs_rq = kzalloc_node(sizeof(struct cfs_rq),
6043 				      GFP_KERNEL, cpu_to_node(i));
6044 		if (!cfs_rq)
6045 			goto err;
6046 
6047 		se = kzalloc_node(sizeof(struct sched_entity),
6048 				  GFP_KERNEL, cpu_to_node(i));
6049 		if (!se)
6050 			goto err_free_rq;
6051 
6052 		init_cfs_rq(cfs_rq);
6053 		init_tg_cfs_entry(tg, cfs_rq, se, i, parent->se[i]);
6054 	}
6055 
6056 	return 1;
6057 
6058 err_free_rq:
6059 	kfree(cfs_rq);
6060 err:
6061 	return 0;
6062 }
6063 
6064 void unregister_fair_sched_group(struct task_group *tg, int cpu)
6065 {
6066 	struct rq *rq = cpu_rq(cpu);
6067 	unsigned long flags;
6068 
6069 	/*
6070 	 * Only empty task groups can be destroyed; so we can speculatively
6071 	 * check on_list without danger of it being re-added.
6072 	 */
6073 	if (!tg->cfs_rq[cpu]->on_list)
6074 		return;
6075 
6076 	raw_spin_lock_irqsave(&rq->lock, flags);
6077 	list_del_leaf_cfs_rq(tg->cfs_rq[cpu]);
6078 	raw_spin_unlock_irqrestore(&rq->lock, flags);
6079 }
6080 
6081 void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
6082 			struct sched_entity *se, int cpu,
6083 			struct sched_entity *parent)
6084 {
6085 	struct rq *rq = cpu_rq(cpu);
6086 
6087 	cfs_rq->tg = tg;
6088 	cfs_rq->rq = rq;
6089 	init_cfs_rq_runtime(cfs_rq);
6090 
6091 	tg->cfs_rq[cpu] = cfs_rq;
6092 	tg->se[cpu] = se;
6093 
6094 	/* se could be NULL for root_task_group */
6095 	if (!se)
6096 		return;
6097 
6098 	if (!parent)
6099 		se->cfs_rq = &rq->cfs;
6100 	else
6101 		se->cfs_rq = parent->my_q;
6102 
6103 	se->my_q = cfs_rq;
6104 	update_load_set(&se->load, 0);
6105 	se->parent = parent;
6106 }
6107 
6108 static DEFINE_MUTEX(shares_mutex);
6109 
6110 int sched_group_set_shares(struct task_group *tg, unsigned long shares)
6111 {
6112 	int i;
6113 	unsigned long flags;
6114 
6115 	/*
6116 	 * We can't change the weight of the root cgroup.
6117 	 */
6118 	if (!tg->se[0])
6119 		return -EINVAL;
6120 
6121 	shares = clamp(shares, scale_load(MIN_SHARES), scale_load(MAX_SHARES));
6122 
6123 	mutex_lock(&shares_mutex);
6124 	if (tg->shares == shares)
6125 		goto done;
6126 
6127 	tg->shares = shares;
6128 	for_each_possible_cpu(i) {
6129 		struct rq *rq = cpu_rq(i);
6130 		struct sched_entity *se;
6131 
6132 		se = tg->se[i];
6133 		/* Propagate contribution to hierarchy */
6134 		raw_spin_lock_irqsave(&rq->lock, flags);
6135 
6136 		/* Possible calls to update_curr() need rq clock */
6137 		update_rq_clock(rq);
6138 		for_each_sched_entity(se)
6139 			update_cfs_shares(group_cfs_rq(se));
6140 		raw_spin_unlock_irqrestore(&rq->lock, flags);
6141 	}
6142 
6143 done:
6144 	mutex_unlock(&shares_mutex);
6145 	return 0;
6146 }
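
/*
 * This is the backend of the cpu cgroup's "cpu.shares" file. Assuming a
 * v1 cpu controller mounted at the conventional path, a sketch of use:
 *
 *	mkdir /sys/fs/cgroup/cpu/batch
 *	echo 512 > /sys/fs/cgroup/cpu/batch/cpu.shares
 *
 * 512 gives the group half the weight of the 1024 default; the value is
 * clamped to [MIN_SHARES, MAX_SHARES] and the new weight is propagated
 * up the group-entity hierarchy on every possible CPU.
 */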
6147 #else /* CONFIG_FAIR_GROUP_SCHED */
6148 
6149 void free_fair_sched_group(struct task_group *tg) { }
6150 
6151 int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
6152 {
6153 	return 1;
6154 }
6155 
6156 void unregister_fair_sched_group(struct task_group *tg, int cpu) { }
6157 
6158 #endif /* CONFIG_FAIR_GROUP_SCHED */
6159 
6161 static unsigned int get_rr_interval_fair(struct rq *rq, struct task_struct *task)
6162 {
6163 	struct sched_entity *se = &task->se;
6164 	unsigned int rr_interval = 0;
6165 
6166 	/*
6167 	 * Time slice is 0 for SCHED_OTHER tasks that are on an otherwise
6168 	 * idle runqueue:
6169 	 */
6170 	if (rq->cfs.load.weight)
6171 		rr_interval = NS_TO_JIFFIES(sched_slice(cfs_rq_of(se), se));
6172 
6173 	return rr_interval;
6174 }
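
/*
 * This hook backs sched_rr_get_interval(2) for SCHED_OTHER tasks; a
 * sketch of querying the effective slice from userspace:
 *
 *	struct timespec ts;
 *	sched_rr_get_interval(0, &ts);	(0 queries the calling thread)
 *
 * Unlike SCHED_RR's fixed quantum, the value here is sched_slice(),
 * i.e. the task's weight-proportional share of the latency period, and
 * it is 0 when the runqueue is otherwise idle.
 */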
6175 
6176 /*
6177  * All the scheduling class methods:
6178  */
6179 const struct sched_class fair_sched_class = {
6180 	.next			= &idle_sched_class,
6181 	.enqueue_task		= enqueue_task_fair,
6182 	.dequeue_task		= dequeue_task_fair,
6183 	.yield_task		= yield_task_fair,
6184 	.yield_to_task		= yield_to_task_fair,
6185 
6186 	.check_preempt_curr	= check_preempt_wakeup,
6187 
6188 	.pick_next_task		= pick_next_task_fair,
6189 	.put_prev_task		= put_prev_task_fair,
6190 
6191 #ifdef CONFIG_SMP
6192 	.select_task_rq		= select_task_rq_fair,
6193 	.migrate_task_rq	= migrate_task_rq_fair,
6194 
6195 	.rq_online		= rq_online_fair,
6196 	.rq_offline		= rq_offline_fair,
6197 
6198 	.task_waking		= task_waking_fair,
6199 #endif
6200 
6201 	.set_curr_task          = set_curr_task_fair,
6202 	.task_tick		= task_tick_fair,
6203 	.task_fork		= task_fork_fair,
6204 
6205 	.prio_changed		= prio_changed_fair,
6206 	.switched_from		= switched_from_fair,
6207 	.switched_to		= switched_to_fair,
6208 
6209 	.get_rr_interval	= get_rr_interval_fair,
6210 
6211 #ifdef CONFIG_FAIR_GROUP_SCHED
6212 	.task_move_group	= task_move_group_fair,
6213 #endif
6214 };
6215 
6216 #ifdef CONFIG_SCHED_DEBUG
6217 void print_cfs_stats(struct seq_file *m, int cpu)
6218 {
6219 	struct cfs_rq *cfs_rq;
6220 
6221 	rcu_read_lock();
6222 	for_each_leaf_cfs_rq(cpu_rq(cpu), cfs_rq)
6223 		print_cfs_rq(m, cpu, cfs_rq);
6224 	rcu_read_unlock();
6225 }
6226 #endif
6227 
6228 __init void init_sched_fair_class(void)
6229 {
6230 #ifdef CONFIG_SMP
6231 	open_softirq(SCHED_SOFTIRQ, run_rebalance_domains);
6232 
6233 #ifdef CONFIG_NO_HZ_COMMON
6234 	nohz.next_balance = jiffies;
6235 	zalloc_cpumask_var(&nohz.idle_cpus_mask, GFP_NOWAIT);
6236 	cpu_notifier(sched_ilb_notifier, 0);
6237 #endif
6238 #endif /* CONFIG_SMP */
6240 }
6241