/*
 * coupled.c - helper functions to enter the same idle state on multiple cpus
 *
 * Copyright (c) 2011 Google, Inc.
 *
 * Author: Colin Cross <ccross@android.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#include <linux/kernel.h>
#include <linux/cpu.h>
#include <linux/cpuidle.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include "cpuidle.h"

/**
 * DOC: Coupled cpuidle states
 *
 * On some ARM SMP SoCs (OMAP4460, Tegra 2, and probably more), the
 * cpus cannot be independently powered down, either due to
 * sequencing restrictions (on Tegra 2, cpu 0 must be the last to
 * power down), or due to HW bugs (on OMAP4460, a cpu powering up
 * will corrupt the gic state unless the other cpu runs a work
 * around).  Each cpu has a power state that it can enter without
 * coordinating with the other cpu (usually Wait For Interrupt, or
 * WFI), and one or more "coupled" power states that affect blocks
 * shared between the cpus (L2 cache, interrupt controller, and
 * sometimes the whole SoC).  Entering a coupled power state must
 * be tightly controlled on both cpus.
 *
 * This file implements a solution, where each cpu will wait in the
 * WFI state until all cpus are ready to enter a coupled state, at
 * which point the coupled state function will be called on all
 * cpus at approximately the same time.
 *
 * Once all cpus are ready to enter idle, they are woken by an smp
 * cross call.  At this point, there is a chance that one of the
 * cpus will find work to do, and choose not to enter idle.  A
 * final pass is needed to guarantee that all cpus will call the
 * power state enter function at the same time.  During this pass,
 * each cpu will increment the ready counter, and continue once the
 * ready counter matches the number of online coupled cpus.  If any
 * cpu exits idle, the other cpus will decrement their counter and
 * retry.
 *
 * requested_state stores the deepest coupled idle state each cpu
 * is ready for.  It is assumed that the states are indexed from
 * shallowest (highest power, lowest exit latency) to deepest
 * (lowest power, highest exit latency).  The requested_state
 * variable is not locked.  It is only written from the cpu that
 * it stores (or by the on/offlining cpu if that cpu is offline),
 * and only read after all the cpus are ready for the coupled idle
 * state and are no longer updating it.
 *
 * Three counters are used.  online_count tracks the number of cpus
 * in the coupled set that are currently or soon will be online.
 * The waiting count tracks the number of cpus that are in the
 * waiting loop, in the ready loop, or in the coupled idle state.
 * The ready count tracks the number of cpus that are in the ready
 * loop or in the coupled idle state.  The waiting and ready counts
 * are packed together into the single atomic ready_waiting_counts.
 *
 * To use coupled cpuidle states, a cpuidle driver must:
 *
 *    Set struct cpuidle_device.coupled_cpus to the mask of all
 *    coupled cpus, usually the same as cpu_possible_mask if all cpus
 *    are part of the same cluster.  The coupled_cpus mask must be
 *    set in the struct cpuidle_device for each cpu.
 *
 *    Set struct cpuidle_device.safe_state_index to the index of a
 *    state that is not a coupled state.  This is usually WFI.
 *
 *    Set CPUIDLE_FLAG_COUPLED in struct cpuidle_state.flags for each
 *    state that affects multiple cpus.
 *
 *    Provide a struct cpuidle_state.enter function for each state
 *    that affects multiple cpus.  This function is guaranteed to be
 *    called on all cpus at approximately the same time.  The driver
 *    should ensure that the cpus all abort together if any cpu tries
 *    to abort once the function is called.  The function should return
 *    with interrupts still disabled.
 *
 * An illustrative driver sketch follows this comment block.
 */
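
/*
 * Illustrative example (a sketch, not part of this file's API): a driver
 * for a hypothetical platform might describe one safe state and one
 * coupled state like this.  foo_enter_wfi() and foo_enter_coupled() are
 * assumed platform functions, not real APIs:
 *
 *	static struct cpuidle_driver foo_idle_driver = {
 *		.name = "foo_idle",
 *		.states = {
 *			[0] = {
 *				.enter = foo_enter_wfi,
 *				.name = "WFI",
 *			},
 *			[1] = {
 *				.enter = foo_enter_coupled,
 *				.flags = CPUIDLE_FLAG_COUPLED,
 *				.name = "C2",
 *			},
 *		},
 *		.state_count = 2,
 *	};
 *
 * Each cpu's struct cpuidle_device then gets safe_state_index = 0 and a
 * coupled_cpus mask before registration (see the sketch after
 * cpuidle_coupled_register_device() below).
 */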

/**
 * struct cpuidle_coupled - data for set of cpus that share a coupled idle state
 * @coupled_cpus: mask of cpus that are part of the coupled set
 * @requested_state: array of requested states for cpus in the coupled set
 * @ready_waiting_counts: combined count of cpus in ready or waiting loops
 * @abort_barrier: barrier used to synchronize the cpus when aborting and
 *	retrying coupled idle after a late poke
 * @online_count: count of cpus that are online
 * @refcnt: reference count of cpuidle devices that are using this struct
 * @prevent: flag to prevent coupled idle while a cpu is hotplugging
 */
struct cpuidle_coupled {
	cpumask_t coupled_cpus;
	int requested_state[NR_CPUS];
	atomic_t ready_waiting_counts;
	atomic_t abort_barrier;
	int online_count;
	int refcnt;
	int prevent;
};

#define WAITING_BITS 16
#define MAX_WAITING_CPUS (1 << WAITING_BITS)
#define WAITING_MASK (MAX_WAITING_CPUS - 1)
#define READY_MASK (~WAITING_MASK)
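
/*
 * Worked example of the packing above: the low WAITING_BITS bits of
 * ready_waiting_counts hold the waiting count, and the remaining high
 * bits hold the ready count.  With 4 cpus in the waiting loop, 2 of
 * which have also entered the ready loop, the value is
 * (2 << WAITING_BITS) | 4.
 */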

#define CPUIDLE_COUPLED_NOT_IDLE	(-1)

static DEFINE_MUTEX(cpuidle_coupled_lock);
static DEFINE_PER_CPU(struct call_single_data, cpuidle_coupled_poke_cb);

/*
 * The cpuidle_coupled_poke_pending mask is used to avoid calling
 * __smp_call_function_single with the per cpu call_single_data struct already
 * in use.  This prevents a deadlock where two cpus are waiting for each
 * other's call_single_data struct to be available.
 */
static cpumask_t cpuidle_coupled_poke_pending;

/*
 * The cpuidle_coupled_poked mask is used to ensure that each cpu has been poked
 * once to minimize entering the ready loop with a poke pending, which would
 * require aborting and retrying.
 */
static cpumask_t cpuidle_coupled_poked;

/**
 * cpuidle_coupled_parallel_barrier - synchronize all online coupled cpus
 * @dev: cpuidle_device of the calling cpu
 * @a:   atomic variable to hold the barrier
 *
 * No caller to this function will return from this function until all online
 * cpus in the same coupled group have called this function.  Once any caller
 * has returned from this function, the barrier is immediately available for
 * reuse.
 *
 * The atomic variable must be initialized to 0 before any cpu calls this
 * function, and will be reset to 0 before any cpu returns from this function.
 *
 * Must only be called from within a coupled idle state handler
 * (state.enter when state.flags has CPUIDLE_FLAG_COUPLED set).
 *
 * Provides full smp barrier semantics before and after calling.  See the
 * illustrative sketch after this function.
 */
void cpuidle_coupled_parallel_barrier(struct cpuidle_device *dev, atomic_t *a)
{
	int n = dev->coupled->online_count;

	smp_mb__before_atomic_inc();
	atomic_inc(a);

	while (atomic_read(a) < n)
		cpu_relax();

	if (atomic_inc_return(a) == n * 2) {
		atomic_set(a, 0);
		return;
	}

	while (atomic_read(a) > n)
		cpu_relax();
}
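
/*
 * Illustrative sketch: a coupled state enter function on a hypothetical
 * platform could use the barrier above to keep the cpus in lockstep
 * around a shared power transition.  The foo_*() helpers and foo_barrier
 * variable are assumptions for the example, not real APIs:
 *
 *	static atomic_t foo_barrier = ATOMIC_INIT(0);
 *
 *	static int foo_enter_coupled(struct cpuidle_device *dev,
 *			struct cpuidle_driver *drv, int index)
 *	{
 *		if (dev->cpu == 0)
 *			foo_prepare_shared_power_down();
 *
 *		cpuidle_coupled_parallel_barrier(dev, &foo_barrier);
 *
 *		foo_cpu_power_down(dev->cpu);
 *
 *		return index;
 *	}
 *
 * The barrier guarantees that no cpu powers itself down until cpu 0 has
 * finished preparing the blocks shared by the whole coupled set.
 */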

/**
 * cpuidle_state_is_coupled - check if a state is part of a coupled set
 * @dev: struct cpuidle_device for the current cpu
 * @drv: struct cpuidle_driver for the platform
 * @state: index of the target state in drv->states
 *
 * Returns true if the target state is coupled with cpus besides this one
 */
bool cpuidle_state_is_coupled(struct cpuidle_device *dev,
	struct cpuidle_driver *drv, int state)
{
	return drv->states[state].flags & CPUIDLE_FLAG_COUPLED;
}

/**
 * cpuidle_coupled_set_ready - mark a cpu as ready
 * @coupled: the struct coupled that contains the current cpu
 */
static inline void cpuidle_coupled_set_ready(struct cpuidle_coupled *coupled)
{
	atomic_add(MAX_WAITING_CPUS, &coupled->ready_waiting_counts);
}

/**
 * cpuidle_coupled_set_not_ready - mark a cpu as not ready
 * @coupled: the struct coupled that contains the current cpu
 *
 * Decrements the ready counter, unless the ready (and thus the waiting) counter
 * is equal to the number of online cpus.  Prevents a race where one cpu
 * decrements the waiting counter and then re-increments it just before another
 * cpu has decremented its ready counter, leading to the ready counter going
 * down from the number of online cpus without going through the coupled idle
 * state.
 *
 * Returns 0 if the counter was decremented successfully, -EINVAL if the ready
 * counter was equal to the number of online cpus.
 */
static
inline int cpuidle_coupled_set_not_ready(struct cpuidle_coupled *coupled)
{
	int all;
	int ret;

	all = coupled->online_count | (coupled->online_count << WAITING_BITS);
	ret = atomic_add_unless(&coupled->ready_waiting_counts,
		-MAX_WAITING_CPUS, all);

	return ret ? 0 : -EINVAL;
}

/**
 * cpuidle_coupled_no_cpus_ready - check if no cpus in a coupled set are ready
 * @coupled: the struct coupled that contains the current cpu
 *
 * Returns true if all of the cpus in a coupled set are out of the ready loop.
 */
static inline int cpuidle_coupled_no_cpus_ready(struct cpuidle_coupled *coupled)
{
	int r = atomic_read(&coupled->ready_waiting_counts) >> WAITING_BITS;
	return r == 0;
}

/**
 * cpuidle_coupled_cpus_ready - check if all cpus in a coupled set are ready
 * @coupled: the struct coupled that contains the current cpu
 *
 * Returns true if all cpus coupled to this target state are in the ready loop
 */
static inline bool cpuidle_coupled_cpus_ready(struct cpuidle_coupled *coupled)
{
	int r = atomic_read(&coupled->ready_waiting_counts) >> WAITING_BITS;
	return r == coupled->online_count;
}

/**
 * cpuidle_coupled_cpus_waiting - check if all cpus in a coupled set are waiting
 * @coupled: the struct coupled that contains the current cpu
 *
 * Returns true if all cpus coupled to this target state are in the wait loop
 */
static inline bool cpuidle_coupled_cpus_waiting(struct cpuidle_coupled *coupled)
{
	int w = atomic_read(&coupled->ready_waiting_counts) & WAITING_MASK;
	return w == coupled->online_count;
}

/**
 * cpuidle_coupled_no_cpus_waiting - check if no cpus in coupled set are waiting
 * @coupled: the struct coupled that contains the current cpu
 *
 * Returns true if all of the cpus in a coupled set are out of the waiting loop.
 */
static inline int cpuidle_coupled_no_cpus_waiting(struct cpuidle_coupled *coupled)
{
	int w = atomic_read(&coupled->ready_waiting_counts) & WAITING_MASK;
	return w == 0;
}

/**
 * cpuidle_coupled_get_state - determine the deepest idle state
 * @dev: struct cpuidle_device for this cpu
 * @coupled: the struct coupled that contains the current cpu
 *
 * Returns the deepest idle state that all coupled cpus can enter
 */
static inline int cpuidle_coupled_get_state(struct cpuidle_device *dev,
		struct cpuidle_coupled *coupled)
{
	int i;
	int state = INT_MAX;

	/*
	 * Read barrier ensures that read of requested_state is ordered after
	 * reads of ready_count.  Matches the write barrier in
	 * cpuidle_coupled_set_waiting().
	 */
	smp_rmb();

	for_each_cpu_mask(i, coupled->coupled_cpus)
		if (cpu_online(i) && coupled->requested_state[i] < state)
			state = coupled->requested_state[i];

	return state;
}

static void cpuidle_coupled_handle_poke(void *info)
{
	int cpu = (unsigned long)info;
	cpumask_set_cpu(cpu, &cpuidle_coupled_poked);
	cpumask_clear_cpu(cpu, &cpuidle_coupled_poke_pending);
}

/**
 * cpuidle_coupled_poke - wake up a cpu that may be waiting
 * @cpu: target cpu
 *
 * Ensures that the target cpu exits its waiting idle state (if it is in it)
 * and will see updates to waiting_count before it re-enters its waiting idle
 * state.
 *
 * If cpuidle_coupled_poke_pending is already set for the target cpu, that cpu
 * either has or will soon have a pending IPI that will wake it out of idle,
 * or it is currently processing the IPI and is not in idle.
 */
static void cpuidle_coupled_poke(int cpu)
{
	struct call_single_data *csd = &per_cpu(cpuidle_coupled_poke_cb, cpu);

	if (!cpumask_test_and_set_cpu(cpu, &cpuidle_coupled_poke_pending))
		__smp_call_function_single(cpu, csd, 0);
}

/**
 * cpuidle_coupled_poke_others - wake up all other cpus that may be waiting
 * @this_cpu: the current cpu
 * @coupled: the struct coupled that contains the current cpu
 *
 * Calls cpuidle_coupled_poke on all other online cpus.
 */
static void cpuidle_coupled_poke_others(int this_cpu,
		struct cpuidle_coupled *coupled)
{
	int cpu;

	for_each_cpu_mask(cpu, coupled->coupled_cpus)
		if (cpu != this_cpu && cpu_online(cpu))
			cpuidle_coupled_poke(cpu);
}

/**
 * cpuidle_coupled_set_waiting - mark this cpu as in the wait loop
 * @cpu: the current cpu
 * @coupled: the struct coupled that contains the current cpu
 * @next_state: the index in drv->states of the requested state for this cpu
 *
 * Updates the requested idle state for the specified cpuidle device.
 * Returns the number of waiting cpus.
 */
static int cpuidle_coupled_set_waiting(int cpu,
		struct cpuidle_coupled *coupled, int next_state)
{
	coupled->requested_state[cpu] = next_state;

	/*
	 * The atomic_inc_return provides a write barrier to order the write
	 * to requested_state with the later write that increments ready_count.
	 */
	return atomic_inc_return(&coupled->ready_waiting_counts) & WAITING_MASK;
}

/**
 * cpuidle_coupled_set_not_waiting - mark this cpu as leaving the wait loop
 * @cpu: the current cpu
 * @coupled: the struct coupled that contains the current cpu
 *
 * Removes the requested idle state for the specified cpuidle device.
 */
static void cpuidle_coupled_set_not_waiting(int cpu,
		struct cpuidle_coupled *coupled)
{
	/*
	 * Decrementing waiting count can race with incrementing it in
	 * cpuidle_coupled_set_waiting, but that's OK.  Worst case, some
	 * cpus will increment ready_count and then spin until they
	 * notice that this cpu has cleared its requested_state.
	 */
	atomic_dec(&coupled->ready_waiting_counts);

	coupled->requested_state[cpu] = CPUIDLE_COUPLED_NOT_IDLE;
}

/**
 * cpuidle_coupled_set_done - mark this cpu as leaving the ready loop
 * @cpu: the current cpu
 * @coupled: the struct coupled that contains the current cpu
 *
 * Marks this cpu as no longer in the ready and waiting loops.  Decrements
 * the waiting count first to prevent another cpu looping back in and seeing
 * this cpu as waiting just before it exits idle.
 */
static void cpuidle_coupled_set_done(int cpu, struct cpuidle_coupled *coupled)
{
	cpuidle_coupled_set_not_waiting(cpu, coupled);
	atomic_sub(MAX_WAITING_CPUS, &coupled->ready_waiting_counts);
}

/**
 * cpuidle_coupled_clear_pokes - spin until the poke interrupt is processed
 * @cpu: this cpu
 *
 * Turns on interrupts and spins until any outstanding poke interrupts have
 * been processed and the poke bit has been cleared.
 *
 * Other interrupts may also be processed while interrupts are enabled, so
 * need_resched() must be tested after this function returns to make sure
 * the interrupt didn't schedule work that should take the cpu out of idle.
 *
 * Returns 0 if no poke was pending, 1 if a poke was cleared.
 */
static int cpuidle_coupled_clear_pokes(int cpu)
{
	if (!cpumask_test_cpu(cpu, &cpuidle_coupled_poke_pending))
		return 0;

	local_irq_enable();
	while (cpumask_test_cpu(cpu, &cpuidle_coupled_poke_pending))
		cpu_relax();
	local_irq_disable();

	return 1;
}

static bool cpuidle_coupled_any_pokes_pending(struct cpuidle_coupled *coupled)
{
	cpumask_t cpus;
	int ret;

	cpumask_and(&cpus, cpu_online_mask, &coupled->coupled_cpus);
	ret = cpumask_and(&cpus, &cpuidle_coupled_poke_pending, &cpus);

	return ret;
}

/**
 * cpuidle_enter_state_coupled - attempt to enter a state with coupled cpus
 * @dev: struct cpuidle_device for the current cpu
 * @drv: struct cpuidle_driver for the platform
 * @next_state: index of the requested state in drv->states
 *
 * Coordinate with coupled cpus to enter the target state.  This is a two
 * stage process.  In the first stage, the cpus are operating independently,
 * and may call into cpuidle_enter_state_coupled at completely different times.
 * To save as much power as possible, the first cpus to call this function will
 * go to an intermediate state (the cpuidle_device's safe state), and wait for
 * all the other cpus to call this function.  Once all coupled cpus are idle,
 * the second stage will start.  Each coupled cpu will spin until all cpus have
 * guaranteed that they will call the enter function of the target state.
 *
 * This function must be called with interrupts disabled.  It may enable
 * interrupts while preparing for idle, and it will always return with
 * interrupts enabled.
 */
int cpuidle_enter_state_coupled(struct cpuidle_device *dev,
		struct cpuidle_driver *drv, int next_state)
{
	int entered_state = -1;
	struct cpuidle_coupled *coupled = dev->coupled;
	int w;

	if (!coupled)
		return -EINVAL;

	while (coupled->prevent) {
		cpuidle_coupled_clear_pokes(dev->cpu);
		if (need_resched()) {
			local_irq_enable();
			return entered_state;
		}
		entered_state = cpuidle_enter_state(dev, drv,
			dev->safe_state_index);
		local_irq_disable();
	}

	/* Read barrier ensures online_count is read after prevent is cleared */
	smp_rmb();

reset:
	cpumask_clear_cpu(dev->cpu, &cpuidle_coupled_poked);

	w = cpuidle_coupled_set_waiting(dev->cpu, coupled, next_state);
	/*
	 * If this is the last cpu to enter the waiting state, poke
	 * all the other cpus out of their waiting state so they can
	 * enter a deeper state.  This can race with one of the cpus
	 * exiting the waiting state due to an interrupt and
	 * decrementing waiting_count, see comment below.
	 */
	if (w == coupled->online_count) {
		cpumask_set_cpu(dev->cpu, &cpuidle_coupled_poked);
		cpuidle_coupled_poke_others(dev->cpu, coupled);
	}

retry:
	/*
	 * Wait for all coupled cpus to be idle, using the deepest state
	 * allowed for a single cpu.  If this was not the poking cpu, wait
	 * for at least one poke before leaving to avoid a race where
	 * two cpus could arrive at the waiting loop at the same time,
	 * but the first of the two to arrive could skip the loop without
	 * processing the pokes from the last to arrive.
	 */
	while (!cpuidle_coupled_cpus_waiting(coupled) ||
			!cpumask_test_cpu(dev->cpu, &cpuidle_coupled_poked)) {
		if (cpuidle_coupled_clear_pokes(dev->cpu))
			continue;

		if (need_resched()) {
			cpuidle_coupled_set_not_waiting(dev->cpu, coupled);
			goto out;
		}

		if (coupled->prevent) {
			cpuidle_coupled_set_not_waiting(dev->cpu, coupled);
			goto out;
		}

		entered_state = cpuidle_enter_state(dev, drv,
			dev->safe_state_index);
		local_irq_disable();
	}

	cpuidle_coupled_clear_pokes(dev->cpu);
	if (need_resched()) {
		cpuidle_coupled_set_not_waiting(dev->cpu, coupled);
		goto out;
	}

	/*
	 * Make sure final poke status for this cpu is visible before setting
	 * cpu as ready.
	 */
	smp_wmb();

	/*
	 * All coupled cpus are probably idle.  There is a small chance that
	 * one of the other cpus just became active.  Increment the ready count,
	 * and spin until all coupled cpus have incremented the counter. Once a
	 * cpu has incremented the ready counter, it cannot abort idle and must
	 * spin until either all cpus have incremented the ready counter, or
	 * another cpu leaves idle and decrements the waiting counter.
	 */

	cpuidle_coupled_set_ready(coupled);
	while (!cpuidle_coupled_cpus_ready(coupled)) {
		/* Check if any other cpus bailed out of idle. */
		if (!cpuidle_coupled_cpus_waiting(coupled))
			if (!cpuidle_coupled_set_not_ready(coupled))
				goto retry;

		cpu_relax();
	}

	/*
	 * Make sure read of all cpus ready is done before reading pending pokes
	 */
	smp_rmb();

	/*
	 * There is a small chance that a cpu left and reentered idle after this
	 * cpu saw that all cpus were waiting.  The cpu that reentered idle will
	 * have sent this cpu a poke, which will still be pending after the
	 * ready loop.  The pending interrupt may be lost by the interrupt
	 * controller when entering the deep idle state.  It's not possible to
	 * clear a pending interrupt without turning interrupts on and handling
	 * it, and it's too late to turn on interrupts here, so reset the
	 * coupled idle state of all cpus and retry.
	 */
	if (cpuidle_coupled_any_pokes_pending(coupled)) {
		cpuidle_coupled_set_done(dev->cpu, coupled);
		/* Wait for all cpus to see the pending pokes */
		cpuidle_coupled_parallel_barrier(dev, &coupled->abort_barrier);
		goto reset;
	}

	/* all cpus have acked the coupled state */
	next_state = cpuidle_coupled_get_state(dev, coupled);

	entered_state = cpuidle_enter_state(dev, drv, next_state);

	cpuidle_coupled_set_done(dev->cpu, coupled);

out:
	/*
	 * Normal cpuidle states are expected to return with irqs enabled.
	 * That leads to an inefficiency where a cpu receiving an interrupt
	 * that brings it out of idle will process that interrupt before
	 * exiting the idle enter function and decrementing ready_count.  All
	 * other cpus will need to spin waiting for the cpu that is processing
	 * the interrupt.  If the driver returns with interrupts disabled,
	 * all other cpus will loop back into the safe idle state instead of
	 * spinning, saving power.
	 *
	 * Calling local_irq_enable here allows coupled states to return with
	 * interrupts disabled, but won't cause problems for drivers that
	 * exit with interrupts enabled.  See the illustrative sketch after
	 * this function.
	 */
	local_irq_enable();

	/*
	 * Wait until all coupled cpus have exited idle.  There is no risk that
	 * a cpu exits and re-enters the ready state because this cpu has
	 * already decremented its waiting_count.
	 */
	while (!cpuidle_coupled_no_cpus_ready(coupled))
		cpu_relax();

	return entered_state;
}
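
/*
 * Illustrative sketch for the note at the "out:" label above: a coupled
 * state enter function can return with interrupts still disabled, and
 * cpuidle_enter_state_coupled() will enable them.  The foo_*() helper is
 * an assumption for the example, not a real API:
 *
 *	static int foo_enter_coupled(struct cpuidle_device *dev,
 *			struct cpuidle_driver *drv, int index)
 *	{
 *		foo_power_down_shared_blocks();
 *
 *		return index;
 *	}
 *
 * Note the deliberate absence of local_irq_enable() before the return;
 * the other cpus then drop back into the safe state instead of spinning
 * in the ready loop while this cpu handles its wakeup interrupt.
 */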

static void cpuidle_coupled_update_online_cpus(struct cpuidle_coupled *coupled)
{
	cpumask_t cpus;
	cpumask_and(&cpus, cpu_online_mask, &coupled->coupled_cpus);
	coupled->online_count = cpumask_weight(&cpus);
}

/**
 * cpuidle_coupled_register_device - register a coupled cpuidle device
 * @dev: struct cpuidle_device for the current cpu
 *
 * Called from cpuidle_register_device to handle coupled idle init.  Finds the
 * cpuidle_coupled struct for this set of coupled cpus, or creates one if none
 * exists yet.
 */
int cpuidle_coupled_register_device(struct cpuidle_device *dev)
{
	int cpu;
	struct cpuidle_device *other_dev;
	struct call_single_data *csd;
	struct cpuidle_coupled *coupled;

	if (cpumask_empty(&dev->coupled_cpus))
		return 0;

	for_each_cpu_mask(cpu, dev->coupled_cpus) {
		other_dev = per_cpu(cpuidle_devices, cpu);
		if (other_dev && other_dev->coupled) {
			coupled = other_dev->coupled;
			goto have_coupled;
		}
	}

	/* No existing coupled info found, create a new one */
	coupled = kzalloc(sizeof(struct cpuidle_coupled), GFP_KERNEL);
	if (!coupled)
		return -ENOMEM;

	coupled->coupled_cpus = dev->coupled_cpus;

have_coupled:
	dev->coupled = coupled;
	if (WARN_ON(!cpumask_equal(&dev->coupled_cpus, &coupled->coupled_cpus)))
		coupled->prevent++;

	cpuidle_coupled_update_online_cpus(coupled);

	coupled->refcnt++;

	csd = &per_cpu(cpuidle_coupled_poke_cb, dev->cpu);
	csd->func = cpuidle_coupled_handle_poke;
	csd->info = (void *)(unsigned long)dev->cpu;

	return 0;
}
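
/*
 * Illustrative sketch: a hypothetical driver would fill in coupled_cpus
 * (and the safe state index) on every cpu's device before registering it,
 * so that each registration finds or creates the shared struct above.
 * foo_idle_dev is an assumed per-cpu variable, not part of this file:
 *
 *	static DEFINE_PER_CPU(struct cpuidle_device, foo_idle_dev);
 *
 *	for_each_possible_cpu(cpu) {
 *		struct cpuidle_device *dev = &per_cpu(foo_idle_dev, cpu);
 *
 *		dev->cpu = cpu;
 *		dev->safe_state_index = 0;
 *		cpumask_copy(&dev->coupled_cpus, cpu_possible_mask);
 *		ret = cpuidle_register_device(dev);
 *		if (ret)
 *			return ret;
 *	}
 */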

/**
 * cpuidle_coupled_unregister_device - unregister a coupled cpuidle device
 * @dev: struct cpuidle_device for the current cpu
 *
 * Called from cpuidle_unregister_device to tear down coupled idle.  Removes the
 * cpu from the coupled idle set, and frees the cpuidle_coupled struct if
 * this was the last cpu in the set.
 */
void cpuidle_coupled_unregister_device(struct cpuidle_device *dev)
{
	struct cpuidle_coupled *coupled = dev->coupled;

	if (cpumask_empty(&dev->coupled_cpus))
		return;

	/* Free the shared struct only when the last reference is dropped. */
	if (!--coupled->refcnt)
		kfree(coupled);
	dev->coupled = NULL;
}

/**
 * cpuidle_coupled_prevent_idle - prevent cpus from entering a coupled state
 * @coupled: the struct coupled that contains the cpu that is changing state
 *
 * Disables coupled cpuidle on a coupled set of cpus.  Used to ensure that
 * cpu_online_mask doesn't change while cpus are coordinating coupled idle.
 */
static void cpuidle_coupled_prevent_idle(struct cpuidle_coupled *coupled)
{
	int cpu = get_cpu();

	/* Force all cpus out of the waiting loop. */
	coupled->prevent++;
	cpuidle_coupled_poke_others(cpu, coupled);
	put_cpu();
	while (!cpuidle_coupled_no_cpus_waiting(coupled))
		cpu_relax();
}

/**
 * cpuidle_coupled_allow_idle - allows cpus to enter a coupled state
 * @coupled: the struct coupled that contains the cpu that is changing state
 *
 * Enables coupled cpuidle on a coupled set of cpus.  Used to ensure that
 * cpu_online_mask doesn't change while cpus are coordinating coupled idle.
 */
static void cpuidle_coupled_allow_idle(struct cpuidle_coupled *coupled)
{
	int cpu = get_cpu();

	/*
	 * Write barrier ensures readers see the new online_count when they
	 * see prevent == 0.
	 */
	smp_wmb();
	coupled->prevent--;
	/* Force cpus out of the prevent loop. */
	cpuidle_coupled_poke_others(cpu, coupled);
	put_cpu();
}

/**
 * cpuidle_coupled_cpu_notify - notifier called during hotplug transitions
 * @nb: notifier block
 * @action: hotplug transition
 * @hcpu: target cpu number
 *
 * Called when a cpu is brought on or offline using hotplug.  Updates the
 * coupled cpu set appropriately.
 */
static int cpuidle_coupled_cpu_notify(struct notifier_block *nb,
		unsigned long action, void *hcpu)
{
	int cpu = (unsigned long)hcpu;
	struct cpuidle_device *dev;

	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_UP_PREPARE:
	case CPU_DOWN_PREPARE:
	case CPU_ONLINE:
	case CPU_DEAD:
	case CPU_UP_CANCELED:
	case CPU_DOWN_FAILED:
		break;
	default:
		return NOTIFY_OK;
	}

	mutex_lock(&cpuidle_lock);

	dev = per_cpu(cpuidle_devices, cpu);
	if (!dev || !dev->coupled)
		goto out;

	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_UP_PREPARE:
	case CPU_DOWN_PREPARE:
		cpuidle_coupled_prevent_idle(dev->coupled);
		break;
	case CPU_ONLINE:
	case CPU_DEAD:
		cpuidle_coupled_update_online_cpus(dev->coupled);
		/* Fall through */
	case CPU_UP_CANCELED:
	case CPU_DOWN_FAILED:
		cpuidle_coupled_allow_idle(dev->coupled);
		break;
	}

out:
	mutex_unlock(&cpuidle_lock);
	return NOTIFY_OK;
}

static struct notifier_block cpuidle_coupled_cpu_notifier = {
	.notifier_call = cpuidle_coupled_cpu_notify,
};

static int __init cpuidle_coupled_init(void)
{
	return register_cpu_notifier(&cpuidle_coupled_cpu_notifier);
}
core_initcall(cpuidle_coupled_init);
795