// SPDX-License-Identifier: GPL-2.0-only
/*
 * lib/sbitmap.c (revision 26edb30d)
 *
 * Copyright (C) 2016 Facebook
 * Copyright (C) 2013-2014 Jens Axboe
 */

#include <linux/sched.h>
#include <linux/random.h>
#include <linux/sbitmap.h>
#include <linux/seq_file.h>

static int init_alloc_hint(struct sbitmap *sb, gfp_t flags)
{
	unsigned depth = sb->depth;

	sb->alloc_hint = alloc_percpu_gfp(unsigned int, flags);
	if (!sb->alloc_hint)
		return -ENOMEM;

	if (depth && !sb->round_robin) {
		int i;

		for_each_possible_cpu(i)
			*per_cpu_ptr(sb->alloc_hint, i) = prandom_u32_max(depth);
	}
	return 0;
}

static inline unsigned update_alloc_hint_before_get(struct sbitmap *sb,
						    unsigned int depth)
{
	unsigned hint;

	hint = this_cpu_read(*sb->alloc_hint);
	if (unlikely(hint >= depth)) {
		hint = depth ? prandom_u32_max(depth) : 0;
		this_cpu_write(*sb->alloc_hint, hint);
	}

	return hint;
}

static inline void update_alloc_hint_after_get(struct sbitmap *sb,
					       unsigned int depth,
					       unsigned int hint,
					       unsigned int nr)
{
	if (nr == -1) {
		/* If the map is full, a hint won't do us much good. */
		this_cpu_write(*sb->alloc_hint, 0);
	} else if (nr == hint || unlikely(sb->round_robin)) {
		/* Only update the hint if we used it. */
		hint = nr + 1;
		if (hint >= depth - 1)
			hint = 0;
		this_cpu_write(*sb->alloc_hint, hint);
	}
}
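
/*
 * Illustrative example (not part of the original source): with depth = 16,
 * a successful sbitmap_get() that used the hint and returned bit 7 advances
 * this CPU's hint to 8, so the next search starts there. A result of 15
 * (where nr + 1 >= depth - 1) wraps the hint back to 0, and a failed
 * allocation (nr == -1) resets it to 0, since a hint into a full map is
 * useless.
 */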

/*
 * See if we have deferred clears that we can batch move
 */
static inline bool sbitmap_deferred_clear(struct sbitmap_word *map)
{
	unsigned long mask;

	if (!READ_ONCE(map->cleared))
		return false;

	/*
	 * First get a stable cleared mask, setting the old mask to 0.
	 */
	mask = xchg(&map->cleared, 0);

	/*
	 * Now clear the masked bits in our free word
	 */
	atomic_long_andnot(mask, (atomic_long_t *)&map->word);
	BUILD_BUG_ON(sizeof(atomic_long_t) != sizeof(map->word));
	return true;
}
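
/*
 * Worked example (illustrative): if word = 0b1111 and cleared = 0b0101, the
 * xchg() above atomically snapshots the cleared mask and zeroes it, then
 * atomic_long_andnot() drops those bits from word, leaving word = 0b1010 and
 * cleared = 0. Two freed bits thus become allocatable again in one batched
 * update rather than one atomic RMW per bit.
 */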

int sbitmap_init_node(struct sbitmap *sb, unsigned int depth, int shift,
		      gfp_t flags, int node, bool round_robin,
		      bool alloc_hint)
{
	unsigned int bits_per_word;

	if (shift < 0)
		shift = sbitmap_calculate_shift(depth);

	bits_per_word = 1U << shift;
	if (bits_per_word > BITS_PER_LONG)
		return -EINVAL;

	sb->shift = shift;
	sb->depth = depth;
	sb->map_nr = DIV_ROUND_UP(sb->depth, bits_per_word);
	sb->round_robin = round_robin;

	if (depth == 0) {
		sb->map = NULL;
		return 0;
	}

	if (alloc_hint) {
		if (init_alloc_hint(sb, flags))
			return -ENOMEM;
	} else {
		sb->alloc_hint = NULL;
	}

	sb->map = kvzalloc_node(sb->map_nr * sizeof(*sb->map), flags, node);
	if (!sb->map) {
		free_percpu(sb->alloc_hint);
		return -ENOMEM;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(sbitmap_init_node);
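
/*
 * Usage sketch (illustrative, not part of the original source; assumes
 * process context and GFP_KERNEL):
 *
 *	struct sbitmap sb;
 *	int nr;
 *
 *	// 256 bits; shift < 0 lets sbitmap_calculate_shift() pick the word
 *	// size; no round-robin; with per-CPU allocation hints.
 *	if (sbitmap_init_node(&sb, 256, -1, GFP_KERNEL, NUMA_NO_NODE,
 *			      false, true))
 *		return -ENOMEM;
 *	nr = sbitmap_get(&sb);			// a free bit, or -1
 *	if (nr >= 0)
 *		sbitmap_clear_bit(&sb, nr);	// release it again
 *	sbitmap_free(&sb);
 */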

void sbitmap_resize(struct sbitmap *sb, unsigned int depth)
{
	unsigned int bits_per_word = 1U << sb->shift;
	unsigned int i;

	for (i = 0; i < sb->map_nr; i++)
		sbitmap_deferred_clear(&sb->map[i]);

	sb->depth = depth;
	sb->map_nr = DIV_ROUND_UP(sb->depth, bits_per_word);
}
EXPORT_SYMBOL_GPL(sbitmap_resize);

static int __sbitmap_get_word(unsigned long *word, unsigned long depth,
			      unsigned int hint, bool wrap)
{
	int nr;

	/* don't wrap if starting from 0 */
	wrap = wrap && hint;

	while (1) {
		nr = find_next_zero_bit(word, depth, hint);
		if (unlikely(nr >= depth)) {
			/*
			 * We started with an offset, and we didn't reset the
			 * offset to 0 in a failure case, so start from 0 to
			 * exhaust the map.
			 */
			if (hint && wrap) {
				hint = 0;
				continue;
			}
			return -1;
		}

		if (!test_and_set_bit_lock(nr, word))
			break;

		hint = nr + 1;
		if (hint >= depth - 1)
			hint = 0;
	}

	return nr;
}
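
/*
 * Example (illustrative): with depth = 8, hint = 5 and wrap = true, the
 * search scans bits 5..7 and then restarts from bit 0 to exhaust the word.
 * With wrap = false (the round-robin case), the search stops at the end of
 * the word instead, so allocation order keeps moving forward.
 */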

static int sbitmap_find_bit_in_index(struct sbitmap *sb, int index,
				     unsigned int alloc_hint)
{
	struct sbitmap_word *map = &sb->map[index];
	int nr;

	do {
		nr = __sbitmap_get_word(&map->word, __map_depth(sb, index),
					alloc_hint, !sb->round_robin);
		if (nr != -1)
			break;
		if (!sbitmap_deferred_clear(map))
			break;
	} while (1);

	return nr;
}

static int __sbitmap_get(struct sbitmap *sb, unsigned int alloc_hint)
{
	unsigned int i, index;
	int nr = -1;

	index = SB_NR_TO_INDEX(sb, alloc_hint);

	/*
	 * Unless we're doing round robin tag allocation, just use the
	 * alloc_hint to find the right word index. No point in looping
	 * twice in find_next_zero_bit() for that case.
	 */
	if (sb->round_robin)
		alloc_hint = SB_NR_TO_BIT(sb, alloc_hint);
	else
		alloc_hint = 0;

	for (i = 0; i < sb->map_nr; i++) {
		nr = sbitmap_find_bit_in_index(sb, index, alloc_hint);
		if (nr != -1) {
			nr += index << sb->shift;
			break;
		}

		/* Jump to next index. */
		alloc_hint = 0;
		if (++index >= sb->map_nr)
			index = 0;
	}

	return nr;
}

int sbitmap_get(struct sbitmap *sb)
{
	int nr;
	unsigned int hint, depth;

	if (WARN_ON_ONCE(unlikely(!sb->alloc_hint)))
		return -1;

	depth = READ_ONCE(sb->depth);
	hint = update_alloc_hint_before_get(sb, depth);
	nr = __sbitmap_get(sb, hint);
	update_alloc_hint_after_get(sb, depth, hint, nr);

	return nr;
}
EXPORT_SYMBOL_GPL(sbitmap_get);

static int __sbitmap_get_shallow(struct sbitmap *sb,
				 unsigned int alloc_hint,
				 unsigned long shallow_depth)
{
	unsigned int i, index;
	int nr = -1;

	index = SB_NR_TO_INDEX(sb, alloc_hint);

	for (i = 0; i < sb->map_nr; i++) {
again:
		nr = __sbitmap_get_word(&sb->map[index].word,
					min_t(unsigned int,
					      __map_depth(sb, index),
					      shallow_depth),
					SB_NR_TO_BIT(sb, alloc_hint), true);
		if (nr != -1) {
			nr += index << sb->shift;
			break;
		}

		if (sbitmap_deferred_clear(&sb->map[index]))
			goto again;

		/* Jump to next index. */
		index++;
		alloc_hint = index << sb->shift;

		if (index >= sb->map_nr) {
			index = 0;
			alloc_hint = 0;
		}
	}

	return nr;
}

int sbitmap_get_shallow(struct sbitmap *sb, unsigned long shallow_depth)
{
	int nr;
	unsigned int hint, depth;

	if (WARN_ON_ONCE(unlikely(!sb->alloc_hint)))
		return -1;

	depth = READ_ONCE(sb->depth);
	hint = update_alloc_hint_before_get(sb, depth);
	nr = __sbitmap_get_shallow(sb, hint, shallow_depth);
	update_alloc_hint_after_get(sb, depth, hint, nr);

	return nr;
}
EXPORT_SYMBOL_GPL(sbitmap_get_shallow);
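
/*
 * Note (illustrative): shallow_depth caps how many low bits of each word may
 * be used. E.g. with 64 bits per word and shallow_depth = 4, only bits 0-3
 * of every word are candidates on this path, which bounds how much of the
 * map a "shallow" user can consume.
 */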

bool sbitmap_any_bit_set(const struct sbitmap *sb)
{
	unsigned int i;

	for (i = 0; i < sb->map_nr; i++) {
		if (sb->map[i].word & ~sb->map[i].cleared)
			return true;
	}
	return false;
}
EXPORT_SYMBOL_GPL(sbitmap_any_bit_set);

static unsigned int __sbitmap_weight(const struct sbitmap *sb, bool set)
{
	unsigned int i, weight = 0;

	for (i = 0; i < sb->map_nr; i++) {
		const struct sbitmap_word *word = &sb->map[i];
		unsigned int word_depth = __map_depth(sb, i);

		if (set)
			weight += bitmap_weight(&word->word, word_depth);
		else
			weight += bitmap_weight(&word->cleared, word_depth);
	}
	return weight;
}

static unsigned int sbitmap_cleared(const struct sbitmap *sb)
{
	return __sbitmap_weight(sb, false);
}

unsigned int sbitmap_weight(const struct sbitmap *sb)
{
	return __sbitmap_weight(sb, true) - sbitmap_cleared(sb);
}
EXPORT_SYMBOL_GPL(sbitmap_weight);

void sbitmap_show(struct sbitmap *sb, struct seq_file *m)
{
	seq_printf(m, "depth=%u\n", sb->depth);
	seq_printf(m, "busy=%u\n", sbitmap_weight(sb));
	seq_printf(m, "cleared=%u\n", sbitmap_cleared(sb));
	seq_printf(m, "bits_per_word=%u\n", 1U << sb->shift);
	seq_printf(m, "map_nr=%u\n", sb->map_nr);
}
EXPORT_SYMBOL_GPL(sbitmap_show);

static inline void emit_byte(struct seq_file *m, unsigned int offset, u8 byte)
{
	if ((offset & 0xf) == 0) {
		if (offset != 0)
			seq_putc(m, '\n');
		seq_printf(m, "%08x:", offset);
	}
	if ((offset & 0x1) == 0)
		seq_putc(m, ' ');
	seq_printf(m, "%02x", byte);
}

void sbitmap_bitmap_show(struct sbitmap *sb, struct seq_file *m)
{
	u8 byte = 0;
	unsigned int byte_bits = 0;
	unsigned int offset = 0;
	int i;

	for (i = 0; i < sb->map_nr; i++) {
		unsigned long word = READ_ONCE(sb->map[i].word);
		unsigned long cleared = READ_ONCE(sb->map[i].cleared);
		unsigned int word_bits = __map_depth(sb, i);

		word &= ~cleared;

		while (word_bits > 0) {
			unsigned int bits = min(8 - byte_bits, word_bits);

			byte |= (word & (BIT(bits) - 1)) << byte_bits;
			byte_bits += bits;
			if (byte_bits == 8) {
				emit_byte(m, offset, byte);
				byte = 0;
				byte_bits = 0;
				offset++;
			}
			word >>= bits;
			word_bits -= bits;
		}
	}
	if (byte_bits) {
		emit_byte(m, offset, byte);
		offset++;
	}
	if (offset)
		seq_putc(m, '\n');
}
EXPORT_SYMBOL_GPL(sbitmap_bitmap_show);

static unsigned int sbq_calc_wake_batch(struct sbitmap_queue *sbq,
					unsigned int depth)
{
	unsigned int wake_batch;
	unsigned int shallow_depth;

	/*
	 * For each batch, we wake up one queue. We need to make sure that our
	 * batch size is small enough that the full depth of the bitmap,
	 * potentially limited by a shallow depth, is enough to wake up all of
	 * the queues.
	 *
	 * Each full word of the bitmap has bits_per_word bits, and there might
	 * be a partial word. There are depth / bits_per_word full words and
	 * depth % bits_per_word bits left over. In bitwise arithmetic:
	 *
	 * bits_per_word = 1 << shift
	 * depth / bits_per_word = depth >> shift
	 * depth % bits_per_word = depth & ((1 << shift) - 1)
	 *
	 * Each word can be limited to sbq->min_shallow_depth bits.
	 */
	shallow_depth = min(1U << sbq->sb.shift, sbq->min_shallow_depth);
	depth = ((depth >> sbq->sb.shift) * shallow_depth +
		 min(depth & ((1U << sbq->sb.shift) - 1), shallow_depth));
	wake_batch = clamp_t(unsigned int, depth / SBQ_WAIT_QUEUES, 1,
			     SBQ_WAKE_BATCH);

	return wake_batch;
}
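
/*
 * Worked example (illustrative): with depth = 128, shift = 6 (64 bits per
 * word) and min_shallow_depth = UINT_MAX, shallow_depth = 64, the scaled
 * depth stays (128 >> 6) * 64 + 0 = 128, and wake_batch =
 * clamp(128 / SBQ_WAIT_QUEUES, 1, SBQ_WAKE_BATCH) = clamp(16, 1, 8) = 8.
 * If min_shallow_depth were 8, the usable depth would shrink to 2 * 8 = 16
 * and wake_batch would drop to 2.
 */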

int sbitmap_queue_init_node(struct sbitmap_queue *sbq, unsigned int depth,
			    int shift, bool round_robin, gfp_t flags, int node)
{
	int ret;
	int i;

	ret = sbitmap_init_node(&sbq->sb, depth, shift, flags, node,
				round_robin, true);
	if (ret)
		return ret;

	sbq->min_shallow_depth = UINT_MAX;
	sbq->wake_batch = sbq_calc_wake_batch(sbq, depth);
	atomic_set(&sbq->wake_index, 0);
	atomic_set(&sbq->ws_active, 0);
	atomic_set(&sbq->completion_cnt, 0);
	atomic_set(&sbq->wakeup_cnt, 0);

	sbq->ws = kzalloc_node(SBQ_WAIT_QUEUES * sizeof(*sbq->ws), flags, node);
	if (!sbq->ws) {
		sbitmap_free(&sbq->sb);
		return -ENOMEM;
	}

	for (i = 0; i < SBQ_WAIT_QUEUES; i++)
		init_waitqueue_head(&sbq->ws[i].wait);

	return 0;
}
EXPORT_SYMBOL_GPL(sbitmap_queue_init_node);
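
/*
 * Usage sketch (illustrative, not part of the original source; assumes
 * process context):
 *
 *	struct sbitmap_queue sbq;
 *	int tag;
 *
 *	if (sbitmap_queue_init_node(&sbq, 64, -1, false, GFP_KERNEL,
 *				    NUMA_NO_NODE))
 *		return -ENOMEM;
 *	tag = __sbitmap_queue_get(&sbq);	// a free tag, or -1
 *	if (tag >= 0)
 *		sbitmap_queue_clear(&sbq, tag, raw_smp_processor_id());
 *	sbitmap_queue_free(&sbq);
 */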

static void sbitmap_queue_update_wake_batch(struct sbitmap_queue *sbq,
					    unsigned int depth)
{
	unsigned int wake_batch;

	wake_batch = sbq_calc_wake_batch(sbq, depth);
	if (sbq->wake_batch != wake_batch)
		WRITE_ONCE(sbq->wake_batch, wake_batch);
}

void sbitmap_queue_recalculate_wake_batch(struct sbitmap_queue *sbq,
					    unsigned int users)
{
	unsigned int wake_batch;
	unsigned int min_batch;
	unsigned int depth = (sbq->sb.depth + users - 1) / users;

	min_batch = sbq->sb.depth >= (4 * SBQ_WAIT_QUEUES) ? 4 : 1;

	wake_batch = clamp_val(depth / SBQ_WAIT_QUEUES,
			min_batch, SBQ_WAKE_BATCH);

	WRITE_ONCE(sbq->wake_batch, wake_batch);
}
EXPORT_SYMBOL_GPL(sbitmap_queue_recalculate_wake_batch);

void sbitmap_queue_resize(struct sbitmap_queue *sbq, unsigned int depth)
{
	sbitmap_queue_update_wake_batch(sbq, depth);
	sbitmap_resize(&sbq->sb, depth);
}
EXPORT_SYMBOL_GPL(sbitmap_queue_resize);

int __sbitmap_queue_get(struct sbitmap_queue *sbq)
{
	return sbitmap_get(&sbq->sb);
}
EXPORT_SYMBOL_GPL(__sbitmap_queue_get);

unsigned long __sbitmap_queue_get_batch(struct sbitmap_queue *sbq, int nr_tags,
					unsigned int *offset)
{
	struct sbitmap *sb = &sbq->sb;
	unsigned int hint, depth;
	unsigned long index, nr;
	int i;

	if (unlikely(sb->round_robin))
		return 0;

	depth = READ_ONCE(sb->depth);
	hint = update_alloc_hint_before_get(sb, depth);

	index = SB_NR_TO_INDEX(sb, hint);

	for (i = 0; i < sb->map_nr; i++) {
		struct sbitmap_word *map = &sb->map[index];
		unsigned long get_mask;
		unsigned int map_depth = __map_depth(sb, index);

		sbitmap_deferred_clear(map);
		if (map->word == (1UL << (map_depth - 1)) - 1)
			goto next;

		nr = find_first_zero_bit(&map->word, map_depth);
		if (nr + nr_tags <= map_depth) {
			atomic_long_t *ptr = (atomic_long_t *) &map->word;
			unsigned long val;

			get_mask = ((1UL << nr_tags) - 1) << nr;
			val = READ_ONCE(map->word);
			do {
				if ((val & ~get_mask) != val)
					goto next;
			} while (!atomic_long_try_cmpxchg(ptr, &val,
							  get_mask | val));
			get_mask = (get_mask & ~val) >> nr;
			if (get_mask) {
				*offset = nr + (index << sb->shift);
				update_alloc_hint_after_get(sb, depth, hint,
							*offset + nr_tags - 1);
				return get_mask;
			}
		}
next:
		/* Jump to next index. */
		if (++index >= sb->map_nr)
			index = 0;
	}

	return 0;
}
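
/*
 * Example of the batch grab above (illustrative): with nr = 3 and
 * nr_tags = 2, get_mask = 0b11 << 3 = 0b11000. The cmpxchg loop bails to the
 * next word if any requested bit is already set ((val & ~get_mask) != val);
 * otherwise it atomically sets all of get_mask in one shot. On success every
 * requested bit was free, so the returned mask covers nr_tags consecutive
 * tags starting at *offset.
 */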

int sbitmap_queue_get_shallow(struct sbitmap_queue *sbq,
			      unsigned int shallow_depth)
{
	WARN_ON_ONCE(shallow_depth < sbq->min_shallow_depth);

	return sbitmap_get_shallow(&sbq->sb, shallow_depth);
}
EXPORT_SYMBOL_GPL(sbitmap_queue_get_shallow);

void sbitmap_queue_min_shallow_depth(struct sbitmap_queue *sbq,
				     unsigned int min_shallow_depth)
{
	sbq->min_shallow_depth = min_shallow_depth;
	sbitmap_queue_update_wake_batch(sbq, sbq->sb.depth);
}
EXPORT_SYMBOL_GPL(sbitmap_queue_min_shallow_depth);

static void __sbitmap_queue_wake_up(struct sbitmap_queue *sbq, int nr)
{
	int i, wake_index;

	if (!atomic_read(&sbq->ws_active))
		return;

	wake_index = atomic_read(&sbq->wake_index);
	for (i = 0; i < SBQ_WAIT_QUEUES; i++) {
		struct sbq_wait_state *ws = &sbq->ws[wake_index];

		/*
		 * Advance the index before checking the current queue.
		 * It improves fairness by ensuring that a queue doesn't
		 * need to be fully emptied before we try to wake up
		 * waiters on the next one.
		 */
		wake_index = sbq_index_inc(wake_index);

		/*
		 * It is sufficient to wake up at least one waiter to
		 * guarantee forward progress.
		 */
		if (waitqueue_active(&ws->wait) &&
		    wake_up_nr(&ws->wait, nr))
			break;
	}

	if (wake_index != atomic_read(&sbq->wake_index))
		atomic_set(&sbq->wake_index, wake_index);
}

void sbitmap_queue_wake_up(struct sbitmap_queue *sbq, int nr)
{
	unsigned int wake_batch = READ_ONCE(sbq->wake_batch);
	unsigned int wakeups;

	if (!atomic_read(&sbq->ws_active))
		return;

	atomic_add(nr, &sbq->completion_cnt);
	wakeups = atomic_read(&sbq->wakeup_cnt);

	do {
		if (atomic_read(&sbq->completion_cnt) - wakeups < wake_batch)
			return;
	} while (!atomic_try_cmpxchg(&sbq->wakeup_cnt,
				     &wakeups, wakeups + wake_batch));

	__sbitmap_queue_wake_up(sbq, wake_batch);
}
EXPORT_SYMBOL_GPL(sbitmap_queue_wake_up);
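
/*
 * Worked example (illustrative): with wake_batch = 8, completions accumulate
 * in completion_cnt. Once completion_cnt - wakeup_cnt reaches 8, exactly one
 * caller wins the atomic_try_cmpxchg() on wakeup_cnt, advances it by 8, and
 * wakes up to 8 waiters on one wait queue; concurrent callers observe the
 * updated count and return without issuing a redundant wakeup.
 */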

static inline void sbitmap_update_cpu_hint(struct sbitmap *sb, int cpu, int tag)
{
	if (likely(!sb->round_robin && tag < sb->depth))
		data_race(*per_cpu_ptr(sb->alloc_hint, cpu) = tag);
}

void sbitmap_queue_clear_batch(struct sbitmap_queue *sbq, int offset,
				int *tags, int nr_tags)
{
	struct sbitmap *sb = &sbq->sb;
	unsigned long *addr = NULL;
	unsigned long mask = 0;
	int i;

	smp_mb__before_atomic();
	for (i = 0; i < nr_tags; i++) {
		const int tag = tags[i] - offset;
		unsigned long *this_addr;

		/* since we're clearing a batch, skip the deferred map */
		this_addr = &sb->map[SB_NR_TO_INDEX(sb, tag)].word;
		if (!addr) {
			addr = this_addr;
		} else if (addr != this_addr) {
			atomic_long_andnot(mask, (atomic_long_t *) addr);
			mask = 0;
			addr = this_addr;
		}
		mask |= (1UL << SB_NR_TO_BIT(sb, tag));
	}

	if (mask)
		atomic_long_andnot(mask, (atomic_long_t *) addr);

	smp_mb__after_atomic();
	sbitmap_queue_wake_up(sbq, nr_tags);
	sbitmap_update_cpu_hint(&sbq->sb, raw_smp_processor_id(),
					tags[nr_tags - 1] - offset);
}

void sbitmap_queue_clear(struct sbitmap_queue *sbq, unsigned int nr,
			 unsigned int cpu)
{
	/*
	 * Once the cleared bit is set, the bit may be reallocated.
	 *
	 * This orders reads/writes on the associated instance (such as a
	 * blk_mq request) against setting this bit, to avoid racing with
	 * re-allocation; its pair is the memory barrier implied by
	 * __sbitmap_get_word().
	 *
	 * One invariant is that the cleared bit has to be zero while the
	 * bit is in use.
	 */
	smp_mb__before_atomic();
	sbitmap_deferred_clear_bit(&sbq->sb, nr);

	/*
	 * Pairs with the memory barrier in set_current_state() to ensure the
	 * proper ordering of clear_bit_unlock()/waitqueue_active() in the waker
	 * and test_and_set_bit_lock()/prepare_to_wait()/finish_wait() in the
	 * waiter. See the comment on waitqueue_active().
	 */
	smp_mb__after_atomic();
	sbitmap_queue_wake_up(sbq, 1);
	sbitmap_update_cpu_hint(&sbq->sb, cpu, nr);
}
EXPORT_SYMBOL_GPL(sbitmap_queue_clear);

void sbitmap_queue_wake_all(struct sbitmap_queue *sbq)
{
	int i, wake_index;

	/*
	 * Pairs with the memory barrier in set_current_state() like in
	 * sbitmap_queue_wake_up().
	 */
	smp_mb();
	wake_index = atomic_read(&sbq->wake_index);
	for (i = 0; i < SBQ_WAIT_QUEUES; i++) {
		struct sbq_wait_state *ws = &sbq->ws[wake_index];

		if (waitqueue_active(&ws->wait))
			wake_up(&ws->wait);

		wake_index = sbq_index_inc(wake_index);
	}
}
EXPORT_SYMBOL_GPL(sbitmap_queue_wake_all);

void sbitmap_queue_show(struct sbitmap_queue *sbq, struct seq_file *m)
{
	bool first;
	int i;

	sbitmap_show(&sbq->sb, m);

	seq_puts(m, "alloc_hint={");
	first = true;
	for_each_possible_cpu(i) {
		if (!first)
			seq_puts(m, ", ");
		first = false;
		seq_printf(m, "%u", *per_cpu_ptr(sbq->sb.alloc_hint, i));
	}
	seq_puts(m, "}\n");

	seq_printf(m, "wake_batch=%u\n", sbq->wake_batch);
	seq_printf(m, "wake_index=%d\n", atomic_read(&sbq->wake_index));
	seq_printf(m, "ws_active=%d\n", atomic_read(&sbq->ws_active));

	seq_puts(m, "ws={\n");
	for (i = 0; i < SBQ_WAIT_QUEUES; i++) {
		struct sbq_wait_state *ws = &sbq->ws[i];

		seq_printf(m, "\t{.wait=%s},\n",
			   waitqueue_active(&ws->wait) ? "active" : "inactive");
	}
	seq_puts(m, "}\n");

	seq_printf(m, "round_robin=%d\n", sbq->sb.round_robin);
	seq_printf(m, "min_shallow_depth=%u\n", sbq->min_shallow_depth);
}
EXPORT_SYMBOL_GPL(sbitmap_queue_show);

void sbitmap_add_wait_queue(struct sbitmap_queue *sbq,
			    struct sbq_wait_state *ws,
			    struct sbq_wait *sbq_wait)
{
	if (!sbq_wait->sbq) {
		sbq_wait->sbq = sbq;
		atomic_inc(&sbq->ws_active);
		add_wait_queue(&ws->wait, &sbq_wait->wait);
	}
}
EXPORT_SYMBOL_GPL(sbitmap_add_wait_queue);

void sbitmap_del_wait_queue(struct sbq_wait *sbq_wait)
{
	list_del_init(&sbq_wait->wait.entry);
	if (sbq_wait->sbq) {
		atomic_dec(&sbq_wait->sbq->ws_active);
		sbq_wait->sbq = NULL;
	}
}
EXPORT_SYMBOL_GPL(sbitmap_del_wait_queue);

void sbitmap_prepare_to_wait(struct sbitmap_queue *sbq,
			     struct sbq_wait_state *ws,
			     struct sbq_wait *sbq_wait, int state)
{
	if (!sbq_wait->sbq) {
		atomic_inc(&sbq->ws_active);
		sbq_wait->sbq = sbq;
	}
	prepare_to_wait_exclusive(&ws->wait, &sbq_wait->wait, state);
}
EXPORT_SYMBOL_GPL(sbitmap_prepare_to_wait);

void sbitmap_finish_wait(struct sbitmap_queue *sbq, struct sbq_wait_state *ws,
			 struct sbq_wait *sbq_wait)
{
	finish_wait(&ws->wait, &sbq_wait->wait);
	if (sbq_wait->sbq) {
		atomic_dec(&sbq->ws_active);
		sbq_wait->sbq = NULL;
	}
}
EXPORT_SYMBOL_GPL(sbitmap_finish_wait);
783