// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2009-2011 Red Hat, Inc.
 *
 * Author: Mikulas Patocka <mpatocka@redhat.com>
 *
 * This file is released under the GPL.
 */

#include <linux/dm-bufio.h>

#include <linux/device-mapper.h>
#include <linux/dm-io.h>
#include <linux/slab.h>
#include <linux/sched/mm.h>
#include <linux/jiffies.h>
#include <linux/vmalloc.h>
#include <linux/shrinker.h>
#include <linux/module.h>
#include <linux/rbtree.h>
#include <linux/stacktrace.h>
#include <linux/jump_label.h>

#include "dm.h"

#define DM_MSG_PREFIX "bufio"

/*
 * Memory management policy:
 *	Limit the number of buffers to DM_BUFIO_MEMORY_PERCENT of main memory
 *	or DM_BUFIO_VMALLOC_PERCENT of vmalloc memory (whichever is lower).
 *	Always allocate at least DM_BUFIO_MIN_BUFFERS buffers.
 *	Start background writeback when the number of dirty buffers exceeds
 *	DM_BUFIO_WRITEBACK_RATIO times the number of clean buffers.
 */
#define DM_BUFIO_MIN_BUFFERS		8

#define DM_BUFIO_MEMORY_PERCENT		2
#define DM_BUFIO_VMALLOC_PERCENT	25
#define DM_BUFIO_WRITEBACK_RATIO	3
#define DM_BUFIO_LOW_WATERMARK_RATIO	16

/*
 * Check buffer ages in this interval (seconds)
 */
#define DM_BUFIO_WORK_TIMER_SECS	30

/*
 * Free buffers when they are older than this (seconds)
 */
#define DM_BUFIO_DEFAULT_AGE_SECS	300

/*
 * The number of bytes of cached data to keep around.
 */
#define DM_BUFIO_DEFAULT_RETAIN_BYTES	(256 * 1024)

/*
 * Align buffer writes to this boundary.
 * Tests show that SSDs have the highest IOPS when using 4k writes.
 */
#define DM_BUFIO_WRITE_ALIGN		4096

/*
 * dm_buffer->list_mode
 */
#define LIST_CLEAN	0
#define LIST_DIRTY	1
#define LIST_SIZE	2

#define SCAN_RESCHED_CYCLE	16

/*--------------------------------------------------------------*/

/*
 * Rather than use an LRU list, we use a clock algorithm where entries
 * are held in a circular list. When an entry is 'hit' a reference bit
 * is set. The least recently used entry is approximated by running a
 * cursor around the list selecting unreferenced entries. Referenced
 * entries have their reference bit cleared as the cursor passes them.
 */
struct lru_entry {
	struct list_head list;
	atomic_t referenced;
};

struct lru_iter {
	struct lru *lru;
	struct list_head list;
	struct lru_entry *stop;
	struct lru_entry *e;
};

struct lru {
	struct list_head *cursor;
	unsigned long count;

	struct list_head iterators;
};

/*--------------*/

static void lru_init(struct lru *lru)
{
	lru->cursor = NULL;
	lru->count = 0;
	INIT_LIST_HEAD(&lru->iterators);
}

static void lru_destroy(struct lru *lru)
{
	WARN_ON_ONCE(lru->cursor);
	WARN_ON_ONCE(!list_empty(&lru->iterators));
}

/*
 * Insert a new entry into the lru.
 */
static void lru_insert(struct lru *lru, struct lru_entry *le)
{
	/*
	 * Don't be tempted to set to 1, makes the lru aspect
	 * perform poorly.
	 */
	atomic_set(&le->referenced, 0);

	if (lru->cursor) {
		list_add_tail(&le->list, lru->cursor);
	} else {
		INIT_LIST_HEAD(&le->list);
		lru->cursor = &le->list;
	}
	lru->count++;
}
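
/*
 * Illustrative sketch (not part of the original source): lru_insert()
 * keeps the entries on a circular list, so inserting a, b and c into an
 * empty lru yields a -> b -> c -> a, with lru->cursor still at a:
 *
 *	struct lru lru;
 *	struct lru_entry a, b, c;
 *
 *	lru_init(&lru);
 *	lru_insert(&lru, &a);	// cursor = &a.list
 *	lru_insert(&lru, &b);	// a -> b -> a
 *	lru_insert(&lru, &c);	// a -> b -> c -> a
 */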

/*--------------*/

/*
 * Convert a list_head pointer to an lru_entry pointer.
 */
static inline struct lru_entry *to_le(struct list_head *l)
{
	return container_of(l, struct lru_entry, list);
}

/*
 * Initialize an lru_iter and add it to the list of cursors in the lru.
 */
static void lru_iter_begin(struct lru *lru, struct lru_iter *it)
{
	it->lru = lru;
	it->stop = lru->cursor ? to_le(lru->cursor->prev) : NULL;
	it->e = lru->cursor ? to_le(lru->cursor) : NULL;
	list_add(&it->list, &lru->iterators);
}

/*
 * Remove an lru_iter from the list of cursors in the lru.
 */
static inline void lru_iter_end(struct lru_iter *it)
{
	list_del(&it->list);
}

/* Predicate function type to be used with lru_iter_next */
typedef bool (*iter_predicate)(struct lru_entry *le, void *context);

/*
 * Advance the cursor to the next entry that passes the
 * predicate, and return that entry. Returns NULL if the
 * iteration is complete.
 */
static struct lru_entry *lru_iter_next(struct lru_iter *it,
				       iter_predicate pred, void *context)
{
	struct lru_entry *e;

	while (it->e) {
		e = it->e;

		/* advance the cursor */
		if (it->e == it->stop)
			it->e = NULL;
		else
			it->e = to_le(it->e->list.next);

		if (pred(e, context))
			return e;
	}

	return NULL;
}
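
/*
 * Illustrative sketch (not part of the original source): a typical
 * iteration visits every entry that passes a predicate and must be
 * bracketed by lru_iter_begin()/lru_iter_end() so that concurrent
 * removals via lru_iter_invalidate() keep the cursor valid.
 * match_all and do_something are hypothetical:
 *
 *	static bool match_all(struct lru_entry *le, void *context)
 *	{
 *		return true;
 *	}
 *
 *	struct lru_iter it;
 *	struct lru_entry *le;
 *
 *	lru_iter_begin(&lru, &it);
 *	while ((le = lru_iter_next(&it, match_all, NULL)))
 *		do_something(le);
 *	lru_iter_end(&it);
 */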

/*
 * Invalidate a specific lru_entry and update all cursors in
 * the lru accordingly.
 */
static void lru_iter_invalidate(struct lru *lru, struct lru_entry *e)
{
	struct lru_iter *it;

	list_for_each_entry(it, &lru->iterators, list) {
		/* Move it->e forwards if necessary. */
		if (it->e == e) {
			it->e = to_le(it->e->list.next);
			if (it->e == e)
				it->e = NULL;
		}

		/* Move it->stop backwards if necessary. */
		if (it->stop == e) {
			it->stop = to_le(it->stop->list.prev);
			if (it->stop == e)
				it->stop = NULL;
		}
	}
}

/*--------------*/

/*
 * Remove a specific entry from the lru.
 */
static void lru_remove(struct lru *lru, struct lru_entry *le)
{
	lru_iter_invalidate(lru, le);
	if (lru->count == 1) {
		lru->cursor = NULL;
	} else {
		if (lru->cursor == &le->list)
			lru->cursor = lru->cursor->next;
		list_del(&le->list);
	}
	lru->count--;
}

/*
 * Mark as referenced.
 */
static inline void lru_reference(struct lru_entry *le)
{
	atomic_set(&le->referenced, 1);
}

/*--------------*/

/*
 * Remove the least recently used entry (approx) that passes the predicate.
 * Returns NULL on failure.
 */
enum evict_result {
	ER_EVICT,
	ER_DONT_EVICT,
	ER_STOP, /* stop looking for something to evict */
};

typedef enum evict_result (*le_predicate)(struct lru_entry *le, void *context);

static struct lru_entry *lru_evict(struct lru *lru, le_predicate pred, void *context, bool no_sleep)
{
	unsigned long tested = 0;
	struct list_head *h = lru->cursor;
	struct lru_entry *le;

	if (!h)
		return NULL;
	/*
	 * In the worst case we have to loop around twice. Once to clear
	 * the reference flags, and then again to discover the predicate
	 * fails for all entries.
	 */
	while (tested < lru->count) {
		le = container_of(h, struct lru_entry, list);

		if (atomic_read(&le->referenced)) {
			atomic_set(&le->referenced, 0);
		} else {
			tested++;
			switch (pred(le, context)) {
			case ER_EVICT:
				/*
				 * Adjust the cursor, so we start the next
				 * search from here.
				 */
				lru->cursor = le->list.next;
				lru_remove(lru, le);
				return le;

			case ER_DONT_EVICT:
				break;

			case ER_STOP:
				lru->cursor = le->list.next;
				return NULL;
			}
		}

		h = h->next;

		if (!no_sleep)
			cond_resched();
	}

	return NULL;
}
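
/*
 * Illustrative sketch (not part of the original source): lru_evict()
 * gives referenced entries a "second chance" — an entry whose reference
 * bit is set is skipped (and the bit cleared); only unreferenced entries
 * are offered to the predicate. evict_any is hypothetical:
 *
 *	static enum evict_result evict_any(struct lru_entry *le, void *context)
 *	{
 *		return ER_EVICT;
 *	}
 *
 *	struct lru_entry *victim = lru_evict(&lru, evict_any, NULL, false);
 *	// victim is approximately the least recently used entry, or NULL
 */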

/*--------------------------------------------------------------*/

/*
 * Buffer state bits.
 */
#define B_READING	0
#define B_WRITING	1
#define B_DIRTY		2

/*
 * Describes how the block was allocated:
 * kmem_cache_alloc(), __get_free_pages() or vmalloc().
 * See the comment at alloc_buffer_data.
 */
enum data_mode {
	DATA_MODE_SLAB = 0,
	DATA_MODE_GET_FREE_PAGES = 1,
	DATA_MODE_VMALLOC = 2,
	DATA_MODE_LIMIT = 3
};

struct dm_buffer {
	/* protected by the locks in dm_buffer_cache */
	struct rb_node node;

	/* immutable, so don't need protecting */
	sector_t block;
	void *data;
	unsigned char data_mode;	/* DATA_MODE_* */

	/*
	 * These two fields are used in isolation, so do not need
	 * a surrounding lock.
	 */
	atomic_t hold_count;
	unsigned long last_accessed;

	/*
	 * Everything else is protected by the mutex in
	 * dm_bufio_client
	 */
	unsigned long state;
	struct lru_entry lru;
	unsigned char list_mode;	/* LIST_* */
	blk_status_t read_error;
	blk_status_t write_error;
	unsigned int dirty_start;
	unsigned int dirty_end;
	unsigned int write_start;
	unsigned int write_end;
	struct list_head write_list;
	struct dm_bufio_client *c;
	void (*end_io)(struct dm_buffer *b, blk_status_t bs);
#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
#define MAX_STACK 10
	unsigned int stack_len;
	unsigned long stack_entries[MAX_STACK];
#endif
};

/*--------------------------------------------------------------*/

/*
 * The buffer cache manages buffers, particularly:
 *  - inc/dec of holder count
 *  - setting the last_accessed field
 *  - maintaining clean/dirty state along with the lru
 *  - selecting buffers that match predicates
 *
 * It does *not* handle:
 *  - allocation/freeing of buffers.
 *  - IO
 *  - Eviction or cache sizing.
 *
 * cache_get() and cache_put() are threadsafe, you do not need to
 * protect these calls with a surrounding mutex. All the other
 * methods are not threadsafe; they do use locking primitives, but
 * only enough to ensure get/put are threadsafe.
 */

struct buffer_tree {
	union {
		struct rw_semaphore lock;
		rwlock_t spinlock;
	} u;
	struct rb_root root;
} ____cacheline_aligned_in_smp;

struct dm_buffer_cache {
	struct lru lru[LIST_SIZE];
	/*
	 * We spread entries across multiple trees to reduce contention
	 * on the locks.
	 */
	unsigned int num_locks;
	bool no_sleep;
	struct buffer_tree trees[];
};

static DEFINE_STATIC_KEY_FALSE(no_sleep_enabled);

static inline unsigned int cache_index(sector_t block, unsigned int num_locks)
{
	return dm_hash_locks_index(block, num_locks);
}

static inline void cache_read_lock(struct dm_buffer_cache *bc, sector_t block)
{
	if (static_branch_unlikely(&no_sleep_enabled) && bc->no_sleep)
		read_lock_bh(&bc->trees[cache_index(block, bc->num_locks)].u.spinlock);
	else
		down_read(&bc->trees[cache_index(block, bc->num_locks)].u.lock);
}

static inline void cache_read_unlock(struct dm_buffer_cache *bc, sector_t block)
{
	if (static_branch_unlikely(&no_sleep_enabled) && bc->no_sleep)
		read_unlock_bh(&bc->trees[cache_index(block, bc->num_locks)].u.spinlock);
	else
		up_read(&bc->trees[cache_index(block, bc->num_locks)].u.lock);
}

static inline void cache_write_lock(struct dm_buffer_cache *bc, sector_t block)
{
	if (static_branch_unlikely(&no_sleep_enabled) && bc->no_sleep)
		write_lock_bh(&bc->trees[cache_index(block, bc->num_locks)].u.spinlock);
	else
		down_write(&bc->trees[cache_index(block, bc->num_locks)].u.lock);
}

static inline void cache_write_unlock(struct dm_buffer_cache *bc, sector_t block)
{
	if (static_branch_unlikely(&no_sleep_enabled) && bc->no_sleep)
		write_unlock_bh(&bc->trees[cache_index(block, bc->num_locks)].u.spinlock);
	else
		up_write(&bc->trees[cache_index(block, bc->num_locks)].u.lock);
}

/*
 * Sometimes we want to repeatedly get and drop locks as part of an iteration.
 * This struct helps avoid redundant drops and gets of the same lock.
 */
struct lock_history {
	struct dm_buffer_cache *cache;
	bool write;
	unsigned int previous;
	unsigned int no_previous;
};

static void lh_init(struct lock_history *lh, struct dm_buffer_cache *cache, bool write)
{
	lh->cache = cache;
	lh->write = write;
	lh->no_previous = cache->num_locks;
	lh->previous = lh->no_previous;
}

static void __lh_lock(struct lock_history *lh, unsigned int index)
{
	if (lh->write) {
		if (static_branch_unlikely(&no_sleep_enabled) && lh->cache->no_sleep)
			write_lock_bh(&lh->cache->trees[index].u.spinlock);
		else
			down_write(&lh->cache->trees[index].u.lock);
	} else {
		if (static_branch_unlikely(&no_sleep_enabled) && lh->cache->no_sleep)
			read_lock_bh(&lh->cache->trees[index].u.spinlock);
		else
			down_read(&lh->cache->trees[index].u.lock);
	}
}

static void __lh_unlock(struct lock_history *lh, unsigned int index)
{
	if (lh->write) {
		if (static_branch_unlikely(&no_sleep_enabled) && lh->cache->no_sleep)
			write_unlock_bh(&lh->cache->trees[index].u.spinlock);
		else
			up_write(&lh->cache->trees[index].u.lock);
	} else {
		if (static_branch_unlikely(&no_sleep_enabled) && lh->cache->no_sleep)
			read_unlock_bh(&lh->cache->trees[index].u.spinlock);
		else
			up_read(&lh->cache->trees[index].u.lock);
	}
}

/*
 * Make sure you call this since it will unlock the final lock.
 */
static void lh_exit(struct lock_history *lh)
{
	if (lh->previous != lh->no_previous) {
		__lh_unlock(lh, lh->previous);
		lh->previous = lh->no_previous;
	}
}

/*
 * Named 'next' because there is no corresponding
 * 'up/unlock' call since it's done automatically.
 */
static void lh_next(struct lock_history *lh, sector_t b)
{
	unsigned int index = cache_index(b, lh->no_previous); /* no_previous is num_locks */

	if (lh->previous != lh->no_previous) {
		if (lh->previous != index) {
			__lh_unlock(lh, lh->previous);
			__lh_lock(lh, index);
			lh->previous = index;
		}
	} else {
		__lh_lock(lh, index);
		lh->previous = index;
	}
}
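
/*
 * Illustrative sketch (not part of the original source): when visiting
 * buffers in block order, a lock_history takes each tree lock once and
 * only switches locks when the hash of the block changes. bc, block1
 * and block2 are placeholders:
 *
 *	struct lock_history lh;
 *
 *	lh_init(&lh, &bc, true);	// take write locks
 *	lh_next(&lh, block1);		// locks the tree for block1
 *	lh_next(&lh, block2);		// same tree? the lock is kept
 *	lh_exit(&lh);			// drops whatever is still held
 */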

static inline struct dm_buffer *le_to_buffer(struct lru_entry *le)
{
	return container_of(le, struct dm_buffer, lru);
}

static struct dm_buffer *list_to_buffer(struct list_head *l)
{
	struct lru_entry *le = list_entry(l, struct lru_entry, list);

	if (!le)
		return NULL;

	return le_to_buffer(le);
}

static void cache_init(struct dm_buffer_cache *bc, unsigned int num_locks, bool no_sleep)
{
	unsigned int i;

	bc->num_locks = num_locks;
	bc->no_sleep = no_sleep;

	for (i = 0; i < bc->num_locks; i++) {
		if (no_sleep)
			rwlock_init(&bc->trees[i].u.spinlock);
		else
			init_rwsem(&bc->trees[i].u.lock);
		bc->trees[i].root = RB_ROOT;
	}

	lru_init(&bc->lru[LIST_CLEAN]);
	lru_init(&bc->lru[LIST_DIRTY]);
}

static void cache_destroy(struct dm_buffer_cache *bc)
{
	unsigned int i;

	for (i = 0; i < bc->num_locks; i++)
		WARN_ON_ONCE(!RB_EMPTY_ROOT(&bc->trees[i].root));

	lru_destroy(&bc->lru[LIST_CLEAN]);
	lru_destroy(&bc->lru[LIST_DIRTY]);
}

/*--------------*/

/*
 * not threadsafe, or racy depending how you look at it
 */
static inline unsigned long cache_count(struct dm_buffer_cache *bc, int list_mode)
{
	return bc->lru[list_mode].count;
}

static inline unsigned long cache_total(struct dm_buffer_cache *bc)
{
	return cache_count(bc, LIST_CLEAN) + cache_count(bc, LIST_DIRTY);
}

/*--------------*/

/*
 * Gets a specific buffer, indexed by block.
 * If the buffer is found then its holder count will be incremented and
 * lru_reference will be called.
 *
 * threadsafe
 */
static struct dm_buffer *__cache_get(const struct rb_root *root, sector_t block)
{
	struct rb_node *n = root->rb_node;
	struct dm_buffer *b;

	while (n) {
		b = container_of(n, struct dm_buffer, node);

		if (b->block == block)
			return b;

		n = block < b->block ? n->rb_left : n->rb_right;
	}

	return NULL;
}

static void __cache_inc_buffer(struct dm_buffer *b)
{
	atomic_inc(&b->hold_count);
	WRITE_ONCE(b->last_accessed, jiffies);
}

static struct dm_buffer *cache_get(struct dm_buffer_cache *bc, sector_t block)
{
	struct dm_buffer *b;

	cache_read_lock(bc, block);
	b = __cache_get(&bc->trees[cache_index(block, bc->num_locks)].root, block);
	if (b) {
		lru_reference(&b->lru);
		__cache_inc_buffer(b);
	}
	cache_read_unlock(bc, block);

	return b;
}
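
/*
 * Illustrative sketch (not part of the original source): cache_get()
 * and cache_put() pair up like get/put refcounting; only the
 * hash-indexed read lock is taken, so they are safe without the client
 * mutex. use_buffer is hypothetical:
 *
 *	struct dm_buffer *b = cache_get(&bc, block);
 *
 *	if (b) {
 *		use_buffer(b);		// hold_count > 0 here
 *		cache_put(&bc, b);	// true if hold_count hit zero
 *	}
 */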

/*--------------*/

/*
 * Returns true if the hold count hits zero.
 * threadsafe
 */
static bool cache_put(struct dm_buffer_cache *bc, struct dm_buffer *b)
{
	bool r;

	cache_read_lock(bc, b->block);
	BUG_ON(!atomic_read(&b->hold_count));
	r = atomic_dec_and_test(&b->hold_count);
	cache_read_unlock(bc, b->block);

	return r;
}

/*--------------*/

typedef enum evict_result (*b_predicate)(struct dm_buffer *, void *);

/*
 * Evicts a buffer based on a predicate. The oldest buffer that
 * matches the predicate will be selected. In addition to matching
 * the predicate, the hold_count of the selected buffer must be zero.
 */
struct evict_wrapper {
	struct lock_history *lh;
	b_predicate pred;
	void *context;
};

/*
 * Wraps the buffer predicate turning it into an lru predicate. Adds
 * an extra test for hold_count.
 */
static enum evict_result __evict_pred(struct lru_entry *le, void *context)
{
	struct evict_wrapper *w = context;
	struct dm_buffer *b = le_to_buffer(le);

	lh_next(w->lh, b->block);

	if (atomic_read(&b->hold_count))
		return ER_DONT_EVICT;

	return w->pred(b, w->context);
}

static struct dm_buffer *__cache_evict(struct dm_buffer_cache *bc, int list_mode,
				       b_predicate pred, void *context,
				       struct lock_history *lh)
{
	struct evict_wrapper w = {.lh = lh, .pred = pred, .context = context};
	struct lru_entry *le;
	struct dm_buffer *b;

	le = lru_evict(&bc->lru[list_mode], __evict_pred, &w, bc->no_sleep);
	if (!le)
		return NULL;

	b = le_to_buffer(le);
	/* __evict_pred will have locked the appropriate tree. */
	rb_erase(&b->node, &bc->trees[cache_index(b->block, bc->num_locks)].root);

	return b;
}

static struct dm_buffer *cache_evict(struct dm_buffer_cache *bc, int list_mode,
				     b_predicate pred, void *context)
{
	struct dm_buffer *b;
	struct lock_history lh;

	lh_init(&lh, bc, true);
	b = __cache_evict(bc, list_mode, pred, context, &lh);
	lh_exit(&lh);

	return b;
}

/*--------------*/

/*
 * Mark a buffer as clean or dirty. Not threadsafe.
 */
static void cache_mark(struct dm_buffer_cache *bc, struct dm_buffer *b, int list_mode)
{
	cache_write_lock(bc, b->block);
	if (list_mode != b->list_mode) {
		lru_remove(&bc->lru[b->list_mode], &b->lru);
		b->list_mode = list_mode;
		lru_insert(&bc->lru[b->list_mode], &b->lru);
	}
	cache_write_unlock(bc, b->block);
}

/*--------------*/

/*
 * Runs through the lru associated with 'old_mode'; entries that match the
 * predicate are moved to 'new_mode'. Not threadsafe.
 */
static void __cache_mark_many(struct dm_buffer_cache *bc, int old_mode, int new_mode,
			      b_predicate pred, void *context, struct lock_history *lh)
{
	struct lru_entry *le;
	struct dm_buffer *b;
	struct evict_wrapper w = {.lh = lh, .pred = pred, .context = context};

	while (true) {
		le = lru_evict(&bc->lru[old_mode], __evict_pred, &w, bc->no_sleep);
		if (!le)
			break;

		b = le_to_buffer(le);
		b->list_mode = new_mode;
		lru_insert(&bc->lru[b->list_mode], &b->lru);
	}
}

static void cache_mark_many(struct dm_buffer_cache *bc, int old_mode, int new_mode,
			    b_predicate pred, void *context)
{
	struct lock_history lh;

	lh_init(&lh, bc, true);
	__cache_mark_many(bc, old_mode, new_mode, pred, context, &lh);
	lh_exit(&lh);
}

/*--------------*/

/*
 * Iterates through all clean or dirty entries calling a function for each
 * entry. The callback may terminate the iteration early. Not threadsafe.
 */

/*
 * Iterator functions should return one of these actions to indicate
 * how the iteration should proceed.
 */
enum it_action {
	IT_NEXT,
	IT_COMPLETE,
};

typedef enum it_action (*iter_fn)(struct dm_buffer *b, void *context);

static void __cache_iterate(struct dm_buffer_cache *bc, int list_mode,
			    iter_fn fn, void *context, struct lock_history *lh)
{
	struct lru *lru = &bc->lru[list_mode];
	struct lru_entry *le, *first;

	if (!lru->cursor)
		return;

	first = le = to_le(lru->cursor);
	do {
		struct dm_buffer *b = le_to_buffer(le);

		lh_next(lh, b->block);

		switch (fn(b, context)) {
		case IT_NEXT:
			break;

		case IT_COMPLETE:
			return;
		}
		cond_resched();

		le = to_le(le->list.next);
	} while (le != first);
}

static void cache_iterate(struct dm_buffer_cache *bc, int list_mode,
			  iter_fn fn, void *context)
{
	struct lock_history lh;

	lh_init(&lh, bc, false);
	__cache_iterate(bc, list_mode, fn, context, &lh);
	lh_exit(&lh);
}

/*--------------*/

/*
 * Passes ownership of the buffer to the cache. Returns false if the
 * buffer was already present (in which case ownership does not pass),
 * e.g. after a race with another thread.
 *
 * Holder count should be 1 on insertion.
 *
 * Not threadsafe.
 */
static bool __cache_insert(struct rb_root *root, struct dm_buffer *b)
{
	struct rb_node **new = &root->rb_node, *parent = NULL;
	struct dm_buffer *found;

	while (*new) {
		found = container_of(*new, struct dm_buffer, node);

		if (found->block == b->block)
			return false;

		parent = *new;
		new = b->block < found->block ?
			&found->node.rb_left : &found->node.rb_right;
	}

	rb_link_node(&b->node, parent, new);
	rb_insert_color(&b->node, root);

	return true;
}

static bool cache_insert(struct dm_buffer_cache *bc, struct dm_buffer *b)
{
	bool r;

	if (WARN_ON_ONCE(b->list_mode >= LIST_SIZE))
		return false;

	cache_write_lock(bc, b->block);
	BUG_ON(atomic_read(&b->hold_count) != 1);
	r = __cache_insert(&bc->trees[cache_index(b->block, bc->num_locks)].root, b);
	if (r)
		lru_insert(&bc->lru[b->list_mode], &b->lru);
	cache_write_unlock(bc, b->block);

	return r;
}

/*--------------*/

/*
 * Removes a buffer from the cache; ownership of the buffer passes back to the caller.
 * Fails unless the hold_count is one (i.e. the caller holds the only reference).
 *
 * Not threadsafe.
 */
static bool cache_remove(struct dm_buffer_cache *bc, struct dm_buffer *b)
{
	bool r;

	cache_write_lock(bc, b->block);

	if (atomic_read(&b->hold_count) != 1) {
		r = false;
	} else {
		r = true;
		rb_erase(&b->node, &bc->trees[cache_index(b->block, bc->num_locks)].root);
		lru_remove(&bc->lru[b->list_mode], &b->lru);
	}

	cache_write_unlock(bc, b->block);

	return r;
}

/*--------------*/

typedef void (*b_release)(struct dm_buffer *);

static struct dm_buffer *__find_next(struct rb_root *root, sector_t block)
{
	struct rb_node *n = root->rb_node;
	struct dm_buffer *b;
	struct dm_buffer *best = NULL;

	while (n) {
		b = container_of(n, struct dm_buffer, node);

		if (b->block == block)
			return b;

		if (block <= b->block) {
			n = n->rb_left;
			best = b;
		} else {
			n = n->rb_right;
		}
	}

	return best;
}

static void __remove_range(struct dm_buffer_cache *bc,
			   struct rb_root *root,
			   sector_t begin, sector_t end,
			   b_predicate pred, b_release release)
{
	struct dm_buffer *b;

	while (true) {
		cond_resched();

		b = __find_next(root, begin);
		if (!b || (b->block >= end))
			break;

		begin = b->block + 1;

		if (atomic_read(&b->hold_count))
			continue;

		if (pred(b, NULL) == ER_EVICT) {
			rb_erase(&b->node, root);
			lru_remove(&bc->lru[b->list_mode], &b->lru);
			release(b);
		}
	}
}

static void cache_remove_range(struct dm_buffer_cache *bc,
			       sector_t begin, sector_t end,
			       b_predicate pred, b_release release)
{
	unsigned int i;

	BUG_ON(bc->no_sleep);
	for (i = 0; i < bc->num_locks; i++) {
		down_write(&bc->trees[i].u.lock);
		__remove_range(bc, &bc->trees[i].root, begin, end, pred, release);
		up_write(&bc->trees[i].u.lock);
	}
}

/*----------------------------------------------------------------*/

/*
 * Linking of buffers:
 *	All buffers are linked to buffer_cache with their node field.
 *
 *	Clean buffers that are not being written (B_WRITING not set)
 *	are linked to lru[LIST_CLEAN] with their lru_list field.
 *
 *	Dirty and clean buffers that are being written are linked to
 *	lru[LIST_DIRTY] with their lru_list field. When the write
 *	finishes, the buffer cannot be relinked immediately (because we
 *	are in an interrupt context and relinking requires process
 *	context), so some clean-not-writing buffers can be held on
 *	dirty_lru too. They are later added to lru in the process
 *	context.
 */
struct dm_bufio_client {
	struct block_device *bdev;
	unsigned int block_size;
	s8 sectors_per_block_bits;

	bool no_sleep;
	struct mutex lock;
	spinlock_t spinlock;

	int async_write_error;

	void (*alloc_callback)(struct dm_buffer *buf);
	void (*write_callback)(struct dm_buffer *buf);
	struct kmem_cache *slab_buffer;
	struct kmem_cache *slab_cache;
	struct dm_io_client *dm_io;

	struct list_head reserved_buffers;
	unsigned int need_reserved_buffers;

	unsigned int minimum_buffers;

	sector_t start;

	struct shrinker shrinker;
	struct work_struct shrink_work;
	atomic_long_t need_shrink;

	wait_queue_head_t free_buffer_wait;

	struct list_head client_list;

	/*
	 * Used by global_cleanup to sort the clients list.
	 */
	unsigned long oldest_buffer;

	struct dm_buffer_cache cache; /* must be last member */
};

/*----------------------------------------------------------------*/

#define dm_bufio_in_request()	(!!current->bio_list)

static void dm_bufio_lock(struct dm_bufio_client *c)
{
	if (static_branch_unlikely(&no_sleep_enabled) && c->no_sleep)
		spin_lock_bh(&c->spinlock);
	else
		mutex_lock_nested(&c->lock, dm_bufio_in_request());
}

static void dm_bufio_unlock(struct dm_bufio_client *c)
{
	if (static_branch_unlikely(&no_sleep_enabled) && c->no_sleep)
		spin_unlock_bh(&c->spinlock);
	else
		mutex_unlock(&c->lock);
}

/*----------------------------------------------------------------*/

/*
 * Default cache size: available memory divided by the ratio.
 */
static unsigned long dm_bufio_default_cache_size;

/*
 * Total cache size set by the user.
 */
static unsigned long dm_bufio_cache_size;

/*
 * A copy of dm_bufio_cache_size because dm_bufio_cache_size can change
 * at any time. If it disagrees, the user has changed cache size.
 */
static unsigned long dm_bufio_cache_size_latch;

static DEFINE_SPINLOCK(global_spinlock);

/*
 * Buffers are freed after this timeout
 */
static unsigned int dm_bufio_max_age = DM_BUFIO_DEFAULT_AGE_SECS;
static unsigned long dm_bufio_retain_bytes = DM_BUFIO_DEFAULT_RETAIN_BYTES;

static unsigned long dm_bufio_peak_allocated;
static unsigned long dm_bufio_allocated_kmem_cache;
static unsigned long dm_bufio_allocated_get_free_pages;
static unsigned long dm_bufio_allocated_vmalloc;
static unsigned long dm_bufio_current_allocated;

/*----------------------------------------------------------------*/

/*
 * The current number of clients.
 */
static int dm_bufio_client_count;

/*
 * The list of all clients.
 */
static LIST_HEAD(dm_bufio_all_clients);

/*
 * This mutex protects dm_bufio_cache_size_latch and dm_bufio_client_count
 */
static DEFINE_MUTEX(dm_bufio_clients_lock);

static struct workqueue_struct *dm_bufio_wq;
static struct delayed_work dm_bufio_cleanup_old_work;
static struct work_struct dm_bufio_replacement_work;

#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
static void buffer_record_stack(struct dm_buffer *b)
{
	b->stack_len = stack_trace_save(b->stack_entries, MAX_STACK, 2);
}
#endif

/*----------------------------------------------------------------*/

static void adjust_total_allocated(struct dm_buffer *b, bool unlink)
{
	unsigned char data_mode;
	long diff;

	static unsigned long * const class_ptr[DATA_MODE_LIMIT] = {
		&dm_bufio_allocated_kmem_cache,
		&dm_bufio_allocated_get_free_pages,
		&dm_bufio_allocated_vmalloc,
	};

	data_mode = b->data_mode;
	diff = (long)b->c->block_size;
	if (unlink)
		diff = -diff;

	spin_lock(&global_spinlock);

	*class_ptr[data_mode] += diff;

	dm_bufio_current_allocated += diff;

	if (dm_bufio_current_allocated > dm_bufio_peak_allocated)
		dm_bufio_peak_allocated = dm_bufio_current_allocated;

	if (!unlink) {
		if (dm_bufio_current_allocated > dm_bufio_cache_size)
			queue_work(dm_bufio_wq, &dm_bufio_replacement_work);
	}

	spin_unlock(&global_spinlock);
}

/*
 * Change the number of clients and recalculate per-client limit.
 */
static void __cache_size_refresh(void)
{
	if (WARN_ON(!mutex_is_locked(&dm_bufio_clients_lock)))
		return;
	if (WARN_ON(dm_bufio_client_count < 0))
		return;

	dm_bufio_cache_size_latch = READ_ONCE(dm_bufio_cache_size);

	/*
	 * Use default if set to 0 and report the actual cache size used.
	 */
	if (!dm_bufio_cache_size_latch) {
		(void)cmpxchg(&dm_bufio_cache_size, 0,
			      dm_bufio_default_cache_size);
		dm_bufio_cache_size_latch = dm_bufio_default_cache_size;
	}
}

/*
 * Allocating buffer data.
 *
 * Small buffers are allocated with kmem_cache, to use space optimally.
 *
 * For large buffers, we choose between get_free_pages and vmalloc.
 * Each has advantages and disadvantages.
 *
 * __get_free_pages can randomly fail if the memory is fragmented.
 * __vmalloc won't randomly fail, but vmalloc space is limited (it may be
 * as low as 128M) so using it for caching is not appropriate.
 *
 * If the allocation may fail we use __get_free_pages. Memory fragmentation
 * won't have a fatal effect here, but it just causes flushes of some other
 * buffers and more I/O will be performed. Don't use __get_free_pages if it
 * always fails (i.e. order > MAX_ORDER).
 *
 * If the allocation shouldn't fail we use __vmalloc. This is only for the
 * initial reserve allocation, so there's no risk of wasting all vmalloc
 * space.
 */
static void *alloc_buffer_data(struct dm_bufio_client *c, gfp_t gfp_mask,
			       unsigned char *data_mode)
{
	if (unlikely(c->slab_cache != NULL)) {
		*data_mode = DATA_MODE_SLAB;
		return kmem_cache_alloc(c->slab_cache, gfp_mask);
	}

	if (c->block_size <= KMALLOC_MAX_SIZE &&
	    gfp_mask & __GFP_NORETRY) {
		*data_mode = DATA_MODE_GET_FREE_PAGES;
		return (void *)__get_free_pages(gfp_mask,
				c->sectors_per_block_bits - (PAGE_SHIFT - SECTOR_SHIFT));
	}

	*data_mode = DATA_MODE_VMALLOC;

	return __vmalloc(c->block_size, gfp_mask);
}
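
/*
 * Illustrative summary (not part of the original source): the data mode
 * falls out of the checks above roughly as follows.
 *
 *	c->slab_cache set (client created a slab for its block size)
 *						-> DATA_MODE_SLAB
 *	block fits kmalloc and caller passed __GFP_NORETRY
 *						-> DATA_MODE_GET_FREE_PAGES
 *	anything else (e.g. the reserve buffers, which must not fail)
 *						-> DATA_MODE_VMALLOC
 */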

/*
 * Free buffer's data.
 */
static void free_buffer_data(struct dm_bufio_client *c,
			     void *data, unsigned char data_mode)
{
	switch (data_mode) {
	case DATA_MODE_SLAB:
		kmem_cache_free(c->slab_cache, data);
		break;

	case DATA_MODE_GET_FREE_PAGES:
		free_pages((unsigned long)data,
			   c->sectors_per_block_bits - (PAGE_SHIFT - SECTOR_SHIFT));
		break;

	case DATA_MODE_VMALLOC:
		vfree(data);
		break;

	default:
		DMCRIT("dm_bufio_free_buffer_data: bad data mode: %d",
		       data_mode);
		BUG();
	}
}

/*
 * Allocate a buffer and its data.
 */
static struct dm_buffer *alloc_buffer(struct dm_bufio_client *c, gfp_t gfp_mask)
{
	struct dm_buffer *b = kmem_cache_alloc(c->slab_buffer, gfp_mask);

	if (!b)
		return NULL;

	b->c = c;

	b->data = alloc_buffer_data(c, gfp_mask, &b->data_mode);
	if (!b->data) {
		kmem_cache_free(c->slab_buffer, b);
		return NULL;
	}
	adjust_total_allocated(b, false);

#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
	b->stack_len = 0;
#endif
	return b;
}

/*
 * Free a buffer and its data.
 */
static void free_buffer(struct dm_buffer *b)
{
	struct dm_bufio_client *c = b->c;

	adjust_total_allocated(b, true);
	free_buffer_data(c, b->data, b->data_mode);
	kmem_cache_free(c->slab_buffer, b);
}

/*
 *--------------------------------------------------------------------------
 * Submit I/O on the buffer.
 *
 * The bio interface is faster, but it has some problems:
 *	the vector list is limited (increasing this limit increases
 *	memory-consumption per buffer, so it is not viable);
 *
 *	the memory must be direct-mapped, not vmalloced;
 *
 * If the buffer is small enough (up to DM_BUFIO_INLINE_VECS pages) and
 * it is not vmalloced, try using the bio interface.
 *
 * If the buffer is big, if it is vmalloced or if the underlying device
 * rejects the bio because it is too large, use the dm-io layer to do the
 * I/O. The dm-io layer splits the I/O into multiple requests, avoiding
 * the above shortcomings.
 *--------------------------------------------------------------------------
 */

/*
 * dm-io completion routine. It just calls b->end_io, pretending
 * that the request was handled directly with the bio interface.
 */
static void dmio_complete(unsigned long error, void *context)
{
	struct dm_buffer *b = context;

	b->end_io(b, unlikely(error != 0) ? BLK_STS_IOERR : 0);
}

static void use_dmio(struct dm_buffer *b, enum req_op op, sector_t sector,
		     unsigned int n_sectors, unsigned int offset)
{
	int r;
	struct dm_io_request io_req = {
		.bi_opf = op,
		.notify.fn = dmio_complete,
		.notify.context = b,
		.client = b->c->dm_io,
	};
	struct dm_io_region region = {
		.bdev = b->c->bdev,
		.sector = sector,
		.count = n_sectors,
	};

	if (b->data_mode != DATA_MODE_VMALLOC) {
		io_req.mem.type = DM_IO_KMEM;
		io_req.mem.ptr.addr = (char *)b->data + offset;
	} else {
		io_req.mem.type = DM_IO_VMA;
		io_req.mem.ptr.vma = (char *)b->data + offset;
	}

	r = dm_io(&io_req, 1, &region, NULL, IOPRIO_DEFAULT);
	if (unlikely(r))
		b->end_io(b, errno_to_blk_status(r));
}

static void bio_complete(struct bio *bio)
{
	struct dm_buffer *b = bio->bi_private;
	blk_status_t status = bio->bi_status;

	bio_uninit(bio);
	kfree(bio);
	b->end_io(b, status);
}

static void use_bio(struct dm_buffer *b, enum req_op op, sector_t sector,
		    unsigned int n_sectors, unsigned int offset)
{
	struct bio *bio;
	char *ptr;
	unsigned int len;

	bio = bio_kmalloc(1, GFP_NOWAIT | __GFP_NORETRY | __GFP_NOWARN);
	if (!bio) {
		use_dmio(b, op, sector, n_sectors, offset);
		return;
	}
	bio_init(bio, b->c->bdev, bio->bi_inline_vecs, 1, op);
	bio->bi_iter.bi_sector = sector;
	bio->bi_end_io = bio_complete;
	bio->bi_private = b;

	ptr = (char *)b->data + offset;
	len = n_sectors << SECTOR_SHIFT;

	__bio_add_page(bio, virt_to_page(ptr), len, offset_in_page(ptr));

	submit_bio(bio);
}

static inline sector_t block_to_sector(struct dm_bufio_client *c, sector_t block)
{
	sector_t sector;

	if (likely(c->sectors_per_block_bits >= 0))
		sector = block << c->sectors_per_block_bits;
	else
		sector = block * (c->block_size >> SECTOR_SHIFT);
	sector += c->start;

	return sector;
}
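
/*
 * Worked example (not part of the original source): with a 4096-byte
 * block size, sectors_per_block_bits is 3 (4096 >> 9 == 8 == 1 << 3),
 * so block 10 with c->start == 0 maps to sector 10 << 3 == 80. A client
 * with a non-power-of-two block size has sectors_per_block_bits < 0 and
 * takes the multiplication path instead.
 */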

static void submit_io(struct dm_buffer *b, enum req_op op,
		      void (*end_io)(struct dm_buffer *, blk_status_t))
{
	unsigned int n_sectors;
	sector_t sector;
	unsigned int offset, end;

	b->end_io = end_io;

	sector = block_to_sector(b->c, b->block);

	if (op != REQ_OP_WRITE) {
		n_sectors = b->c->block_size >> SECTOR_SHIFT;
		offset = 0;
	} else {
		if (b->c->write_callback)
			b->c->write_callback(b);
		offset = b->write_start;
		end = b->write_end;
		offset &= -DM_BUFIO_WRITE_ALIGN;
		end += DM_BUFIO_WRITE_ALIGN - 1;
		end &= -DM_BUFIO_WRITE_ALIGN;
		if (unlikely(end > b->c->block_size))
			end = b->c->block_size;

		sector += offset >> SECTOR_SHIFT;
		n_sectors = (end - offset) >> SECTOR_SHIFT;
	}

	if (b->data_mode != DATA_MODE_VMALLOC)
		use_bio(b, op, sector, n_sectors, offset);
	else
		use_dmio(b, op, sector, n_sectors, offset);
}
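
/*
 * Worked example (not part of the original source): the masks above
 * round the dirty range out to DM_BUFIO_WRITE_ALIGN (4096) boundaries.
 * With write_start == 5000 and write_end == 6000:
 *
 *	offset = 5000 & ~4095 = 4096
 *	end    = (6000 + 4095) & ~4095 = 8192
 *
 * so 8192 - 4096 = 4096 bytes (8 sectors) are written, starting one
 * 4k-aligned block into the buffer.
 */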
1407
1408 /*
1409 *--------------------------------------------------------------
1410 * Writing dirty buffers
1411 *--------------------------------------------------------------
1412 */
1413
1414 /*
1415 * The endio routine for write.
1416 *
1417 * Set the error, clear B_WRITING bit and wake anyone who was waiting on
1418 * it.
1419 */
write_endio(struct dm_buffer * b,blk_status_t status)1420 static void write_endio(struct dm_buffer *b, blk_status_t status)
1421 {
1422 b->write_error = status;
1423 if (unlikely(status)) {
1424 struct dm_bufio_client *c = b->c;
1425
1426 (void)cmpxchg(&c->async_write_error, 0,
1427 blk_status_to_errno(status));
1428 }
1429
1430 BUG_ON(!test_bit(B_WRITING, &b->state));
1431
1432 smp_mb__before_atomic();
1433 clear_bit(B_WRITING, &b->state);
1434 smp_mb__after_atomic();
1435
1436 wake_up_bit(&b->state, B_WRITING);
1437 }
1438
1439 /*
1440 * Initiate a write on a dirty buffer, but don't wait for it.
1441 *
1442 * - If the buffer is not dirty, exit.
1443 * - If there some previous write going on, wait for it to finish (we can't
1444 * have two writes on the same buffer simultaneously).
1445 * - Submit our write and don't wait on it. We set B_WRITING indicating
1446 * that there is a write in progress.
1447 */
__write_dirty_buffer(struct dm_buffer * b,struct list_head * write_list)1448 static void __write_dirty_buffer(struct dm_buffer *b,
1449 struct list_head *write_list)
1450 {
1451 if (!test_bit(B_DIRTY, &b->state))
1452 return;
1453
1454 clear_bit(B_DIRTY, &b->state);
1455 wait_on_bit_lock_io(&b->state, B_WRITING, TASK_UNINTERRUPTIBLE);
1456
1457 b->write_start = b->dirty_start;
1458 b->write_end = b->dirty_end;
1459
1460 if (!write_list)
1461 submit_io(b, REQ_OP_WRITE, write_endio);
1462 else
1463 list_add_tail(&b->write_list, write_list);
1464 }
1465
__flush_write_list(struct list_head * write_list)1466 static void __flush_write_list(struct list_head *write_list)
1467 {
1468 struct blk_plug plug;
1469
1470 blk_start_plug(&plug);
1471 while (!list_empty(write_list)) {
1472 struct dm_buffer *b =
1473 list_entry(write_list->next, struct dm_buffer, write_list);
1474 list_del(&b->write_list);
1475 submit_io(b, REQ_OP_WRITE, write_endio);
1476 cond_resched();
1477 }
1478 blk_finish_plug(&plug);
1479 }
1480
1481 /*
1482 * Wait until any activity on the buffer finishes. Possibly write the
1483 * buffer if it is dirty. When this function finishes, there is no I/O
1484 * running on the buffer and the buffer is not dirty.
1485 */
__make_buffer_clean(struct dm_buffer * b)1486 static void __make_buffer_clean(struct dm_buffer *b)
1487 {
1488 BUG_ON(atomic_read(&b->hold_count));
1489
1490 /* smp_load_acquire() pairs with read_endio()'s smp_mb__before_atomic() */
1491 if (!smp_load_acquire(&b->state)) /* fast case */
1492 return;
1493
1494 wait_on_bit_io(&b->state, B_READING, TASK_UNINTERRUPTIBLE);
1495 __write_dirty_buffer(b, NULL);
1496 wait_on_bit_io(&b->state, B_WRITING, TASK_UNINTERRUPTIBLE);
1497 }
1498
is_clean(struct dm_buffer * b,void * context)1499 static enum evict_result is_clean(struct dm_buffer *b, void *context)
1500 {
1501 struct dm_bufio_client *c = context;
1502
1503 /* These should never happen */
1504 if (WARN_ON_ONCE(test_bit(B_WRITING, &b->state)))
1505 return ER_DONT_EVICT;
1506 if (WARN_ON_ONCE(test_bit(B_DIRTY, &b->state)))
1507 return ER_DONT_EVICT;
1508 if (WARN_ON_ONCE(b->list_mode != LIST_CLEAN))
1509 return ER_DONT_EVICT;
1510
1511 if (static_branch_unlikely(&no_sleep_enabled) && c->no_sleep &&
1512 unlikely(test_bit(B_READING, &b->state)))
1513 return ER_DONT_EVICT;
1514
1515 return ER_EVICT;
1516 }
1517
is_dirty(struct dm_buffer * b,void * context)1518 static enum evict_result is_dirty(struct dm_buffer *b, void *context)
1519 {
1520 /* These should never happen */
1521 if (WARN_ON_ONCE(test_bit(B_READING, &b->state)))
1522 return ER_DONT_EVICT;
1523 if (WARN_ON_ONCE(b->list_mode != LIST_DIRTY))
1524 return ER_DONT_EVICT;
1525
1526 return ER_EVICT;
1527 }
1528
1529 /*
1530 * Find some buffer that is not held by anybody, clean it, unlink it and
1531 * return it.
1532 */
__get_unclaimed_buffer(struct dm_bufio_client * c)1533 static struct dm_buffer *__get_unclaimed_buffer(struct dm_bufio_client *c)
1534 {
1535 struct dm_buffer *b;
1536
1537 b = cache_evict(&c->cache, LIST_CLEAN, is_clean, c);
1538 if (b) {
1539 /* this also waits for pending reads */
1540 __make_buffer_clean(b);
1541 return b;
1542 }
1543
1544 if (static_branch_unlikely(&no_sleep_enabled) && c->no_sleep)
1545 return NULL;
1546
1547 b = cache_evict(&c->cache, LIST_DIRTY, is_dirty, NULL);
1548 if (b) {
1549 __make_buffer_clean(b);
1550 return b;
1551 }
1552
1553 return NULL;
1554 }
1555
1556 /*
1557 * Wait until some other threads free some buffer or release hold count on
1558 * some buffer.
1559 *
1560 * This function is entered with c->lock held, drops it and regains it
1561 * before exiting.
1562 */
__wait_for_free_buffer(struct dm_bufio_client * c)1563 static void __wait_for_free_buffer(struct dm_bufio_client *c)
1564 {
1565 DECLARE_WAITQUEUE(wait, current);
1566
1567 add_wait_queue(&c->free_buffer_wait, &wait);
1568 set_current_state(TASK_UNINTERRUPTIBLE);
1569 dm_bufio_unlock(c);
1570
1571 /*
1572 * It's possible to miss a wake up event since we don't always
1573 * hold c->lock when wake_up is called. So we have a timeout here,
1574 * just in case.
1575 */
1576 io_schedule_timeout(5 * HZ);
1577
1578 remove_wait_queue(&c->free_buffer_wait, &wait);
1579
1580 dm_bufio_lock(c);
1581 }
1582
1583 enum new_flag {
1584 NF_FRESH = 0,
1585 NF_READ = 1,
1586 NF_GET = 2,
1587 NF_PREFETCH = 3
1588 };
1589
1590 /*
1591 * Allocate a new buffer. If the allocation is not possible, wait until
1592 * some other thread frees a buffer.
1593 *
1594 * May drop the lock and regain it.
1595 */
__alloc_buffer_wait_no_callback(struct dm_bufio_client * c,enum new_flag nf)1596 static struct dm_buffer *__alloc_buffer_wait_no_callback(struct dm_bufio_client *c, enum new_flag nf)
1597 {
1598 struct dm_buffer *b;
1599 bool tried_noio_alloc = false;
1600
1601 /*
1602 * dm-bufio is resistant to allocation failures (it just keeps
1603 * one buffer reserved in cases all the allocations fail).
1604 * So set flags to not try too hard:
1605 * GFP_NOWAIT: don't wait; if we need to sleep we'll release our
1606 * mutex and wait ourselves.
1607 * __GFP_NORETRY: don't retry and rather return failure
1608 * __GFP_NOMEMALLOC: don't use emergency reserves
1609 * __GFP_NOWARN: don't print a warning in case of failure
1610 *
1611 * For debugging, if we set the cache size to 1, no new buffers will
1612 * be allocated.
1613 */
1614 while (1) {
1615 if (dm_bufio_cache_size_latch != 1) {
1616 b = alloc_buffer(c, GFP_NOWAIT | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
1617 if (b)
1618 return b;
1619 }
1620
1621 if (nf == NF_PREFETCH)
1622 return NULL;
1623
1624 if (dm_bufio_cache_size_latch != 1 && !tried_noio_alloc) {
1625 dm_bufio_unlock(c);
1626 b = alloc_buffer(c, GFP_NOIO | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
1627 dm_bufio_lock(c);
1628 if (b)
1629 return b;
1630 tried_noio_alloc = true;
1631 }
1632
1633 if (!list_empty(&c->reserved_buffers)) {
1634 b = list_to_buffer(c->reserved_buffers.next);
1635 list_del(&b->lru.list);
1636 c->need_reserved_buffers++;
1637
1638 return b;
1639 }
1640
1641 b = __get_unclaimed_buffer(c);
1642 if (b)
1643 return b;
1644
1645 __wait_for_free_buffer(c);
1646 }
1647 }
1648
__alloc_buffer_wait(struct dm_bufio_client * c,enum new_flag nf)1649 static struct dm_buffer *__alloc_buffer_wait(struct dm_bufio_client *c, enum new_flag nf)
1650 {
1651 struct dm_buffer *b = __alloc_buffer_wait_no_callback(c, nf);
1652
1653 if (!b)
1654 return NULL;
1655
1656 if (c->alloc_callback)
1657 c->alloc_callback(b);
1658
1659 return b;
1660 }
1661
1662 /*
1663 * Free a buffer and wake other threads waiting for free buffers.
1664 */
__free_buffer_wake(struct dm_buffer * b)1665 static void __free_buffer_wake(struct dm_buffer *b)
1666 {
1667 struct dm_bufio_client *c = b->c;
1668
1669 b->block = -1;
1670 if (!c->need_reserved_buffers)
1671 free_buffer(b);
1672 else {
1673 list_add(&b->lru.list, &c->reserved_buffers);
1674 c->need_reserved_buffers--;
1675 }
1676
1677 /*
1678 * We hold the bufio lock here, so no one can add entries to the
1679 * wait queue anyway.
1680 */
1681 if (unlikely(waitqueue_active(&c->free_buffer_wait)))
1682 wake_up(&c->free_buffer_wait);
1683 }
1684
cleaned(struct dm_buffer * b,void * context)1685 static enum evict_result cleaned(struct dm_buffer *b, void *context)
1686 {
1687 if (WARN_ON_ONCE(test_bit(B_READING, &b->state)))
1688 return ER_DONT_EVICT; /* should never happen */
1689
1690 if (test_bit(B_DIRTY, &b->state) || test_bit(B_WRITING, &b->state))
1691 return ER_DONT_EVICT;
1692 else
1693 return ER_EVICT;
1694 }
1695
__move_clean_buffers(struct dm_bufio_client * c)1696 static void __move_clean_buffers(struct dm_bufio_client *c)
1697 {
1698 cache_mark_many(&c->cache, LIST_DIRTY, LIST_CLEAN, cleaned, NULL);
1699 }
1700
1701 struct write_context {
1702 int no_wait;
1703 struct list_head *write_list;
1704 };
1705
write_one(struct dm_buffer * b,void * context)1706 static enum it_action write_one(struct dm_buffer *b, void *context)
1707 {
1708 struct write_context *wc = context;
1709
1710 if (wc->no_wait && test_bit(B_WRITING, &b->state))
1711 return IT_COMPLETE;
1712
1713 __write_dirty_buffer(b, wc->write_list);
1714 return IT_NEXT;
1715 }
1716
__write_dirty_buffers_async(struct dm_bufio_client * c,int no_wait,struct list_head * write_list)1717 static void __write_dirty_buffers_async(struct dm_bufio_client *c, int no_wait,
1718 struct list_head *write_list)
1719 {
1720 struct write_context wc = {.no_wait = no_wait, .write_list = write_list};
1721
1722 __move_clean_buffers(c);
1723 cache_iterate(&c->cache, LIST_DIRTY, write_one, &wc);
1724 }
1725
1726 /*
1727 * Check if we're over watermark.
1728 * If we are over threshold_buffers, start freeing buffers.
1729 * If we're over "limit_buffers", block until we get under the limit.
1730 */
__check_watermark(struct dm_bufio_client * c,struct list_head * write_list)1731 static void __check_watermark(struct dm_bufio_client *c,
1732 struct list_head *write_list)
1733 {
1734 if (cache_count(&c->cache, LIST_DIRTY) >
1735 cache_count(&c->cache, LIST_CLEAN) * DM_BUFIO_WRITEBACK_RATIO)
1736 __write_dirty_buffers_async(c, 1, write_list);
1737 }
1738
1739 /*
1740 *--------------------------------------------------------------
1741 * Getting a buffer
1742 *--------------------------------------------------------------
1743 */
1744
cache_put_and_wake(struct dm_bufio_client * c,struct dm_buffer * b)1745 static void cache_put_and_wake(struct dm_bufio_client *c, struct dm_buffer *b)
1746 {
1747 /*
1748 * Relying on waitqueue_active() is racey, but we sleep
1749 * with schedule_timeout anyway.
1750 */
1751 if (cache_put(&c->cache, b) &&
1752 unlikely(waitqueue_active(&c->free_buffer_wait)))
1753 wake_up(&c->free_buffer_wait);
1754 }
1755
1756 /*
1757 * This assumes you have already checked the cache to see if the buffer
1758 * is already present (it will recheck after dropping the lock for allocation).
1759 */
__bufio_new(struct dm_bufio_client * c,sector_t block,enum new_flag nf,int * need_submit,struct list_head * write_list)1760 static struct dm_buffer *__bufio_new(struct dm_bufio_client *c, sector_t block,
1761 enum new_flag nf, int *need_submit,
1762 struct list_head *write_list)
1763 {
1764 struct dm_buffer *b, *new_b = NULL;
1765
1766 *need_submit = 0;
1767
1768 /* This can't be called with NF_GET */
1769 if (WARN_ON_ONCE(nf == NF_GET))
1770 return NULL;
1771
1772 new_b = __alloc_buffer_wait(c, nf);
1773 if (!new_b)
1774 return NULL;
1775
1776 /*
1777 * We've had a period where the mutex was unlocked, so need to
1778 * recheck the buffer tree.
1779 */
1780 b = cache_get(&c->cache, block);
1781 if (b) {
1782 __free_buffer_wake(new_b);
1783 goto found_buffer;
1784 }
1785
1786 __check_watermark(c, write_list);
1787
1788 b = new_b;
1789 atomic_set(&b->hold_count, 1);
1790 WRITE_ONCE(b->last_accessed, jiffies);
1791 b->block = block;
1792 b->read_error = 0;
1793 b->write_error = 0;
1794 b->list_mode = LIST_CLEAN;
1795
1796 if (nf == NF_FRESH)
1797 b->state = 0;
1798 else {
1799 b->state = 1 << B_READING;
1800 *need_submit = 1;
1801 }
1802
1803 /*
1804 * We mustn't insert into the cache until the B_READING state
1805 * is set. Otherwise another thread could get it and use
1806 * it before it had been read.
1807 */
1808 cache_insert(&c->cache, b);
1809
1810 return b;
1811
1812 found_buffer:
1813 if (nf == NF_PREFETCH) {
1814 cache_put_and_wake(c, b);
1815 return NULL;
1816 }
1817
1818 /*
1819 * Note: it is essential that we don't wait for the buffer to be
1820 * read if dm_bufio_get function is used. Both dm_bufio_get and
1821 * dm_bufio_prefetch can be used in the driver request routine.
1822 * If the user called both dm_bufio_prefetch and dm_bufio_get on
1823 * the same buffer, it would deadlock if we waited.
1824 */
1825 if (nf == NF_GET && unlikely(test_bit_acquire(B_READING, &b->state))) {
1826 cache_put_and_wake(c, b);
1827 return NULL;
1828 }
1829
1830 return b;
1831 }
1832
1833 /*
1834 * The endio routine for reading: set the error, clear the bit and wake up
1835 * anyone waiting on the buffer.
1836 */
read_endio(struct dm_buffer * b,blk_status_t status)1837 static void read_endio(struct dm_buffer *b, blk_status_t status)
1838 {
1839 b->read_error = status;
1840
1841 BUG_ON(!test_bit(B_READING, &b->state));
1842
1843 smp_mb__before_atomic();
1844 clear_bit(B_READING, &b->state);
1845 smp_mb__after_atomic();
1846
1847 wake_up_bit(&b->state, B_READING);
1848 }
1849
1850 /*
1851 * A common routine for dm_bufio_new and dm_bufio_read. Operation of these
1852 * functions is similar except that dm_bufio_new doesn't read the
1853 * buffer from the disk (assuming that the caller overwrites all the data
1854 * and uses dm_bufio_mark_buffer_dirty to write new data back).
1855 */
new_read(struct dm_bufio_client * c,sector_t block,enum new_flag nf,struct dm_buffer ** bp)1856 static void *new_read(struct dm_bufio_client *c, sector_t block,
1857 enum new_flag nf, struct dm_buffer **bp)
1858 {
1859 int need_submit = 0;
1860 struct dm_buffer *b;
1861
1862 LIST_HEAD(write_list);
1863
1864 *bp = NULL;
1865
1866 /*
1867 * Fast path, hopefully the block is already in the cache. No need
1868 * to get the client lock for this.
1869 */
1870 b = cache_get(&c->cache, block);
1871 if (b) {
1872 if (nf == NF_PREFETCH) {
1873 cache_put_and_wake(c, b);
1874 return NULL;
1875 }
1876
1877 /*
1878 * Note: it is essential that we don't wait for the buffer to be
1879 * read if dm_bufio_get function is used. Both dm_bufio_get and
1880 * dm_bufio_prefetch can be used in the driver request routine.
1881 * If the user called both dm_bufio_prefetch and dm_bufio_get on
1882 * the same buffer, it would deadlock if we waited.
1883 */
1884 if (nf == NF_GET && unlikely(test_bit_acquire(B_READING, &b->state))) {
1885 cache_put_and_wake(c, b);
1886 return NULL;
1887 }
1888 }
1889
1890 if (!b) {
1891 if (nf == NF_GET)
1892 return NULL;
1893
1894 dm_bufio_lock(c);
1895 b = __bufio_new(c, block, nf, &need_submit, &write_list);
1896 dm_bufio_unlock(c);
1897 }
1898
1899 #ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
1900 if (b && (atomic_read(&b->hold_count) == 1))
1901 buffer_record_stack(b);
1902 #endif
1903
1904 __flush_write_list(&write_list);
1905
1906 if (!b)
1907 return NULL;
1908
1909 if (need_submit)
1910 submit_io(b, REQ_OP_READ, read_endio);
1911
1912 if (nf != NF_GET) /* we already tested this condition above */
1913 wait_on_bit_io(&b->state, B_READING, TASK_UNINTERRUPTIBLE);
1914
1915 if (b->read_error) {
1916 int error = blk_status_to_errno(b->read_error);
1917
1918 dm_bufio_release(b);
1919
1920 return ERR_PTR(error);
1921 }
1922
1923 *bp = b;
1924
1925 return b->data;
1926 }
1927
1928 void *dm_bufio_get(struct dm_bufio_client *c, sector_t block,
1929 struct dm_buffer **bp)
1930 {
1931 return new_read(c, block, NF_GET, bp);
1932 }
1933 EXPORT_SYMBOL_GPL(dm_bufio_get);
1934
1935 void *dm_bufio_read(struct dm_bufio_client *c, sector_t block,
1936 struct dm_buffer **bp)
1937 {
1938 if (WARN_ON_ONCE(dm_bufio_in_request()))
1939 return ERR_PTR(-EINVAL);
1940
1941 return new_read(c, block, NF_READ, bp);
1942 }
1943 EXPORT_SYMBOL_GPL(dm_bufio_read);
1944
1945 void *dm_bufio_new(struct dm_bufio_client *c, sector_t block,
1946 struct dm_buffer **bp)
1947 {
1948 if (WARN_ON_ONCE(dm_bufio_in_request()))
1949 return ERR_PTR(-EINVAL);
1950
1951 return new_read(c, block, NF_FRESH, bp);
1952 }
1953 EXPORT_SYMBOL_GPL(dm_bufio_new);
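
/*
 * Illustrative sketch of the usual lifecycle around the three variants
 * above ("client", "BLOCK" and "new_contents" are hypothetical, and error
 * handling is abridged):
 *
 *	struct dm_buffer *b;
 *	void *data;
 *
 *	data = dm_bufio_read(client, BLOCK, &b);
 *	if (IS_ERR(data))
 *		return PTR_ERR(data);
 *	memcpy(data, new_contents, dm_bufio_get_block_size(client));
 *	dm_bufio_mark_buffer_dirty(b);
 *	dm_bufio_release(b);
 *
 * dm_bufio_get never reads from disk (it returns NULL unless the block is
 * already cached) and dm_bufio_new skips the read entirely; buffers from
 * all three are released the same way.
 */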
1954
1955 void dm_bufio_prefetch(struct dm_bufio_client *c,
1956 sector_t block, unsigned int n_blocks)
1957 {
1958 struct blk_plug plug;
1959
1960 LIST_HEAD(write_list);
1961
1962 if (WARN_ON_ONCE(dm_bufio_in_request()))
1963 return; /* should never happen */
1964
1965 blk_start_plug(&plug);
1966
1967 for (; n_blocks--; block++) {
1968 int need_submit;
1969 struct dm_buffer *b;
1970
1971 b = cache_get(&c->cache, block);
1972 if (b) {
1973 /* already in cache */
1974 cache_put_and_wake(c, b);
1975 continue;
1976 }
1977
1978 dm_bufio_lock(c);
1979 b = __bufio_new(c, block, NF_PREFETCH, &need_submit,
1980 &write_list);
1981 if (unlikely(!list_empty(&write_list))) {
1982 dm_bufio_unlock(c);
1983 blk_finish_plug(&plug);
1984 __flush_write_list(&write_list);
1985 blk_start_plug(&plug);
1986 dm_bufio_lock(c);
1987 }
1988 if (unlikely(b != NULL)) {
1989 dm_bufio_unlock(c);
1990
1991 if (need_submit)
1992 submit_io(b, REQ_OP_READ, read_endio);
1993 dm_bufio_release(b);
1994
1995 cond_resched();
1996
1997 if (!n_blocks)
1998 goto flush_plug;
1999 dm_bufio_lock(c);
2000 }
2001 dm_bufio_unlock(c);
2002 }
2003
2004 flush_plug:
2005 blk_finish_plug(&plug);
2006 }
2007 EXPORT_SYMBOL_GPL(dm_bufio_prefetch);
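
/*
 * Illustrative sketch: warm the cache ahead of a batch of reads so the
 * subsequent dm_bufio_read calls mostly hit the fast path ("client",
 * "first", "nr" and process_block() are hypothetical):
 *
 *	struct dm_buffer *b;
 *	sector_t i;
 *
 *	dm_bufio_prefetch(client, first, nr);
 *	for (i = 0; i < nr; i++) {
 *		void *data = dm_bufio_read(client, first + i, &b);
 *
 *		if (IS_ERR(data))
 *			continue;
 *		process_block(data);
 *		dm_bufio_release(b);
 *	}
 */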
2008
2009 void dm_bufio_release(struct dm_buffer *b)
2010 {
2011 struct dm_bufio_client *c = b->c;
2012
2013 /*
2014 * If there were errors on the buffer, and the buffer is not
2015 * to be written, free the buffer. There is no point in caching
2016 * an invalid buffer.
2017 */
2018 if ((b->read_error || b->write_error) &&
2019 !test_bit_acquire(B_READING, &b->state) &&
2020 !test_bit(B_WRITING, &b->state) &&
2021 !test_bit(B_DIRTY, &b->state)) {
2022 dm_bufio_lock(c);
2023
2024 /* cache remove can fail if there are other holders */
2025 if (cache_remove(&c->cache, b)) {
2026 __free_buffer_wake(b);
2027 dm_bufio_unlock(c);
2028 return;
2029 }
2030
2031 dm_bufio_unlock(c);
2032 }
2033
2034 cache_put_and_wake(c, b);
2035 }
2036 EXPORT_SYMBOL_GPL(dm_bufio_release);
2037
2038 void dm_bufio_mark_partial_buffer_dirty(struct dm_buffer *b,
2039 unsigned int start, unsigned int end)
2040 {
2041 struct dm_bufio_client *c = b->c;
2042
2043 BUG_ON(start >= end);
2044 BUG_ON(end > b->c->block_size);
2045
2046 dm_bufio_lock(c);
2047
2048 BUG_ON(test_bit(B_READING, &b->state));
2049
2050 if (!test_and_set_bit(B_DIRTY, &b->state)) {
2051 b->dirty_start = start;
2052 b->dirty_end = end;
2053 cache_mark(&c->cache, b, LIST_DIRTY);
2054 } else {
2055 if (start < b->dirty_start)
2056 b->dirty_start = start;
2057 if (end > b->dirty_end)
2058 b->dirty_end = end;
2059 }
2060
2061 dm_bufio_unlock(c);
2062 }
2063 EXPORT_SYMBOL_GPL(dm_bufio_mark_partial_buffer_dirty);
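
/*
 * Illustrative sketch: dirtying only the range that actually changed lets
 * the eventual writeback be narrowed ("b", "data" and "update" are
 * hypothetical; with a 4096-byte block this records dirty_start == 512 and
 * dirty_end == 1024):
 *
 *	memcpy(data + 512, update, 512);
 *	dm_bufio_mark_partial_buffer_dirty(b, 512, 1024);
 */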
2064
2065 void dm_bufio_mark_buffer_dirty(struct dm_buffer *b)
2066 {
2067 dm_bufio_mark_partial_buffer_dirty(b, 0, b->c->block_size);
2068 }
2069 EXPORT_SYMBOL_GPL(dm_bufio_mark_buffer_dirty);
2070
2071 void dm_bufio_write_dirty_buffers_async(struct dm_bufio_client *c)
2072 {
2073 LIST_HEAD(write_list);
2074
2075 if (WARN_ON_ONCE(dm_bufio_in_request()))
2076 return; /* should never happen */
2077
2078 dm_bufio_lock(c);
2079 __write_dirty_buffers_async(c, 0, &write_list);
2080 dm_bufio_unlock(c);
2081 __flush_write_list(&write_list);
2082 }
2083 EXPORT_SYMBOL_GPL(dm_bufio_write_dirty_buffers_async);
2084
2085 /*
2086 * For performance, it is essential that the buffers are written asynchronously
2087 * and simultaneously (so that the block layer can merge the writes) and then
2088 * waited upon.
2089 *
2090 * Finally, we flush the hardware disk cache.
2091 */
2092 static bool is_writing(struct lru_entry *e, void *context)
2093 {
2094 struct dm_buffer *b = le_to_buffer(e);
2095
2096 return test_bit(B_WRITING, &b->state);
2097 }
2098
2099 int dm_bufio_write_dirty_buffers(struct dm_bufio_client *c)
2100 {
2101 int a, f;
2102 unsigned long nr_buffers;
2103 struct lru_entry *e;
2104 struct lru_iter it;
2105
2106 LIST_HEAD(write_list);
2107
2108 dm_bufio_lock(c);
2109 __write_dirty_buffers_async(c, 0, &write_list);
2110 dm_bufio_unlock(c);
2111 __flush_write_list(&write_list);
2112 dm_bufio_lock(c);
2113
2114 nr_buffers = cache_count(&c->cache, LIST_DIRTY);
2115 lru_iter_begin(&c->cache.lru[LIST_DIRTY], &it);
2116 while ((e = lru_iter_next(&it, is_writing, c))) {
2117 struct dm_buffer *b = le_to_buffer(e);
2118 __cache_inc_buffer(b);
2119
2120 BUG_ON(test_bit(B_READING, &b->state));
2121
2122 if (nr_buffers) {
2123 nr_buffers--;
2124 dm_bufio_unlock(c);
2125 wait_on_bit_io(&b->state, B_WRITING, TASK_UNINTERRUPTIBLE);
2126 dm_bufio_lock(c);
2127 } else {
2128 wait_on_bit_io(&b->state, B_WRITING, TASK_UNINTERRUPTIBLE);
2129 }
2130
2131 if (!test_bit(B_DIRTY, &b->state) && !test_bit(B_WRITING, &b->state))
2132 cache_mark(&c->cache, b, LIST_CLEAN);
2133
2134 cache_put_and_wake(c, b);
2135
2136 cond_resched();
2137 }
2138 lru_iter_end(&it);
2139
2140 wake_up(&c->free_buffer_wait);
2141 dm_bufio_unlock(c);
2142
2143 a = xchg(&c->async_write_error, 0);
2144 f = dm_bufio_issue_flush(c);
2145 if (a)
2146 return a;
2147
2148 return f;
2149 }
2150 EXPORT_SYMBOL_GPL(dm_bufio_write_dirty_buffers);
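
/*
 * Illustrative sketch of a commit sequence: write out everything dirty,
 * wait for it and treat any asynchronous write error or failed flush as a
 * failed commit ("client" is hypothetical):
 *
 *	int r;
 *
 *	r = dm_bufio_write_dirty_buffers(client);
 *	if (r)
 *		return r;
 */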
2151
2152 /*
2153 * Use dm-io to send an empty flush request (REQ_PREFLUSH) to the device.
2154 */
2155 int dm_bufio_issue_flush(struct dm_bufio_client *c)
2156 {
2157 struct dm_io_request io_req = {
2158 .bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC,
2159 .mem.type = DM_IO_KMEM,
2160 .mem.ptr.addr = NULL,
2161 .client = c->dm_io,
2162 };
2163 struct dm_io_region io_reg = {
2164 .bdev = c->bdev,
2165 .sector = 0,
2166 .count = 0,
2167 };
2168
2169 if (WARN_ON_ONCE(dm_bufio_in_request()))
2170 return -EINVAL;
2171
2172 return dm_io(&io_req, 1, &io_reg, NULL, IOPRIO_DEFAULT);
2173 }
2174 EXPORT_SYMBOL_GPL(dm_bufio_issue_flush);
2175
2176 /*
2177 * Use dm-io to send a discard request to the device.
2178 */
2179 int dm_bufio_issue_discard(struct dm_bufio_client *c, sector_t block, sector_t count)
2180 {
2181 struct dm_io_request io_req = {
2182 .bi_opf = REQ_OP_DISCARD | REQ_SYNC,
2183 .mem.type = DM_IO_KMEM,
2184 .mem.ptr.addr = NULL,
2185 .client = c->dm_io,
2186 };
2187 struct dm_io_region io_reg = {
2188 .bdev = c->bdev,
2189 .sector = block_to_sector(c, block),
2190 .count = block_to_sector(c, count),
2191 };
2192
2193 if (WARN_ON_ONCE(dm_bufio_in_request()))
2194 return -EINVAL; /* discards are optional */
2195
2196 return dm_io(&io_req, 1, &io_reg, NULL, IOPRIO_DEFAULT);
2197 }
2198 EXPORT_SYMBOL_GPL(dm_bufio_issue_discard);
2199
2200 static bool forget_buffer(struct dm_bufio_client *c, sector_t block)
2201 {
2202 struct dm_buffer *b;
2203
2204 b = cache_get(&c->cache, block);
2205 if (b) {
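/* only drop the buffer if it is idle: no reading/writing/dirty bits set */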
2206 if (likely(!smp_load_acquire(&b->state))) {
2207 if (cache_remove(&c->cache, b))
2208 __free_buffer_wake(b);
2209 else
2210 cache_put_and_wake(c, b);
2211 } else {
2212 cache_put_and_wake(c, b);
2213 }
2214 }
2215
2216 return b ? true : false;
2217 }
2218
2219 /*
2220 * Free the given buffer.
2221 *
2222 * This is just a hint; if the buffer is in use or dirty, this function
2223 * does nothing.
2224 */
2225 void dm_bufio_forget(struct dm_bufio_client *c, sector_t block)
2226 {
2227 dm_bufio_lock(c);
2228 forget_buffer(c, block);
2229 dm_bufio_unlock(c);
2230 }
2231 EXPORT_SYMBOL_GPL(dm_bufio_forget);
2232
2233 static enum evict_result idle(struct dm_buffer *b, void *context)
2234 {
2235 return b->state ? ER_DONT_EVICT : ER_EVICT;
2236 }
2237
2238 void dm_bufio_forget_buffers(struct dm_bufio_client *c, sector_t block, sector_t n_blocks)
2239 {
2240 dm_bufio_lock(c);
2241 cache_remove_range(&c->cache, block, block + n_blocks, idle, __free_buffer_wake);
2242 dm_bufio_unlock(c);
2243 }
2244 EXPORT_SYMBOL_GPL(dm_bufio_forget_buffers);
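
/*
 * Illustrative sketch: a caller discarding a range might first drop any
 * cached copies and then issue the discard ("client", "block" and "nr" are
 * hypothetical; whether to pair the two calls is the caller's choice):
 *
 *	dm_bufio_forget_buffers(client, block, nr);
 *	r = dm_bufio_issue_discard(client, block, nr);
 */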
2245
2246 void dm_bufio_set_minimum_buffers(struct dm_bufio_client *c, unsigned int n)
2247 {
2248 c->minimum_buffers = n;
2249 }
2250 EXPORT_SYMBOL_GPL(dm_bufio_set_minimum_buffers);
2251
2252 unsigned int dm_bufio_get_block_size(struct dm_bufio_client *c)
2253 {
2254 return c->block_size;
2255 }
2256 EXPORT_SYMBOL_GPL(dm_bufio_get_block_size);
2257
2258 sector_t dm_bufio_get_device_size(struct dm_bufio_client *c)
2259 {
2260 sector_t s = bdev_nr_sectors(c->bdev);
2261
2262 if (s >= c->start)
2263 s -= c->start;
2264 else
2265 s = 0;
2266 if (likely(c->sectors_per_block_bits >= 0))
2267 s >>= c->sectors_per_block_bits;
2268 else
2269 sector_div(s, c->block_size >> SECTOR_SHIFT);
2270 return s;
2271 }
2272 EXPORT_SYMBOL_GPL(dm_bufio_get_device_size);
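
/*
 * Worked example (illustrative; the values are hypothetical): for an
 * 8 GiB device (16777216 sectors), start == 2048 and a 4 KiB block size
 * (sectors_per_block_bits == 3), this returns
 * (16777216 - 2048) >> 3 == 2096896 blocks.
 */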
2273
2274 struct dm_io_client *dm_bufio_get_dm_io_client(struct dm_bufio_client *c)
2275 {
2276 return c->dm_io;
2277 }
2278 EXPORT_SYMBOL_GPL(dm_bufio_get_dm_io_client);
2279
2280 sector_t dm_bufio_get_block_number(struct dm_buffer *b)
2281 {
2282 return b->block;
2283 }
2284 EXPORT_SYMBOL_GPL(dm_bufio_get_block_number);
2285
2286 void *dm_bufio_get_block_data(struct dm_buffer *b)
2287 {
2288 return b->data;
2289 }
2290 EXPORT_SYMBOL_GPL(dm_bufio_get_block_data);
2291
2292 void *dm_bufio_get_aux_data(struct dm_buffer *b)
2293 {
2294 return b + 1;
2295 }
2296 EXPORT_SYMBOL_GPL(dm_bufio_get_aux_data);
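
/*
 * Illustrative sketch: the auxiliary data lives immediately after
 * struct dm_buffer and is sized by the aux_size argument of
 * dm_bufio_client_create() ("struct my_aux", "bdev", "client" and "b" are
 * hypothetical):
 *
 *	struct my_aux { unsigned int generation; };
 *
 *	client = dm_bufio_client_create(bdev, 4096, 1,
 *					sizeof(struct my_aux), NULL, NULL, 0);
 *	...
 *	struct my_aux *aux = dm_bufio_get_aux_data(b);
 *	aux->generation++;
 */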
2297
2298 struct dm_bufio_client *dm_bufio_get_client(struct dm_buffer *b)
2299 {
2300 return b->c;
2301 }
2302 EXPORT_SYMBOL_GPL(dm_bufio_get_client);
2303
2304 static enum it_action warn_leak(struct dm_buffer *b, void *context)
2305 {
2306 bool *warned = context;
2307
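/* warn only once, on the first leaked buffer; each leak is still logged */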
2308 WARN_ON(!(*warned));
2309 *warned = true;
2310 DMERR("leaked buffer %llx, hold count %u, list %d",
2311 (unsigned long long)b->block, atomic_read(&b->hold_count), b->list_mode);
2312 #ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
2313 stack_trace_print(b->stack_entries, b->stack_len, 1);
2314 /* mark unclaimed to avoid WARN_ON at end of drop_buffers() */
2315 atomic_set(&b->hold_count, 0);
2316 #endif
2317 return IT_NEXT;
2318 }
2319
2320 static void drop_buffers(struct dm_bufio_client *c)
2321 {
2322 int i;
2323 struct dm_buffer *b;
2324
2325 if (WARN_ON(dm_bufio_in_request()))
2326 return; /* should never happen */
2327
2328 /*
2329 * An optimization so that the buffers are not written one-by-one.
2330 */
2331 dm_bufio_write_dirty_buffers_async(c);
2332
2333 dm_bufio_lock(c);
2334
2335 while ((b = __get_unclaimed_buffer(c)))
2336 __free_buffer_wake(b);
2337
2338 for (i = 0; i < LIST_SIZE; i++) {
2339 bool warned = false;
2340
2341 cache_iterate(&c->cache, i, warn_leak, &warned);
2342 }
2343
2344 #ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
2345 while ((b = __get_unclaimed_buffer(c)))
2346 __free_buffer_wake(b);
2347 #endif
2348
2349 for (i = 0; i < LIST_SIZE; i++)
2350 WARN_ON(cache_count(&c->cache, i));
2351
2352 dm_bufio_unlock(c);
2353 }
2354
2355 static unsigned long get_retain_buffers(struct dm_bufio_client *c)
2356 {
2357 unsigned long retain_bytes = READ_ONCE(dm_bufio_retain_bytes);
2358
2359 if (likely(c->sectors_per_block_bits >= 0))
2360 retain_bytes >>= c->sectors_per_block_bits + SECTOR_SHIFT;
2361 else
2362 retain_bytes /= c->block_size;
2363
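/*
 * Worked example (illustrative): with retain_bytes == 1 MiB and a 4 KiB
 * block size, sectors_per_block_bits == 3, so this returns
 * 1048576 >> (3 + 9) == 256 buffers.
 */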
2364 return retain_bytes;
2365 }
2366
2367 static void __scan(struct dm_bufio_client *c)
2368 {
2369 int l;
2370 struct dm_buffer *b;
2371 unsigned long freed = 0;
2372 unsigned long retain_target = get_retain_buffers(c);
2373 unsigned long count = cache_total(&c->cache);
2374
2375 for (l = 0; l < LIST_SIZE; l++) {
2376 while (true) {
2377 if (count - freed <= retain_target)
2378 atomic_long_set(&c->need_shrink, 0);
2379 if (!atomic_long_read(&c->need_shrink))
2380 break;
2381
2382 b = cache_evict(&c->cache, l,
2383 l == LIST_CLEAN ? is_clean : is_dirty, c);
2384 if (!b)
2385 break;
2386
2387 __make_buffer_clean(b);
2388 __free_buffer_wake(b);
2389
2390 atomic_long_dec(&c->need_shrink);
2391 freed++;
2392
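/* periodically drop the lock and reschedule so eviction doesn't hog the CPU */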
2393 if (unlikely(freed % SCAN_RESCHED_CYCLE == 0)) {
2394 dm_bufio_unlock(c);
2395 cond_resched();
2396 dm_bufio_lock(c);
2397 }
2398 }
2399 }
2400 }
2401
2402 static void shrink_work(struct work_struct *w)
2403 {
2404 struct dm_bufio_client *c = container_of(w, struct dm_bufio_client, shrink_work);
2405
2406 dm_bufio_lock(c);
2407 __scan(c);
2408 dm_bufio_unlock(c);
2409 }
2410
2411 static unsigned long dm_bufio_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
2412 {
2413 struct dm_bufio_client *c;
2414
2415 c = container_of(shrink, struct dm_bufio_client, shrinker);
2416 atomic_long_add(sc->nr_to_scan, &c->need_shrink);
2417 queue_work(dm_bufio_wq, &c->shrink_work);
2418
2419 return sc->nr_to_scan;
2420 }
2421
2422 static unsigned long dm_bufio_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
2423 {
2424 struct dm_bufio_client *c = container_of(shrink, struct dm_bufio_client, shrinker);
2425 unsigned long count = cache_total(&c->cache);
2426 unsigned long retain_target = get_retain_buffers(c);
2427 unsigned long queued_for_cleanup = atomic_long_read(&c->need_shrink);
2428
2429 if (unlikely(count < retain_target))
2430 count = 0;
2431 else
2432 count -= retain_target;
2433
2434 if (unlikely(count < queued_for_cleanup))
2435 count = 0;
2436 else
2437 count -= queued_for_cleanup;
2438
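/*
 * Worked example (illustrative): with 1000 cached buffers, a retain
 * target of 64 and 200 buffers already queued for cleanup, we report
 * 1000 - 64 - 200 == 736 reclaimable objects.
 */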
2439 return count;
2440 }
2441
2442 /*
2443 * Create the buffering interface
2444 */
2445 struct dm_bufio_client *dm_bufio_client_create(struct block_device *bdev, unsigned int block_size,
2446 unsigned int reserved_buffers, unsigned int aux_size,
2447 void (*alloc_callback)(struct dm_buffer *),
2448 void (*write_callback)(struct dm_buffer *),
2449 unsigned int flags)
2450 {
2451 int r;
2452 unsigned int num_locks;
2453 struct dm_bufio_client *c;
2454 char slab_name[64];
2455 static atomic_t seqno = ATOMIC_INIT(0);
2456
2457 if (!block_size || block_size & ((1 << SECTOR_SHIFT) - 1)) {
2458 DMERR("%s: block size not specified or is not multiple of 512b", __func__);
2459 r = -EINVAL;
2460 goto bad_client;
2461 }
2462
2463 num_locks = dm_num_hash_locks();
2464 c = kzalloc(sizeof(*c) + (num_locks * sizeof(struct buffer_tree)), GFP_KERNEL);
2465 if (!c) {
2466 r = -ENOMEM;
2467 goto bad_client;
2468 }
2469 cache_init(&c->cache, num_locks, (flags & DM_BUFIO_CLIENT_NO_SLEEP) != 0);
2470
2471 c->bdev = bdev;
2472 c->block_size = block_size;
2473 if (is_power_of_2(block_size))
2474 c->sectors_per_block_bits = __ffs(block_size) - SECTOR_SHIFT;
2475 else
2476 c->sectors_per_block_bits = -1;
2477
2478 c->alloc_callback = alloc_callback;
2479 c->write_callback = write_callback;
2480
2481 if (flags & DM_BUFIO_CLIENT_NO_SLEEP) {
2482 c->no_sleep = true;
2483 static_branch_inc(&no_sleep_enabled);
2484 }
2485
2486 mutex_init(&c->lock);
2487 spin_lock_init(&c->spinlock);
2488 INIT_LIST_HEAD(&c->reserved_buffers);
2489 c->need_reserved_buffers = reserved_buffers;
2490
2491 dm_bufio_set_minimum_buffers(c, DM_BUFIO_MIN_BUFFERS);
2492
2493 init_waitqueue_head(&c->free_buffer_wait);
2494 c->async_write_error = 0;
2495
2496 c->dm_io = dm_io_client_create();
2497 if (IS_ERR(c->dm_io)) {
2498 r = PTR_ERR(c->dm_io);
2499 goto bad_dm_io;
2500 }
2501
2502 if (block_size <= KMALLOC_MAX_SIZE &&
2503 (block_size < PAGE_SIZE || !is_power_of_2(block_size))) {
2504 unsigned int align = min(1U << __ffs(block_size), (unsigned int)PAGE_SIZE);
2505
2506 snprintf(slab_name, sizeof(slab_name), "dm_bufio_cache-%u-%u",
2507 block_size, atomic_inc_return(&seqno));
2508 c->slab_cache = kmem_cache_create(slab_name, block_size, align,
2509 SLAB_RECLAIM_ACCOUNT, NULL);
2510 if (!c->slab_cache) {
2511 r = -ENOMEM;
2512 goto bad;
2513 }
2514 }
2515 if (aux_size)
2516 snprintf(slab_name, sizeof(slab_name), "dm_bufio_buffer-%u-%u",
2517 aux_size, atomic_inc_return(&seqno));
2518 else
2519 snprintf(slab_name, sizeof(slab_name), "dm_bufio_buffer-%u",
2520 atomic_inc_return(&seqno));
2521 c->slab_buffer = kmem_cache_create(slab_name, sizeof(struct dm_buffer) + aux_size,
2522 0, SLAB_RECLAIM_ACCOUNT, NULL);
2523 if (!c->slab_buffer) {
2524 r = -ENOMEM;
2525 goto bad;
2526 }
2527
2528 while (c->need_reserved_buffers) {
2529 struct dm_buffer *b = alloc_buffer(c, GFP_KERNEL);
2530
2531 if (!b) {
2532 r = -ENOMEM;
2533 goto bad;
2534 }
2535 __free_buffer_wake(b);
2536 }
2537
2538 INIT_WORK(&c->shrink_work, shrink_work);
2539 atomic_long_set(&c->need_shrink, 0);
2540
2541 c->shrinker.count_objects = dm_bufio_shrink_count;
2542 c->shrinker.scan_objects = dm_bufio_shrink_scan;
2543 c->shrinker.seeks = 1;
2544 c->shrinker.batch = 0;
2545 r = register_shrinker(&c->shrinker, "dm-bufio:(%u:%u)",
2546 MAJOR(bdev->bd_dev), MINOR(bdev->bd_dev));
2547 if (r)
2548 goto bad;
2549
2550 mutex_lock(&dm_bufio_clients_lock);
2551 dm_bufio_client_count++;
2552 list_add(&c->client_list, &dm_bufio_all_clients);
2553 __cache_size_refresh();
2554 mutex_unlock(&dm_bufio_clients_lock);
2555
2556 return c;
2557
2558 bad:
2559 while (!list_empty(&c->reserved_buffers)) {
2560 struct dm_buffer *b = list_to_buffer(c->reserved_buffers.next);
2561
2562 list_del(&b->lru.list);
2563 free_buffer(b);
2564 }
2565 kmem_cache_destroy(c->slab_cache);
2566 kmem_cache_destroy(c->slab_buffer);
2567 dm_io_client_destroy(c->dm_io);
2568 bad_dm_io:
2569 mutex_destroy(&c->lock);
2570 if (c->no_sleep)
2571 static_branch_dec(&no_sleep_enabled);
2572 kfree(c);
2573 bad_client:
2574 return ERR_PTR(r);
2575 }
2576 EXPORT_SYMBOL_GPL(dm_bufio_client_create);
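
/*
 * Illustrative sketch: a client with 4 KiB blocks, one reserved buffer and
 * no auxiliary data or callbacks ("bdev" is hypothetical):
 *
 *	struct dm_bufio_client *client;
 *
 *	client = dm_bufio_client_create(bdev, 4096, 1, 0, NULL, NULL, 0);
 *	if (IS_ERR(client))
 *		return PTR_ERR(client);
 *	...
 *	dm_bufio_client_destroy(client);
 */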
2577
2578 /*
2579 * Free the buffering interface.
2580 * It is required that there are no outstanding references to any buffers.
2581 */
2582 void dm_bufio_client_destroy(struct dm_bufio_client *c)
2583 {
2584 unsigned int i;
2585
2586 drop_buffers(c);
2587
2588 unregister_shrinker(&c->shrinker);
2589 flush_work(&c->shrink_work);
2590
2591 mutex_lock(&dm_bufio_clients_lock);
2592
2593 list_del(&c->client_list);
2594 dm_bufio_client_count--;
2595 __cache_size_refresh();
2596
2597 mutex_unlock(&dm_bufio_clients_lock);
2598
2599 WARN_ON(c->need_reserved_buffers);
2600
2601 while (!list_empty(&c->reserved_buffers)) {
2602 struct dm_buffer *b = list_to_buffer(c->reserved_buffers.next);
2603
2604 list_del(&b->lru.list);
2605 free_buffer(b);
2606 }
2607
2608 for (i = 0; i < LIST_SIZE; i++)
2609 if (cache_count(&c->cache, i))
2610 DMERR("leaked buffer count %d: %lu", i, cache_count(&c->cache, i));
2611
2612 for (i = 0; i < LIST_SIZE; i++)
2613 WARN_ON(cache_count(&c->cache, i));
2614
2615 cache_destroy(&c->cache);
2616 kmem_cache_destroy(c->slab_cache);
2617 kmem_cache_destroy(c->slab_buffer);
2618 dm_io_client_destroy(c->dm_io);
2619 mutex_destroy(&c->lock);
2620 if (c->no_sleep)
2621 static_branch_dec(&no_sleep_enabled);
2622 kfree(c);
2623 }
2624 EXPORT_SYMBOL_GPL(dm_bufio_client_destroy);
2625
2626 void dm_bufio_client_reset(struct dm_bufio_client *c)
2627 {
2628 drop_buffers(c);
2629 flush_work(&c->shrink_work);
2630 }
2631 EXPORT_SYMBOL_GPL(dm_bufio_client_reset);
2632
2633 void dm_bufio_set_sector_offset(struct dm_bufio_client *c, sector_t start)
2634 {
2635 c->start = start;
2636 }
2637 EXPORT_SYMBOL_GPL(dm_bufio_set_sector_offset);
2638
2639 /*--------------------------------------------------------------*/
2640
2641 static unsigned int get_max_age_hz(void)
2642 {
2643 unsigned int max_age = READ_ONCE(dm_bufio_max_age);
2644
2645 if (max_age > UINT_MAX / HZ)
2646 max_age = UINT_MAX / HZ;
2647
2648 return max_age * HZ;
2649 }
2650
2651 static bool older_than(struct dm_buffer *b, unsigned long age_hz)
2652 {
2653 return time_after_eq(jiffies, READ_ONCE(b->last_accessed) + age_hz);
2654 }
2655
2656 struct evict_params {
2657 gfp_t gfp;
2658 unsigned long age_hz;
2659
2660 /*
2661 * This gets updated with the largest last_accessed (i.e. most
2662 * recently used) of the evicted buffers. It will not be reinitialised
2663 * by __evict_many(), so you can use it across multiple invocations.
2664 */
2665 unsigned long last_accessed;
2666 };
2667
2668 /*
2669 * We may not be able to evict this buffer if IO is pending or the client
2670 * is still using it.
2671 *
2672 * And if GFP_NOFS is used, we must not do any I/O because we hold
2673 * dm_bufio_clients_lock and we would risk deadlock if the I/O gets
2674 * rerouted to a different bufio client.
2675 */
2676 static enum evict_result select_for_evict(struct dm_buffer *b, void *context)
2677 {
2678 struct evict_params *params = context;
2679
2680 if (!(params->gfp & __GFP_FS) ||
2681 (static_branch_unlikely(&no_sleep_enabled) && b->c->no_sleep)) {
2682 if (test_bit_acquire(B_READING, &b->state) ||
2683 test_bit(B_WRITING, &b->state) ||
2684 test_bit(B_DIRTY, &b->state))
2685 return ER_DONT_EVICT;
2686 }
2687
2688 return older_than(b, params->age_hz) ? ER_EVICT : ER_STOP;
2689 }
2690
2691 static unsigned long __evict_many(struct dm_bufio_client *c,
2692 struct evict_params *params,
2693 int list_mode, unsigned long max_count)
2694 {
2695 unsigned long count;
2696 unsigned long last_accessed;
2697 struct dm_buffer *b;
2698
2699 for (count = 0; count < max_count; count++) {
2700 b = cache_evict(&c->cache, list_mode, select_for_evict, params);
2701 if (!b)
2702 break;
2703
2704 last_accessed = READ_ONCE(b->last_accessed);
2705 if (time_after_eq(params->last_accessed, last_accessed))
2706 params->last_accessed = last_accessed;
2707
2708 __make_buffer_clean(b);
2709 __free_buffer_wake(b);
2710
2711 cond_resched();
2712 }
2713
2714 return count;
2715 }
2716
2717 static void evict_old_buffers(struct dm_bufio_client *c, unsigned long age_hz)
2718 {
2719 struct evict_params params = {.gfp = 0, .age_hz = age_hz, .last_accessed = 0};
2720 unsigned long retain = get_retain_buffers(c);
2721 unsigned long count;
2722 LIST_HEAD(write_list);
2723
2724 dm_bufio_lock(c);
2725
2726 __check_watermark(c, &write_list);
2727 if (unlikely(!list_empty(&write_list))) {
2728 dm_bufio_unlock(c);
2729 __flush_write_list(&write_list);
2730 dm_bufio_lock(c);
2731 }
2732
2733 count = cache_total(&c->cache);
2734 if (count > retain)
2735 __evict_many(c, &params, LIST_CLEAN, count - retain);
2736
2737 dm_bufio_unlock(c);
2738 }
2739
2740 static void cleanup_old_buffers(void)
2741 {
2742 unsigned long max_age_hz = get_max_age_hz();
2743 struct dm_bufio_client *c;
2744
2745 mutex_lock(&dm_bufio_clients_lock);
2746
2747 __cache_size_refresh();
2748
2749 list_for_each_entry(c, &dm_bufio_all_clients, client_list)
2750 evict_old_buffers(c, max_age_hz);
2751
2752 mutex_unlock(&dm_bufio_clients_lock);
2753 }
2754
2755 static void work_fn(struct work_struct *w)
2756 {
2757 cleanup_old_buffers();
2758
2759 queue_delayed_work(dm_bufio_wq, &dm_bufio_cleanup_old_work,
2760 DM_BUFIO_WORK_TIMER_SECS * HZ);
2761 }
2762
2763 /*--------------------------------------------------------------*/
2764
2765 /*
2766 * Global cleanup tries to evict the oldest buffers from across _all_
2767 * the clients. It does this by repeatedly evicting a few buffers from
2768 * the client that holds the oldest buffer. It's approximate, but hopefully
2769 * good enough.
2770 */
2771 static struct dm_bufio_client *__pop_client(void)
2772 {
2773 struct list_head *h;
2774
2775 if (list_empty(&dm_bufio_all_clients))
2776 return NULL;
2777
2778 h = dm_bufio_all_clients.next;
2779 list_del(h);
2780 return container_of(h, struct dm_bufio_client, client_list);
2781 }
2782
2783 /*
2784 * Inserts the client into the global client list based on its
2785 * 'oldest_buffer' field.
2786 */
2787 static void __insert_client(struct dm_bufio_client *new_client)
2788 {
2789 struct dm_bufio_client *c;
2790 struct list_head *h = dm_bufio_all_clients.next;
2791
2792 while (h != &dm_bufio_all_clients) {
2793 c = container_of(h, struct dm_bufio_client, client_list);
2794 if (time_after_eq(c->oldest_buffer, new_client->oldest_buffer))
2795 break;
2796 h = h->next;
2797 }
2798
2799 list_add_tail(&new_client->client_list, h);
2800 }
2801
2802 static unsigned long __evict_a_few(unsigned long nr_buffers)
2803 {
2804 unsigned long count;
2805 struct dm_bufio_client *c;
2806 struct evict_params params = {
2807 .gfp = GFP_KERNEL,
2808 .age_hz = 0,
2809 /* set to jiffies in case there are no buffers in this client */
2810 .last_accessed = jiffies
2811 };
2812
2813 c = __pop_client();
2814 if (!c)
2815 return 0;
2816
2817 dm_bufio_lock(c);
2818 count = __evict_many(c, &params, LIST_CLEAN, nr_buffers);
2819 dm_bufio_unlock(c);
2820
2821 if (count)
2822 c->oldest_buffer = params.last_accessed;
2823 __insert_client(c);
2824
2825 return count;
2826 }
2827
2828 static void check_watermarks(void)
2829 {
2830 LIST_HEAD(write_list);
2831 struct dm_bufio_client *c;
2832
2833 mutex_lock(&dm_bufio_clients_lock);
2834 list_for_each_entry(c, &dm_bufio_all_clients, client_list) {
2835 dm_bufio_lock(c);
2836 __check_watermark(c, &write_list);
2837 dm_bufio_unlock(c);
2838 }
2839 mutex_unlock(&dm_bufio_clients_lock);
2840
2841 __flush_write_list(&write_list);
2842 }
2843
2844 static void evict_old(void)
2845 {
2846 unsigned long threshold = dm_bufio_cache_size -
2847 dm_bufio_cache_size / DM_BUFIO_LOW_WATERMARK_RATIO;
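/*
 * Worked example (illustrative): with a 1 GiB cache and a ratio of 16,
 * the threshold is 1 GiB - 64 MiB == 960 MiB.
 */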
2848
2849 mutex_lock(&dm_bufio_clients_lock);
2850 while (dm_bufio_current_allocated > threshold) {
2851 if (!__evict_a_few(64))
2852 break;
2853 cond_resched();
2854 }
2855 mutex_unlock(&dm_bufio_clients_lock);
2856 }
2857
2858 static void do_global_cleanup(struct work_struct *w)
2859 {
2860 check_watermarks();
2861 evict_old();
2862 }
2863
2864 /*
2865 *--------------------------------------------------------------
2866 * Module setup
2867 *--------------------------------------------------------------
2868 */
2869
2870 /*
2871 * This is called only once for the whole dm_bufio module.
2872 * It initializes the memory limit.
2873 */
2874 static int __init dm_bufio_init(void)
2875 {
2876 __u64 mem;
2877
2878 dm_bufio_allocated_kmem_cache = 0;
2879 dm_bufio_allocated_get_free_pages = 0;
2880 dm_bufio_allocated_vmalloc = 0;
2881 dm_bufio_current_allocated = 0;
2882
2883 mem = (__u64)mult_frac(totalram_pages() - totalhigh_pages(),
2884 DM_BUFIO_MEMORY_PERCENT, 100) << PAGE_SHIFT;
2885
2886 if (mem > ULONG_MAX)
2887 mem = ULONG_MAX;
2888
2889 #ifdef CONFIG_MMU
2890 if (mem > mult_frac(VMALLOC_TOTAL, DM_BUFIO_VMALLOC_PERCENT, 100))
2891 mem = mult_frac(VMALLOC_TOTAL, DM_BUFIO_VMALLOC_PERCENT, 100);
2892 #endif
2893
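/*
 * Worked example (illustrative): a 64-bit machine with 16 GiB of RAM gets
 * a default cache of 2% of RAM, roughly 328 MiB; on 32-bit machines the
 * vmalloc clamp above usually dominates.
 */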
2894 dm_bufio_default_cache_size = mem;
2895
2896 mutex_lock(&dm_bufio_clients_lock);
2897 __cache_size_refresh();
2898 mutex_unlock(&dm_bufio_clients_lock);
2899
2900 dm_bufio_wq = alloc_workqueue("dm_bufio_cache", WQ_MEM_RECLAIM, 0);
2901 if (!dm_bufio_wq)
2902 return -ENOMEM;
2903
2904 INIT_DELAYED_WORK(&dm_bufio_cleanup_old_work, work_fn);
2905 INIT_WORK(&dm_bufio_replacement_work, do_global_cleanup);
2906 queue_delayed_work(dm_bufio_wq, &dm_bufio_cleanup_old_work,
2907 DM_BUFIO_WORK_TIMER_SECS * HZ);
2908
2909 return 0;
2910 }
2911
2912 /*
2913 * This is called once when unloading the dm_bufio module.
2914 */
2915 static void __exit dm_bufio_exit(void)
2916 {
2917 int bug = 0;
2918
2919 cancel_delayed_work_sync(&dm_bufio_cleanup_old_work);
2920 destroy_workqueue(dm_bufio_wq);
2921
2922 if (dm_bufio_client_count) {
2923 DMCRIT("%s: dm_bufio_client_count leaked: %d",
2924 __func__, dm_bufio_client_count);
2925 bug = 1;
2926 }
2927
2928 if (dm_bufio_current_allocated) {
2929 DMCRIT("%s: dm_bufio_current_allocated leaked: %lu",
2930 __func__, dm_bufio_current_allocated);
2931 bug = 1;
2932 }
2933
2934 if (dm_bufio_allocated_get_free_pages) {
2935 DMCRIT("%s: dm_bufio_allocated_get_free_pages leaked: %lu",
2936 __func__, dm_bufio_allocated_get_free_pages);
2937 bug = 1;
2938 }
2939
2940 if (dm_bufio_allocated_vmalloc) {
2941 DMCRIT("%s: dm_bufio_vmalloc leaked: %lu",
2942 __func__, dm_bufio_allocated_vmalloc);
2943 bug = 1;
2944 }
2945
2946 WARN_ON(bug); /* leaks are not worth crashing the system */
2947 }
2948
2949 module_init(dm_bufio_init)
2950 module_exit(dm_bufio_exit)
2951
2952 module_param_named(max_cache_size_bytes, dm_bufio_cache_size, ulong, 0644);
2953 MODULE_PARM_DESC(max_cache_size_bytes, "Size of metadata cache");
2954
2955 module_param_named(max_age_seconds, dm_bufio_max_age, uint, 0644);
2956 MODULE_PARM_DESC(max_age_seconds, "Max age of a buffer in seconds");
2957
2958 module_param_named(retain_bytes, dm_bufio_retain_bytes, ulong, 0644);
2959 MODULE_PARM_DESC(retain_bytes, "Try to keep at least this many bytes cached in memory");
2960
2961 module_param_named(peak_allocated_bytes, dm_bufio_peak_allocated, ulong, 0644);
2962 MODULE_PARM_DESC(peak_allocated_bytes, "Tracks the maximum allocated memory");
2963
2964 module_param_named(allocated_kmem_cache_bytes, dm_bufio_allocated_kmem_cache, ulong, 0444);
2965 MODULE_PARM_DESC(allocated_kmem_cache_bytes, "Memory allocated with kmem_cache_alloc");
2966
2967 module_param_named(allocated_get_free_pages_bytes, dm_bufio_allocated_get_free_pages, ulong, 0444);
2968 MODULE_PARM_DESC(allocated_get_free_pages_bytes, "Memory allocated with get_free_pages");
2969
2970 module_param_named(allocated_vmalloc_bytes, dm_bufio_allocated_vmalloc, ulong, 0444);
2971 MODULE_PARM_DESC(allocated_vmalloc_bytes, "Memory allocated with vmalloc");
2972
2973 module_param_named(current_allocated_bytes, dm_bufio_current_allocated, ulong, 0444);
2974 MODULE_PARM_DESC(current_allocated_bytes, "Memory currently used by the cache");
2975
2976 MODULE_AUTHOR("Mikulas Patocka <dm-devel@redhat.com>");
2977 MODULE_DESCRIPTION(DM_NAME " buffered I/O library");
2978 MODULE_LICENSE("GPL");
2979