1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * Copyright (C) 2009-2011 Red Hat, Inc.
4 *
5 * Author: Mikulas Patocka <mpatocka@redhat.com>
6 *
7 * This file is released under the GPL.
8 */
9
10 #include <linux/dm-bufio.h>
11
12 #include <linux/device-mapper.h>
13 #include <linux/dm-io.h>
14 #include <linux/slab.h>
15 #include <linux/sched/mm.h>
16 #include <linux/jiffies.h>
17 #include <linux/vmalloc.h>
18 #include <linux/shrinker.h>
19 #include <linux/module.h>
20 #include <linux/rbtree.h>
21 #include <linux/stacktrace.h>
22 #include <linux/jump_label.h>
23
24 #include "dm.h"
25
26 #define DM_MSG_PREFIX "bufio"
27
28 /*
29 * Memory management policy:
30 * Limit the number of buffers to DM_BUFIO_MEMORY_PERCENT of main memory
31 * or DM_BUFIO_VMALLOC_PERCENT of vmalloc memory (whichever is lower).
32 * Always allocate at least DM_BUFIO_MIN_BUFFERS buffers.
33 * Start background writeback when the number of dirty buffers exceeds
34 * DM_BUFIO_WRITEBACK_RATIO times the number of clean buffers.
35 */
36 #define DM_BUFIO_MIN_BUFFERS 8
37
38 #define DM_BUFIO_MEMORY_PERCENT 2
39 #define DM_BUFIO_VMALLOC_PERCENT 25
40 #define DM_BUFIO_WRITEBACK_RATIO 3
41 #define DM_BUFIO_LOW_WATERMARK_RATIO 16
42
43 /*
44 * Check buffer ages in this interval (seconds)
45 */
46 #define DM_BUFIO_WORK_TIMER_SECS 30
47
48 /*
49 * Free buffers when they are older than this (seconds)
50 */
51 #define DM_BUFIO_DEFAULT_AGE_SECS 300
52
53 /*
54 * The nr of bytes of cached data to keep around.
55 */
56 #define DM_BUFIO_DEFAULT_RETAIN_BYTES (256 * 1024)
57
58 /*
59 * Align buffer writes to this boundary.
60 * Tests show that SSDs have the highest IOPS when using 4k writes.
61 */
62 #define DM_BUFIO_WRITE_ALIGN 4096
63
64 /*
65 * dm_buffer->list_mode
66 */
67 #define LIST_CLEAN 0
68 #define LIST_DIRTY 1
69 #define LIST_SIZE 2
70
71 /*--------------------------------------------------------------*/
72
73 /*
74 * Rather than use an LRU list, we use a clock algorithm where entries
75 * are held in a circular list. When an entry is 'hit' a reference bit
76 * is set. The least recently used entry is approximated by running a
77 * cursor around the list selecting unreferenced entries. Referenced
78 * entries have their reference bit cleared as the cursor passes them.
79 */
80 struct lru_entry {
81 struct list_head list;
82 atomic_t referenced;
83 };
84
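/*
 * Iterator state for walking an lru: 'e' is the next entry to visit and
 * 'stop' marks the last one. Live iterators are linked on lru->iterators
 * so lru_iter_invalidate() can fix them up when an entry is removed.
 */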
85 struct lru_iter {
86 struct lru *lru;
87 struct list_head list;
88 struct lru_entry *stop;
89 struct lru_entry *e;
90 };
91
92 struct lru {
93 struct list_head *cursor;
94 unsigned long count;
95
96 struct list_head iterators;
97 };
98
99 /*--------------*/
100
101 static void lru_init(struct lru *lru)
102 {
103 lru->cursor = NULL;
104 lru->count = 0;
105 INIT_LIST_HEAD(&lru->iterators);
106 }
107
108 static void lru_destroy(struct lru *lru)
109 {
110 WARN_ON_ONCE(lru->cursor);
111 WARN_ON_ONCE(!list_empty(&lru->iterators));
112 }
113
114 /*
115 * Insert a new entry into the lru.
116 */
117 static void lru_insert(struct lru *lru, struct lru_entry *le)
118 {
119 /*
120 * Don't be tempted to set this to 1; it makes the lru aspect
121 * perform poorly.
122 */
123 atomic_set(&le->referenced, 0);
124
125 if (lru->cursor) {
126 list_add_tail(&le->list, lru->cursor);
127 } else {
128 INIT_LIST_HEAD(&le->list);
129 lru->cursor = &le->list;
130 }
131 lru->count++;
132 }
133
134 /*--------------*/
135
136 /*
137 * Convert a list_head pointer to an lru_entry pointer.
138 */
139 static inline struct lru_entry *to_le(struct list_head *l)
140 {
141 return container_of(l, struct lru_entry, list);
142 }
143
144 /*
145 * Initialize an lru_iter and add it to the list of cursors in the lru.
146 */
147 static void lru_iter_begin(struct lru *lru, struct lru_iter *it)
148 {
149 it->lru = lru;
150 it->stop = lru->cursor ? to_le(lru->cursor->prev) : NULL;
151 it->e = lru->cursor ? to_le(lru->cursor) : NULL;
152 list_add(&it->list, &lru->iterators);
153 }
154
155 /*
156 * Remove an lru_iter from the list of cursors in the lru.
157 */
158 static inline void lru_iter_end(struct lru_iter *it)
159 {
160 list_del(&it->list);
161 }
162
163 /* Predicate function type to be used with lru_iter_next */
164 typedef bool (*iter_predicate)(struct lru_entry *le, void *context);
165
166 /*
167 * Advance the cursor to the next entry that passes the
168 * predicate, and return that entry. Returns NULL if the
169 * iteration is complete.
170 */
171 static struct lru_entry *lru_iter_next(struct lru_iter *it,
172 iter_predicate pred, void *context)
173 {
174 struct lru_entry *e;
175
176 while (it->e) {
177 e = it->e;
178
179 /* advance the cursor */
180 if (it->e == it->stop)
181 it->e = NULL;
182 else
183 it->e = to_le(it->e->list.next);
184
185 if (pred(e, context))
186 return e;
187 }
188
189 return NULL;
190 }
191
192 /*
193 * Invalidate a specific lru_entry and update all cursors in
194 * the lru accordingly.
195 */
196 static void lru_iter_invalidate(struct lru *lru, struct lru_entry *e)
197 {
198 struct lru_iter *it;
199
200 list_for_each_entry(it, &lru->iterators, list) {
201 /* Move it->e forwards if necessary. */
202 if (it->e == e) {
203 it->e = to_le(it->e->list.next);
204 if (it->e == e)
205 it->e = NULL;
206 }
207
208 /* Move it->stop backwards if necessary. */
209 if (it->stop == e) {
210 it->stop = to_le(it->stop->list.prev);
211 if (it->stop == e)
212 it->stop = NULL;
213 }
214 }
215 }
216
217 /*--------------*/
218
219 /*
220 * Remove a specific entry from the lru.
221 */
222 static void lru_remove(struct lru *lru, struct lru_entry *le)
223 {
224 lru_iter_invalidate(lru, le);
225 if (lru->count == 1) {
226 lru->cursor = NULL;
227 } else {
228 if (lru->cursor == &le->list)
229 lru->cursor = lru->cursor->next;
230 list_del(&le->list);
231 }
232 lru->count--;
233 }
234
235 /*
236 * Mark as referenced.
237 */
238 static inline void lru_reference(struct lru_entry *le)
239 {
240 atomic_set(&le->referenced, 1);
241 }
242
243 /*--------------*/
244
245 /*
246 * Remove the (approximately) least recently used entry that passes the
247 * predicate. Returns NULL if no such entry is found.
248 */
249 enum evict_result {
250 ER_EVICT,
251 ER_DONT_EVICT,
252 ER_STOP, /* stop looking for something to evict */
253 };
254
255 typedef enum evict_result (*le_predicate)(struct lru_entry *le, void *context);
256
257 static struct lru_entry *lru_evict(struct lru *lru, le_predicate pred, void *context, bool no_sleep)
258 {
259 unsigned long tested = 0;
260 struct list_head *h = lru->cursor;
261 struct lru_entry *le;
262
263 if (!h)
264 return NULL;
265 /*
266 * In the worst case we have to loop around twice. Once to clear
267 * the reference flags, and then again to discover the predicate
268 * fails for all entries.
269 */
270 while (tested < lru->count) {
271 le = container_of(h, struct lru_entry, list);
272
273 if (atomic_read(&le->referenced)) {
274 atomic_set(&le->referenced, 0);
275 } else {
276 tested++;
277 switch (pred(le, context)) {
278 case ER_EVICT:
279 /*
280 * Adjust the cursor, so we start the next
281 * search from here.
282 */
283 lru->cursor = le->list.next;
284 lru_remove(lru, le);
285 return le;
286
287 case ER_DONT_EVICT:
288 break;
289
290 case ER_STOP:
291 lru->cursor = le->list.next;
292 return NULL;
293 }
294 }
295
296 h = h->next;
297
298 if (!no_sleep)
299 cond_resched();
300 }
301
302 return NULL;
303 }
304
305 /*--------------------------------------------------------------*/
306
307 /*
308 * Buffer state bits.
309 */
310 #define B_READING 0
311 #define B_WRITING 1
312 #define B_DIRTY 2
313
314 /*
315 * Describes how the block was allocated:
316 * kmem_cache_alloc(), __get_free_pages() or vmalloc().
317 * See the comment at alloc_buffer_data.
318 */
319 enum data_mode {
320 DATA_MODE_SLAB = 0,
321 DATA_MODE_GET_FREE_PAGES = 1,
322 DATA_MODE_VMALLOC = 2,
323 DATA_MODE_LIMIT = 3
324 };
325
326 struct dm_buffer {
327 /* protected by the locks in dm_buffer_cache */
328 struct rb_node node;
329
330 /* immutable, so these don't need protecting */
331 sector_t block;
332 void *data;
333 unsigned char data_mode; /* DATA_MODE_* */
334
335 /*
336 * These two fields are used in isolation, so do not need
337 * a surrounding lock.
338 */
339 atomic_t hold_count;
340 unsigned long last_accessed;
341
342 /*
343 * Everything else is protected by the mutex in
344 * dm_bufio_client
345 */
346 unsigned long state;
347 struct lru_entry lru;
348 unsigned char list_mode; /* LIST_* */
349 blk_status_t read_error;
350 blk_status_t write_error;
351 unsigned int dirty_start;
352 unsigned int dirty_end;
353 unsigned int write_start;
354 unsigned int write_end;
355 struct list_head write_list;
356 struct dm_bufio_client *c;
357 void (*end_io)(struct dm_buffer *b, blk_status_t bs);
358 #ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
359 #define MAX_STACK 10
360 unsigned int stack_len;
361 unsigned long stack_entries[MAX_STACK];
362 #endif
363 };
364
365 /*--------------------------------------------------------------*/
366
367 /*
368 * The buffer cache manages buffers, particularly:
369 * - inc/dec of holder count
370 * - setting the last_accessed field
371 * - maintains clean/dirty state along with lru
372 * - selecting buffers that match predicates
373 *
374 * It does *not* handle:
375 * - allocation/freeing of buffers.
376 * - IO
377 * - Eviction or cache sizing.
378 *
379 * cache_get() and cache_put() are threadsafe; you do not need to
380 * protect these calls with a surrounding mutex. All the other
381 * methods are not threadsafe; they do use locking primitives, but
382 * only enough to ensure get/put are threadsafe.
383 */
384
385 struct buffer_tree {
386 union {
387 struct rw_semaphore lock;
388 rwlock_t spinlock;
389 } u;
390 struct rb_root root;
391 } ____cacheline_aligned_in_smp;
392
393 struct dm_buffer_cache {
394 struct lru lru[LIST_SIZE];
395 /*
396 * We spread entries across multiple trees to reduce contention
397 * on the locks.
398 */
399 unsigned int num_locks;
400 bool no_sleep;
401 struct buffer_tree trees[];
402 };
403
404 static DEFINE_STATIC_KEY_FALSE(no_sleep_enabled);
405
406 static inline unsigned int cache_index(sector_t block, unsigned int num_locks)
407 {
408 return dm_hash_locks_index(block, num_locks);
409 }
410
411 static inline void cache_read_lock(struct dm_buffer_cache *bc, sector_t block)
412 {
413 if (static_branch_unlikely(&no_sleep_enabled) && bc->no_sleep)
414 read_lock_bh(&bc->trees[cache_index(block, bc->num_locks)].u.spinlock);
415 else
416 down_read(&bc->trees[cache_index(block, bc->num_locks)].u.lock);
417 }
418
419 static inline void cache_read_unlock(struct dm_buffer_cache *bc, sector_t block)
420 {
421 if (static_branch_unlikely(&no_sleep_enabled) && bc->no_sleep)
422 read_unlock_bh(&bc->trees[cache_index(block, bc->num_locks)].u.spinlock);
423 else
424 up_read(&bc->trees[cache_index(block, bc->num_locks)].u.lock);
425 }
426
427 static inline void cache_write_lock(struct dm_buffer_cache *bc, sector_t block)
428 {
429 if (static_branch_unlikely(&no_sleep_enabled) && bc->no_sleep)
430 write_lock_bh(&bc->trees[cache_index(block, bc->num_locks)].u.spinlock);
431 else
432 down_write(&bc->trees[cache_index(block, bc->num_locks)].u.lock);
433 }
434
435 static inline void cache_write_unlock(struct dm_buffer_cache *bc, sector_t block)
436 {
437 if (static_branch_unlikely(&no_sleep_enabled) && bc->no_sleep)
438 write_unlock_bh(&bc->trees[cache_index(block, bc->num_locks)].u.spinlock);
439 else
440 up_write(&bc->trees[cache_index(block, bc->num_locks)].u.lock);
441 }
442
443 /*
444 * Sometimes we want to repeatedly get and drop locks as part of an iteration.
445 * This struct helps avoid redundant drop and gets of the same lock.
446 */
447 struct lock_history {
448 struct dm_buffer_cache *cache;
449 bool write;
450 unsigned int previous;
451 unsigned int no_previous;
452 };
453
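/*
 * Typical usage, as in cache_evict() and cache_mark_many() below:
 *
 *	struct lock_history lh;
 *
 *	lh_init(&lh, bc, true);
 *	... call lh_next(&lh, block) for each buffer visited ...
 *	lh_exit(&lh);
 */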
454 static void lh_init(struct lock_history *lh, struct dm_buffer_cache *cache, bool write)
455 {
456 lh->cache = cache;
457 lh->write = write;
458 lh->no_previous = cache->num_locks;
459 lh->previous = lh->no_previous;
460 }
461
462 static void __lh_lock(struct lock_history *lh, unsigned int index)
463 {
464 if (lh->write) {
465 if (static_branch_unlikely(&no_sleep_enabled) && lh->cache->no_sleep)
466 write_lock_bh(&lh->cache->trees[index].u.spinlock);
467 else
468 down_write(&lh->cache->trees[index].u.lock);
469 } else {
470 if (static_branch_unlikely(&no_sleep_enabled) && lh->cache->no_sleep)
471 read_lock_bh(&lh->cache->trees[index].u.spinlock);
472 else
473 down_read(&lh->cache->trees[index].u.lock);
474 }
475 }
476
477 static void __lh_unlock(struct lock_history *lh, unsigned int index)
478 {
479 if (lh->write) {
480 if (static_branch_unlikely(&no_sleep_enabled) && lh->cache->no_sleep)
481 write_unlock_bh(&lh->cache->trees[index].u.spinlock);
482 else
483 up_write(&lh->cache->trees[index].u.lock);
484 } else {
485 if (static_branch_unlikely(&no_sleep_enabled) && lh->cache->no_sleep)
486 read_unlock_bh(&lh->cache->trees[index].u.spinlock);
487 else
488 up_read(&lh->cache->trees[index].u.lock);
489 }
490 }
491
492 /*
493 * Make sure you call this since it will unlock the final lock.
494 */
495 static void lh_exit(struct lock_history *lh)
496 {
497 if (lh->previous != lh->no_previous) {
498 __lh_unlock(lh, lh->previous);
499 lh->previous = lh->no_previous;
500 }
501 }
502
503 /*
504 * Named 'next' because there is no corresponding
505 * 'up/unlock' call since it's done automatically.
506 */
507 static void lh_next(struct lock_history *lh, sector_t b)
508 {
509 unsigned int index = cache_index(b, lh->no_previous); /* no_previous is num_locks */
510
511 if (lh->previous != lh->no_previous) {
512 if (lh->previous != index) {
513 __lh_unlock(lh, lh->previous);
514 __lh_lock(lh, index);
515 lh->previous = index;
516 }
517 } else {
518 __lh_lock(lh, index);
519 lh->previous = index;
520 }
521 }
522
523 static inline struct dm_buffer *le_to_buffer(struct lru_entry *le)
524 {
525 return container_of(le, struct dm_buffer, lru);
526 }
527
528 static struct dm_buffer *list_to_buffer(struct list_head *l)
529 {
530 struct lru_entry *le = list_entry(l, struct lru_entry, list);
531
532 if (!le)
533 return NULL;
534
535 return le_to_buffer(le);
536 }
537
538 static void cache_init(struct dm_buffer_cache *bc, unsigned int num_locks, bool no_sleep)
539 {
540 unsigned int i;
541
542 bc->num_locks = num_locks;
543 bc->no_sleep = no_sleep;
544
545 for (i = 0; i < bc->num_locks; i++) {
546 if (no_sleep)
547 rwlock_init(&bc->trees[i].u.spinlock);
548 else
549 init_rwsem(&bc->trees[i].u.lock);
550 bc->trees[i].root = RB_ROOT;
551 }
552
553 lru_init(&bc->lru[LIST_CLEAN]);
554 lru_init(&bc->lru[LIST_DIRTY]);
555 }
556
557 static void cache_destroy(struct dm_buffer_cache *bc)
558 {
559 unsigned int i;
560
561 for (i = 0; i < bc->num_locks; i++)
562 WARN_ON_ONCE(!RB_EMPTY_ROOT(&bc->trees[i].root));
563
564 lru_destroy(&bc->lru[LIST_CLEAN]);
565 lru_destroy(&bc->lru[LIST_DIRTY]);
566 }
567
568 /*--------------*/
569
570 /*
571 * Not threadsafe (or racy, depending on how you look at it).
572 */
573 static inline unsigned long cache_count(struct dm_buffer_cache *bc, int list_mode)
574 {
575 return bc->lru[list_mode].count;
576 }
577
578 static inline unsigned long cache_total(struct dm_buffer_cache *bc)
579 {
580 return cache_count(bc, LIST_CLEAN) + cache_count(bc, LIST_DIRTY);
581 }
582
583 /*--------------*/
584
585 /*
586 * Gets a specific buffer, indexed by block.
587 * If the buffer is found then its holder count will be incremented and
588 * lru_reference will be called.
589 *
590 * threadsafe
591 */
592 static struct dm_buffer *__cache_get(const struct rb_root *root, sector_t block)
593 {
594 struct rb_node *n = root->rb_node;
595 struct dm_buffer *b;
596
597 while (n) {
598 b = container_of(n, struct dm_buffer, node);
599
600 if (b->block == block)
601 return b;
602
603 n = block < b->block ? n->rb_left : n->rb_right;
604 }
605
606 return NULL;
607 }
608
609 static void __cache_inc_buffer(struct dm_buffer *b)
610 {
611 atomic_inc(&b->hold_count);
612 WRITE_ONCE(b->last_accessed, jiffies);
613 }
614
615 static struct dm_buffer *cache_get(struct dm_buffer_cache *bc, sector_t block)
616 {
617 struct dm_buffer *b;
618
619 cache_read_lock(bc, block);
620 b = __cache_get(&bc->trees[cache_index(block, bc->num_locks)].root, block);
621 if (b) {
622 lru_reference(&b->lru);
623 __cache_inc_buffer(b);
624 }
625 cache_read_unlock(bc, block);
626
627 return b;
628 }
629
630 /*--------------*/
631
632 /*
633 * Returns true if the hold count hits zero.
634 * threadsafe
635 */
636 static bool cache_put(struct dm_buffer_cache *bc, struct dm_buffer *b)
637 {
638 bool r;
639
640 cache_read_lock(bc, b->block);
641 BUG_ON(!atomic_read(&b->hold_count));
642 r = atomic_dec_and_test(&b->hold_count);
643 cache_read_unlock(bc, b->block);
644
645 return r;
646 }
647
648 /*--------------*/
649
650 typedef enum evict_result (*b_predicate)(struct dm_buffer *, void *);
651
652 /*
653 * Evicts a buffer based on a predicate. The oldest buffer that
654 * matches the predicate will be selected. In addition to matching
655 * the predicate, the selected buffer must have a hold_count of zero.
656 */
657 struct evict_wrapper {
658 struct lock_history *lh;
659 b_predicate pred;
660 void *context;
661 };
662
663 /*
664 * Wraps the buffer predicate turning it into an lru predicate. Adds
665 * extra test for hold_count.
666 */
667 static enum evict_result __evict_pred(struct lru_entry *le, void *context)
668 {
669 struct evict_wrapper *w = context;
670 struct dm_buffer *b = le_to_buffer(le);
671
672 lh_next(w->lh, b->block);
673
674 if (atomic_read(&b->hold_count))
675 return ER_DONT_EVICT;
676
677 return w->pred(b, w->context);
678 }
679
680 static struct dm_buffer *__cache_evict(struct dm_buffer_cache *bc, int list_mode,
681 b_predicate pred, void *context,
682 struct lock_history *lh)
683 {
684 struct evict_wrapper w = {.lh = lh, .pred = pred, .context = context};
685 struct lru_entry *le;
686 struct dm_buffer *b;
687
688 le = lru_evict(&bc->lru[list_mode], __evict_pred, &w, bc->no_sleep);
689 if (!le)
690 return NULL;
691
692 b = le_to_buffer(le);
693 /* __evict_pred will have locked the appropriate tree. */
694 rb_erase(&b->node, &bc->trees[cache_index(b->block, bc->num_locks)].root);
695
696 return b;
697 }
698
699 static struct dm_buffer *cache_evict(struct dm_buffer_cache *bc, int list_mode,
700 b_predicate pred, void *context)
701 {
702 struct dm_buffer *b;
703 struct lock_history lh;
704
705 lh_init(&lh, bc, true);
706 b = __cache_evict(bc, list_mode, pred, context, &lh);
707 lh_exit(&lh);
708
709 return b;
710 }
711
712 /*--------------*/
713
714 /*
715 * Mark a buffer as clean or dirty. Not threadsafe.
716 */
717 static void cache_mark(struct dm_buffer_cache *bc, struct dm_buffer *b, int list_mode)
718 {
719 cache_write_lock(bc, b->block);
720 if (list_mode != b->list_mode) {
721 lru_remove(&bc->lru[b->list_mode], &b->lru);
722 b->list_mode = list_mode;
723 lru_insert(&bc->lru[b->list_mode], &b->lru);
724 }
725 cache_write_unlock(bc, b->block);
726 }
727
728 /*--------------*/
729
730 /*
731 * Runs through the lru associated with 'old_mode'; entries that match the
732 * predicate are moved to 'new_mode'. Not threadsafe.
733 */
734 static void __cache_mark_many(struct dm_buffer_cache *bc, int old_mode, int new_mode,
735 b_predicate pred, void *context, struct lock_history *lh)
736 {
737 struct lru_entry *le;
738 struct dm_buffer *b;
739 struct evict_wrapper w = {.lh = lh, .pred = pred, .context = context};
740
741 while (true) {
742 le = lru_evict(&bc->lru[old_mode], __evict_pred, &w, bc->no_sleep);
743 if (!le)
744 break;
745
746 b = le_to_buffer(le);
747 b->list_mode = new_mode;
748 lru_insert(&bc->lru[b->list_mode], &b->lru);
749 }
750 }
751
752 static void cache_mark_many(struct dm_buffer_cache *bc, int old_mode, int new_mode,
753 b_predicate pred, void *context)
754 {
755 struct lock_history lh;
756
757 lh_init(&lh, bc, true);
758 __cache_mark_many(bc, old_mode, new_mode, pred, context, &lh);
759 lh_exit(&lh);
760 }
761
762 /*--------------*/
763
764 /*
765 * Iterates through all clean or dirty entries calling a function for each
766 * entry. The callback may terminate the iteration early. Not threadsafe.
767 */
768
769 /*
770 * Iterator functions should return one of these actions to indicate
771 * how the iteration should proceed.
772 */
773 enum it_action {
774 IT_NEXT,
775 IT_COMPLETE,
776 };
777
778 typedef enum it_action (*iter_fn)(struct dm_buffer *b, void *context);
779
780 static void __cache_iterate(struct dm_buffer_cache *bc, int list_mode,
781 iter_fn fn, void *context, struct lock_history *lh)
782 {
783 struct lru *lru = &bc->lru[list_mode];
784 struct lru_entry *le, *first;
785
786 if (!lru->cursor)
787 return;
788
789 first = le = to_le(lru->cursor);
790 do {
791 struct dm_buffer *b = le_to_buffer(le);
792
793 lh_next(lh, b->block);
794
795 switch (fn(b, context)) {
796 case IT_NEXT:
797 break;
798
799 case IT_COMPLETE:
800 return;
801 }
802 cond_resched();
803
804 le = to_le(le->list.next);
805 } while (le != first);
806 }
807
808 static void cache_iterate(struct dm_buffer_cache *bc, int list_mode,
809 iter_fn fn, void *context)
810 {
811 struct lock_history lh;
812
813 lh_init(&lh, bc, false);
814 __cache_iterate(bc, list_mode, fn, context, &lh);
815 lh_exit(&lh);
816 }
817
818 /*--------------*/
819
820 /*
821 * Passes ownership of the buffer to the cache. Returns false if the
822 * buffer was already present (in which case ownership does not pass).
823 * e.g. because of a race with another thread.
824 *
825 * Holder count should be 1 on insertion.
826 *
827 * Not threadsafe.
828 */
829 static bool __cache_insert(struct rb_root *root, struct dm_buffer *b)
830 {
831 struct rb_node **new = &root->rb_node, *parent = NULL;
832 struct dm_buffer *found;
833
834 while (*new) {
835 found = container_of(*new, struct dm_buffer, node);
836
837 if (found->block == b->block)
838 return false;
839
840 parent = *new;
841 new = b->block < found->block ?
842 &found->node.rb_left : &found->node.rb_right;
843 }
844
845 rb_link_node(&b->node, parent, new);
846 rb_insert_color(&b->node, root);
847
848 return true;
849 }
850
851 static bool cache_insert(struct dm_buffer_cache *bc, struct dm_buffer *b)
852 {
853 bool r;
854
855 if (WARN_ON_ONCE(b->list_mode >= LIST_SIZE))
856 return false;
857
858 cache_write_lock(bc, b->block);
859 BUG_ON(atomic_read(&b->hold_count) != 1);
860 r = __cache_insert(&bc->trees[cache_index(b->block, bc->num_locks)].root, b);
861 if (r)
862 lru_insert(&bc->lru[b->list_mode], &b->lru);
863 cache_write_unlock(bc, b->block);
864
865 return r;
866 }
867
868 /*--------------*/
869
870 /*
871 * Removes a buffer from the cache; ownership of the buffer passes back to the caller.
872 * Fails unless the hold_count is one (i.e. the caller holds the only reference).
873 *
874 * Not threadsafe.
875 */
876 static bool cache_remove(struct dm_buffer_cache *bc, struct dm_buffer *b)
877 {
878 bool r;
879
880 cache_write_lock(bc, b->block);
881
882 if (atomic_read(&b->hold_count) != 1) {
883 r = false;
884 } else {
885 r = true;
886 rb_erase(&b->node, &bc->trees[cache_index(b->block, bc->num_locks)].root);
887 lru_remove(&bc->lru[b->list_mode], &b->lru);
888 }
889
890 cache_write_unlock(bc, b->block);
891
892 return r;
893 }
894
895 /*--------------*/
896
897 typedef void (*b_release)(struct dm_buffer *);
898
899 static struct dm_buffer *__find_next(struct rb_root *root, sector_t block)
900 {
901 struct rb_node *n = root->rb_node;
902 struct dm_buffer *b;
903 struct dm_buffer *best = NULL;
904
905 while (n) {
906 b = container_of(n, struct dm_buffer, node);
907
908 if (b->block == block)
909 return b;
910
911 if (block <= b->block) {
912 n = n->rb_left;
913 best = b;
914 } else {
915 n = n->rb_right;
916 }
917 }
918
919 return best;
920 }
921
922 static void __remove_range(struct dm_buffer_cache *bc,
923 struct rb_root *root,
924 sector_t begin, sector_t end,
925 b_predicate pred, b_release release)
926 {
927 struct dm_buffer *b;
928
929 while (true) {
930 cond_resched();
931
932 b = __find_next(root, begin);
933 if (!b || (b->block >= end))
934 break;
935
936 begin = b->block + 1;
937
938 if (atomic_read(&b->hold_count))
939 continue;
940
941 if (pred(b, NULL) == ER_EVICT) {
942 rb_erase(&b->node, root);
943 lru_remove(&bc->lru[b->list_mode], &b->lru);
944 release(b);
945 }
946 }
947 }
948
949 static void cache_remove_range(struct dm_buffer_cache *bc,
950 sector_t begin, sector_t end,
951 b_predicate pred, b_release release)
952 {
953 unsigned int i;
954
955 BUG_ON(bc->no_sleep);
956 for (i = 0; i < bc->num_locks; i++) {
957 down_write(&bc->trees[i].u.lock);
958 __remove_range(bc, &bc->trees[i].root, begin, end, pred, release);
959 up_write(&bc->trees[i].u.lock);
960 }
961 }
962
963 /*----------------------------------------------------------------*/
964
965 /*
966 * Linking of buffers:
967 * All buffers are linked to buffer_cache with their node field.
968 *
969 * Clean buffers that are not being written (B_WRITING not set)
970 * are linked to lru[LIST_CLEAN] with their lru_list field.
971 *
972 * Dirty and clean buffers that are being written are linked to
973 * lru[LIST_DIRTY] with their lru_list field. When the write
974 * finishes, the buffer cannot be relinked immediately (because we
975 * are in an interrupt context and relinking requires process
976 * context), so some clean-not-writing buffers can be held on
977 * dirty_lru too. They are later added to lru in the process
978 * context.
979 */
980 struct dm_bufio_client {
981 struct block_device *bdev;
982 unsigned int block_size;
983 s8 sectors_per_block_bits;
984
985 bool no_sleep;
986 struct mutex lock;
987 spinlock_t spinlock;
988
989 int async_write_error;
990
991 void (*alloc_callback)(struct dm_buffer *buf);
992 void (*write_callback)(struct dm_buffer *buf);
993 struct kmem_cache *slab_buffer;
994 struct kmem_cache *slab_cache;
995 struct dm_io_client *dm_io;
996
997 struct list_head reserved_buffers;
998 unsigned int need_reserved_buffers;
999
1000 unsigned int minimum_buffers;
1001
1002 sector_t start;
1003
1004 struct shrinker shrinker;
1005 struct work_struct shrink_work;
1006 atomic_long_t need_shrink;
1007
1008 wait_queue_head_t free_buffer_wait;
1009
1010 struct list_head client_list;
1011
1012 /*
1013 * Used by global_cleanup to sort the clients list.
1014 */
1015 unsigned long oldest_buffer;
1016
1017 struct dm_buffer_cache cache; /* must be last member */
1018 };
1019
1020 /*----------------------------------------------------------------*/
1021
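/*
 * True when the caller is inside a bio submission path (current->bio_list
 * is only set there). Used to pick the lock subclass in dm_bufio_lock()
 * and to warn about calls that may sleep from that context.
 */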
1022 #define dm_bufio_in_request() (!!current->bio_list)
1023
1024 static void dm_bufio_lock(struct dm_bufio_client *c)
1025 {
1026 if (static_branch_unlikely(&no_sleep_enabled) && c->no_sleep)
1027 spin_lock_bh(&c->spinlock);
1028 else
1029 mutex_lock_nested(&c->lock, dm_bufio_in_request());
1030 }
1031
1032 static void dm_bufio_unlock(struct dm_bufio_client *c)
1033 {
1034 if (static_branch_unlikely(&no_sleep_enabled) && c->no_sleep)
1035 spin_unlock_bh(&c->spinlock);
1036 else
1037 mutex_unlock(&c->lock);
1038 }
1039
1040 /*----------------------------------------------------------------*/
1041
1042 /*
1043 * Default cache size: available memory divided by the ratio.
1044 */
1045 static unsigned long dm_bufio_default_cache_size;
1046
1047 /*
1048 * Total cache size set by the user.
1049 */
1050 static unsigned long dm_bufio_cache_size;
1051
1052 /*
1053 * A copy of dm_bufio_cache_size because dm_bufio_cache_size can change
1054 * at any time. If it disagrees, the user has changed cache size.
1055 */
1056 static unsigned long dm_bufio_cache_size_latch;
1057
1058 static DEFINE_SPINLOCK(global_spinlock);
1059
1060 /*
1061 * Buffers are freed after this timeout
1062 */
1063 static unsigned int dm_bufio_max_age = DM_BUFIO_DEFAULT_AGE_SECS;
1064 static unsigned long dm_bufio_retain_bytes = DM_BUFIO_DEFAULT_RETAIN_BYTES;
1065
1066 static unsigned long dm_bufio_peak_allocated;
1067 static unsigned long dm_bufio_allocated_kmem_cache;
1068 static unsigned long dm_bufio_allocated_get_free_pages;
1069 static unsigned long dm_bufio_allocated_vmalloc;
1070 static unsigned long dm_bufio_current_allocated;
1071
1072 /*----------------------------------------------------------------*/
1073
1074 /*
1075 * The current number of clients.
1076 */
1077 static int dm_bufio_client_count;
1078
1079 /*
1080 * The list of all clients.
1081 */
1082 static LIST_HEAD(dm_bufio_all_clients);
1083
1084 /*
1085 * This mutex protects dm_bufio_cache_size_latch and dm_bufio_client_count
1086 */
1087 static DEFINE_MUTEX(dm_bufio_clients_lock);
1088
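/*
 * Background work: dm_bufio_replacement_work is queued from
 * adjust_total_allocated() when the global cache size is exceeded, and
 * dm_bufio_cleanup_old_work periodically frees buffers that have not been
 * used for longer than the configured maximum age.
 */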
1089 static struct workqueue_struct *dm_bufio_wq;
1090 static struct delayed_work dm_bufio_cleanup_old_work;
1091 static struct work_struct dm_bufio_replacement_work;
1092
1093
1094 #ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
1095 static void buffer_record_stack(struct dm_buffer *b)
1096 {
1097 b->stack_len = stack_trace_save(b->stack_entries, MAX_STACK, 2);
1098 }
1099 #endif
1100
1101 /*----------------------------------------------------------------*/
1102
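/*
 * Account a buffer's data in the global allocation counters; when the total
 * grows past dm_bufio_cache_size, kick the replacement work to evict buffers.
 */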
1103 static void adjust_total_allocated(struct dm_buffer *b, bool unlink)
1104 {
1105 unsigned char data_mode;
1106 long diff;
1107
1108 static unsigned long * const class_ptr[DATA_MODE_LIMIT] = {
1109 &dm_bufio_allocated_kmem_cache,
1110 &dm_bufio_allocated_get_free_pages,
1111 &dm_bufio_allocated_vmalloc,
1112 };
1113
1114 data_mode = b->data_mode;
1115 diff = (long)b->c->block_size;
1116 if (unlink)
1117 diff = -diff;
1118
1119 spin_lock(&global_spinlock);
1120
1121 *class_ptr[data_mode] += diff;
1122
1123 dm_bufio_current_allocated += diff;
1124
1125 if (dm_bufio_current_allocated > dm_bufio_peak_allocated)
1126 dm_bufio_peak_allocated = dm_bufio_current_allocated;
1127
1128 if (!unlink) {
1129 if (dm_bufio_current_allocated > dm_bufio_cache_size)
1130 queue_work(dm_bufio_wq, &dm_bufio_replacement_work);
1131 }
1132
1133 spin_unlock(&global_spinlock);
1134 }
1135
1136 /*
1137 * Change the number of clients and recalculate per-client limit.
1138 */
1139 static void __cache_size_refresh(void)
1140 {
1141 if (WARN_ON(!mutex_is_locked(&dm_bufio_clients_lock)))
1142 return;
1143 if (WARN_ON(dm_bufio_client_count < 0))
1144 return;
1145
1146 dm_bufio_cache_size_latch = READ_ONCE(dm_bufio_cache_size);
1147
1148 /*
1149 * Use default if set to 0 and report the actual cache size used.
1150 */
1151 if (!dm_bufio_cache_size_latch) {
1152 (void)cmpxchg(&dm_bufio_cache_size, 0,
1153 dm_bufio_default_cache_size);
1154 dm_bufio_cache_size_latch = dm_bufio_default_cache_size;
1155 }
1156 }
1157
1158 /*
1159 * Allocating buffer data.
1160 *
1161 * Small buffers are allocated with kmem_cache, to use space optimally.
1162 *
1163 * For large buffers, we choose between get_free_pages and vmalloc.
1164 * Each has advantages and disadvantages.
1165 *
1166 * __get_free_pages can randomly fail if the memory is fragmented.
1167 * __vmalloc won't randomly fail, but vmalloc space is limited (it may be
1168 * as low as 128M) so using it for caching is not appropriate.
1169 *
1170 * If the allocation may fail we use __get_free_pages. Memory fragmentation
1171 * won't have a fatal effect here, but it just causes flushes of some other
1172 * buffers and more I/O will be performed. Don't use __get_free_pages if it
1173 * always fails (i.e. order > MAX_ORDER).
1174 *
1175 * If the allocation shouldn't fail we use __vmalloc. This is only for the
1176 * initial reserve allocation, so there's no risk of wasting all vmalloc
1177 * space.
1178 */
1179 static void *alloc_buffer_data(struct dm_bufio_client *c, gfp_t gfp_mask,
1180 unsigned char *data_mode)
1181 {
1182 if (unlikely(c->slab_cache != NULL)) {
1183 *data_mode = DATA_MODE_SLAB;
1184 return kmem_cache_alloc(c->slab_cache, gfp_mask);
1185 }
1186
1187 if (c->block_size <= KMALLOC_MAX_SIZE &&
1188 gfp_mask & __GFP_NORETRY) {
1189 *data_mode = DATA_MODE_GET_FREE_PAGES;
1190 return (void *)__get_free_pages(gfp_mask,
1191 c->sectors_per_block_bits - (PAGE_SHIFT - SECTOR_SHIFT));
1192 }
1193
1194 *data_mode = DATA_MODE_VMALLOC;
1195
1196 return __vmalloc(c->block_size, gfp_mask);
1197 }
1198
1199 /*
1200 * Free buffer's data.
1201 */
1202 static void free_buffer_data(struct dm_bufio_client *c,
1203 void *data, unsigned char data_mode)
1204 {
1205 switch (data_mode) {
1206 case DATA_MODE_SLAB:
1207 kmem_cache_free(c->slab_cache, data);
1208 break;
1209
1210 case DATA_MODE_GET_FREE_PAGES:
1211 free_pages((unsigned long)data,
1212 c->sectors_per_block_bits - (PAGE_SHIFT - SECTOR_SHIFT));
1213 break;
1214
1215 case DATA_MODE_VMALLOC:
1216 vfree(data);
1217 break;
1218
1219 default:
1220 DMCRIT("dm_bufio_free_buffer_data: bad data mode: %d",
1221 data_mode);
1222 BUG();
1223 }
1224 }
1225
1226 /*
1227 * Allocate buffer and its data.
1228 */
1229 static struct dm_buffer *alloc_buffer(struct dm_bufio_client *c, gfp_t gfp_mask)
1230 {
1231 struct dm_buffer *b = kmem_cache_alloc(c->slab_buffer, gfp_mask);
1232
1233 if (!b)
1234 return NULL;
1235
1236 b->c = c;
1237
1238 b->data = alloc_buffer_data(c, gfp_mask, &b->data_mode);
1239 if (!b->data) {
1240 kmem_cache_free(c->slab_buffer, b);
1241 return NULL;
1242 }
1243 adjust_total_allocated(b, false);
1244
1245 #ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
1246 b->stack_len = 0;
1247 #endif
1248 return b;
1249 }
1250
1251 /*
1252 * Free buffer and its data.
1253 */
1254 static void free_buffer(struct dm_buffer *b)
1255 {
1256 struct dm_bufio_client *c = b->c;
1257
1258 adjust_total_allocated(b, true);
1259 free_buffer_data(c, b->data, b->data_mode);
1260 kmem_cache_free(c->slab_buffer, b);
1261 }
1262
1263 /*
1264 *--------------------------------------------------------------------------
1265 * Submit I/O on the buffer.
1266 *
1267 * Bio interface is faster but it has some problems:
1268 * the vector list is limited (increasing this limit increases
1269 * memory-consumption per buffer, so it is not viable);
1270 *
1271 * the memory must be direct-mapped, not vmalloced;
1272 *
1273 * If the buffer is small enough (up to DM_BUFIO_INLINE_VECS pages) and
1274 * it is not vmalloced, try using the bio interface.
1275 *
1276 * If the buffer is big, if it is vmalloced or if the underlying device
1277 * rejects the bio because it is too large, use dm-io layer to do the I/O.
1278 * The dm-io layer splits the I/O into multiple requests, avoiding the above
1279 * shortcomings.
1280 *--------------------------------------------------------------------------
1281 */
1282
1283 /*
1284 * dm-io completion routine. It just calls b->bio.bi_end_io, pretending
1285 * that the request was handled directly with bio interface.
1286 */
1287 static void dmio_complete(unsigned long error, void *context)
1288 {
1289 struct dm_buffer *b = context;
1290
1291 b->end_io(b, unlikely(error != 0) ? BLK_STS_IOERR : 0);
1292 }
1293
1294 static void use_dmio(struct dm_buffer *b, enum req_op op, sector_t sector,
1295 unsigned int n_sectors, unsigned int offset)
1296 {
1297 int r;
1298 struct dm_io_request io_req = {
1299 .bi_opf = op,
1300 .notify.fn = dmio_complete,
1301 .notify.context = b,
1302 .client = b->c->dm_io,
1303 };
1304 struct dm_io_region region = {
1305 .bdev = b->c->bdev,
1306 .sector = sector,
1307 .count = n_sectors,
1308 };
1309
1310 if (b->data_mode != DATA_MODE_VMALLOC) {
1311 io_req.mem.type = DM_IO_KMEM;
1312 io_req.mem.ptr.addr = (char *)b->data + offset;
1313 } else {
1314 io_req.mem.type = DM_IO_VMA;
1315 io_req.mem.ptr.vma = (char *)b->data + offset;
1316 }
1317
1318 r = dm_io(&io_req, 1, &region, NULL, IOPRIO_DEFAULT);
1319 if (unlikely(r))
1320 b->end_io(b, errno_to_blk_status(r));
1321 }
1322
1323 static void bio_complete(struct bio *bio)
1324 {
1325 struct dm_buffer *b = bio->bi_private;
1326 blk_status_t status = bio->bi_status;
1327
1328 bio_uninit(bio);
1329 kfree(bio);
1330 b->end_io(b, status);
1331 }
1332
1333 static void use_bio(struct dm_buffer *b, enum req_op op, sector_t sector,
1334 unsigned int n_sectors, unsigned int offset)
1335 {
1336 struct bio *bio;
1337 char *ptr;
1338 unsigned int len;
1339
1340 bio = bio_kmalloc(1, GFP_NOWAIT | __GFP_NORETRY | __GFP_NOWARN);
1341 if (!bio) {
1342 use_dmio(b, op, sector, n_sectors, offset);
1343 return;
1344 }
1345 bio_init(bio, b->c->bdev, bio->bi_inline_vecs, 1, op);
1346 bio->bi_iter.bi_sector = sector;
1347 bio->bi_end_io = bio_complete;
1348 bio->bi_private = b;
1349
1350 ptr = (char *)b->data + offset;
1351 len = n_sectors << SECTOR_SHIFT;
1352
1353 __bio_add_page(bio, virt_to_page(ptr), len, offset_in_page(ptr));
1354
1355 submit_bio(bio);
1356 }
1357
1358 static inline sector_t block_to_sector(struct dm_bufio_client *c, sector_t block)
1359 {
1360 sector_t sector;
1361
1362 if (likely(c->sectors_per_block_bits >= 0))
1363 sector = block << c->sectors_per_block_bits;
1364 else
1365 sector = block * (c->block_size >> SECTOR_SHIFT);
1366 sector += c->start;
1367
1368 return sector;
1369 }
1370
1371 static void submit_io(struct dm_buffer *b, enum req_op op,
1372 void (*end_io)(struct dm_buffer *, blk_status_t))
1373 {
1374 unsigned int n_sectors;
1375 sector_t sector;
1376 unsigned int offset, end;
1377
1378 b->end_io = end_io;
1379
1380 sector = block_to_sector(b->c, b->block);
1381
1382 if (op != REQ_OP_WRITE) {
1383 n_sectors = b->c->block_size >> SECTOR_SHIFT;
1384 offset = 0;
1385 } else {
1386 if (b->c->write_callback)
1387 b->c->write_callback(b);
1388 offset = b->write_start;
1389 end = b->write_end;
1390 offset &= -DM_BUFIO_WRITE_ALIGN;
1391 end += DM_BUFIO_WRITE_ALIGN - 1;
1392 end &= -DM_BUFIO_WRITE_ALIGN;
1393 if (unlikely(end > b->c->block_size))
1394 end = b->c->block_size;
1395
1396 sector += offset >> SECTOR_SHIFT;
1397 n_sectors = (end - offset) >> SECTOR_SHIFT;
1398 }
1399
1400 if (b->data_mode != DATA_MODE_VMALLOC)
1401 use_bio(b, op, sector, n_sectors, offset);
1402 else
1403 use_dmio(b, op, sector, n_sectors, offset);
1404 }
1405
1406 /*
1407 *--------------------------------------------------------------
1408 * Writing dirty buffers
1409 *--------------------------------------------------------------
1410 */
1411
1412 /*
1413 * The endio routine for write.
1414 *
1415 * Set the error, clear B_WRITING bit and wake anyone who was waiting on
1416 * it.
1417 */
1418 static void write_endio(struct dm_buffer *b, blk_status_t status)
1419 {
1420 b->write_error = status;
1421 if (unlikely(status)) {
1422 struct dm_bufio_client *c = b->c;
1423
1424 (void)cmpxchg(&c->async_write_error, 0,
1425 blk_status_to_errno(status));
1426 }
1427
1428 BUG_ON(!test_bit(B_WRITING, &b->state));
1429
1430 smp_mb__before_atomic();
1431 clear_bit(B_WRITING, &b->state);
1432 smp_mb__after_atomic();
1433
1434 wake_up_bit(&b->state, B_WRITING);
1435 }
1436
1437 /*
1438 * Initiate a write on a dirty buffer, but don't wait for it.
1439 *
1440 * - If the buffer is not dirty, exit.
1441 * - If there is some previous write going on, wait for it to finish (we can't
1442 * have two writes on the same buffer simultaneously).
1443 * - Submit our write and don't wait on it. We set B_WRITING indicating
1444 * that there is a write in progress.
1445 */
1446 static void __write_dirty_buffer(struct dm_buffer *b,
1447 struct list_head *write_list)
1448 {
1449 if (!test_bit(B_DIRTY, &b->state))
1450 return;
1451
1452 clear_bit(B_DIRTY, &b->state);
1453 wait_on_bit_lock_io(&b->state, B_WRITING, TASK_UNINTERRUPTIBLE);
1454
1455 b->write_start = b->dirty_start;
1456 b->write_end = b->dirty_end;
1457
1458 if (!write_list)
1459 submit_io(b, REQ_OP_WRITE, write_endio);
1460 else
1461 list_add_tail(&b->write_list, write_list);
1462 }
1463
1464 static void __flush_write_list(struct list_head *write_list)
1465 {
1466 struct blk_plug plug;
1467
1468 blk_start_plug(&plug);
1469 while (!list_empty(write_list)) {
1470 struct dm_buffer *b =
1471 list_entry(write_list->next, struct dm_buffer, write_list);
1472 list_del(&b->write_list);
1473 submit_io(b, REQ_OP_WRITE, write_endio);
1474 cond_resched();
1475 }
1476 blk_finish_plug(&plug);
1477 }
1478
1479 /*
1480 * Wait until any activity on the buffer finishes. Possibly write the
1481 * buffer if it is dirty. When this function finishes, there is no I/O
1482 * running on the buffer and the buffer is not dirty.
1483 */
1484 static void __make_buffer_clean(struct dm_buffer *b)
1485 {
1486 BUG_ON(atomic_read(&b->hold_count));
1487
1488 /* smp_load_acquire() pairs with read_endio()'s smp_mb__before_atomic() */
1489 if (!smp_load_acquire(&b->state)) /* fast case */
1490 return;
1491
1492 wait_on_bit_io(&b->state, B_READING, TASK_UNINTERRUPTIBLE);
1493 __write_dirty_buffer(b, NULL);
1494 wait_on_bit_io(&b->state, B_WRITING, TASK_UNINTERRUPTIBLE);
1495 }
1496
1497 static enum evict_result is_clean(struct dm_buffer *b, void *context)
1498 {
1499 struct dm_bufio_client *c = context;
1500
1501 /* These should never happen */
1502 if (WARN_ON_ONCE(test_bit(B_WRITING, &b->state)))
1503 return ER_DONT_EVICT;
1504 if (WARN_ON_ONCE(test_bit(B_DIRTY, &b->state)))
1505 return ER_DONT_EVICT;
1506 if (WARN_ON_ONCE(b->list_mode != LIST_CLEAN))
1507 return ER_DONT_EVICT;
1508
1509 if (static_branch_unlikely(&no_sleep_enabled) && c->no_sleep &&
1510 unlikely(test_bit(B_READING, &b->state)))
1511 return ER_DONT_EVICT;
1512
1513 return ER_EVICT;
1514 }
1515
1516 static enum evict_result is_dirty(struct dm_buffer *b, void *context)
1517 {
1518 /* These should never happen */
1519 if (WARN_ON_ONCE(test_bit(B_READING, &b->state)))
1520 return ER_DONT_EVICT;
1521 if (WARN_ON_ONCE(b->list_mode != LIST_DIRTY))
1522 return ER_DONT_EVICT;
1523
1524 return ER_EVICT;
1525 }
1526
1527 /*
1528 * Find some buffer that is not held by anybody, clean it, unlink it and
1529 * return it.
1530 */
1531 static struct dm_buffer *__get_unclaimed_buffer(struct dm_bufio_client *c)
1532 {
1533 struct dm_buffer *b;
1534
1535 b = cache_evict(&c->cache, LIST_CLEAN, is_clean, c);
1536 if (b) {
1537 /* this also waits for pending reads */
1538 __make_buffer_clean(b);
1539 return b;
1540 }
1541
1542 if (static_branch_unlikely(&no_sleep_enabled) && c->no_sleep)
1543 return NULL;
1544
1545 b = cache_evict(&c->cache, LIST_DIRTY, is_dirty, NULL);
1546 if (b) {
1547 __make_buffer_clean(b);
1548 return b;
1549 }
1550
1551 return NULL;
1552 }
1553
1554 /*
1555 * Wait until some other threads free some buffer or release hold count on
1556 * some buffer.
1557 *
1558 * This function is entered with c->lock held, drops it and regains it
1559 * before exiting.
1560 */
1561 static void __wait_for_free_buffer(struct dm_bufio_client *c)
1562 {
1563 DECLARE_WAITQUEUE(wait, current);
1564
1565 add_wait_queue(&c->free_buffer_wait, &wait);
1566 set_current_state(TASK_UNINTERRUPTIBLE);
1567 dm_bufio_unlock(c);
1568
1569 /*
1570 * It's possible to miss a wake up event since we don't always
1571 * hold c->lock when wake_up is called. So we have a timeout here,
1572 * just in case.
1573 */
1574 io_schedule_timeout(5 * HZ);
1575
1576 remove_wait_queue(&c->free_buffer_wait, &wait);
1577
1578 dm_bufio_lock(c);
1579 }
1580
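/*
 * How a buffer is being obtained: NF_FRESH skips the read (the caller will
 * overwrite the data), NF_READ and NF_PREFETCH issue a read, and NF_GET
 * only succeeds if the block is already cached.
 */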
1581 enum new_flag {
1582 NF_FRESH = 0,
1583 NF_READ = 1,
1584 NF_GET = 2,
1585 NF_PREFETCH = 3
1586 };
1587
1588 /*
1589 * Allocate a new buffer. If the allocation is not possible, wait until
1590 * some other thread frees a buffer.
1591 *
1592 * May drop the lock and regain it.
1593 */
1594 static struct dm_buffer *__alloc_buffer_wait_no_callback(struct dm_bufio_client *c, enum new_flag nf)
1595 {
1596 struct dm_buffer *b;
1597 bool tried_noio_alloc = false;
1598
1599 /*
1600 * dm-bufio is resistant to allocation failures (it just keeps
1601 * one buffer reserved in case all the allocations fail).
1602 * So set flags to not try too hard:
1603 * GFP_NOWAIT: don't wait; if we need to sleep we'll release our
1604 * mutex and wait ourselves.
1605 * __GFP_NORETRY: don't retry and rather return failure
1606 * __GFP_NOMEMALLOC: don't use emergency reserves
1607 * __GFP_NOWARN: don't print a warning in case of failure
1608 *
1609 * For debugging, if we set the cache size to 1, no new buffers will
1610 * be allocated.
1611 */
1612 while (1) {
1613 if (dm_bufio_cache_size_latch != 1) {
1614 b = alloc_buffer(c, GFP_NOWAIT | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
1615 if (b)
1616 return b;
1617 }
1618
1619 if (nf == NF_PREFETCH)
1620 return NULL;
1621
1622 if (dm_bufio_cache_size_latch != 1 && !tried_noio_alloc) {
1623 dm_bufio_unlock(c);
1624 b = alloc_buffer(c, GFP_NOIO | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
1625 dm_bufio_lock(c);
1626 if (b)
1627 return b;
1628 tried_noio_alloc = true;
1629 }
1630
1631 if (!list_empty(&c->reserved_buffers)) {
1632 b = list_to_buffer(c->reserved_buffers.next);
1633 list_del(&b->lru.list);
1634 c->need_reserved_buffers++;
1635
1636 return b;
1637 }
1638
1639 b = __get_unclaimed_buffer(c);
1640 if (b)
1641 return b;
1642
1643 __wait_for_free_buffer(c);
1644 }
1645 }
1646
1647 static struct dm_buffer *__alloc_buffer_wait(struct dm_bufio_client *c, enum new_flag nf)
1648 {
1649 struct dm_buffer *b = __alloc_buffer_wait_no_callback(c, nf);
1650
1651 if (!b)
1652 return NULL;
1653
1654 if (c->alloc_callback)
1655 c->alloc_callback(b);
1656
1657 return b;
1658 }
1659
1660 /*
1661 * Free a buffer and wake other threads waiting for free buffers.
1662 */
1663 static void __free_buffer_wake(struct dm_buffer *b)
1664 {
1665 struct dm_bufio_client *c = b->c;
1666
1667 b->block = -1;
1668 if (!c->need_reserved_buffers)
1669 free_buffer(b);
1670 else {
1671 list_add(&b->lru.list, &c->reserved_buffers);
1672 c->need_reserved_buffers--;
1673 }
1674
1675 /*
1676 * We hold the bufio lock here, so no one can add entries to the
1677 * wait queue anyway.
1678 */
1679 if (unlikely(waitqueue_active(&c->free_buffer_wait)))
1680 wake_up(&c->free_buffer_wait);
1681 }
1682
1683 static enum evict_result cleaned(struct dm_buffer *b, void *context)
1684 {
1685 if (WARN_ON_ONCE(test_bit(B_READING, &b->state)))
1686 return ER_DONT_EVICT; /* should never happen */
1687
1688 if (test_bit(B_DIRTY, &b->state) || test_bit(B_WRITING, &b->state))
1689 return ER_DONT_EVICT;
1690 else
1691 return ER_EVICT;
1692 }
1693
1694 static void __move_clean_buffers(struct dm_bufio_client *c)
1695 {
1696 cache_mark_many(&c->cache, LIST_DIRTY, LIST_CLEAN, cleaned, NULL);
1697 }
1698
1699 struct write_context {
1700 int no_wait;
1701 struct list_head *write_list;
1702 };
1703
1704 static enum it_action write_one(struct dm_buffer *b, void *context)
1705 {
1706 struct write_context *wc = context;
1707
1708 if (wc->no_wait && test_bit(B_WRITING, &b->state))
1709 return IT_COMPLETE;
1710
1711 __write_dirty_buffer(b, wc->write_list);
1712 return IT_NEXT;
1713 }
1714
1715 static void __write_dirty_buffers_async(struct dm_bufio_client *c, int no_wait,
1716 struct list_head *write_list)
1717 {
1718 struct write_context wc = {.no_wait = no_wait, .write_list = write_list};
1719
1720 __move_clean_buffers(c);
1721 cache_iterate(&c->cache, LIST_DIRTY, write_one, &wc);
1722 }
1723
1724 /*
1725 * Check if we're over the writeback watermark.
1726 * If the number of dirty buffers exceeds DM_BUFIO_WRITEBACK_RATIO times
1727 * the number of clean buffers, start writing them back asynchronously.
1728 */
1729 static void __check_watermark(struct dm_bufio_client *c,
1730 struct list_head *write_list)
1731 {
1732 if (cache_count(&c->cache, LIST_DIRTY) >
1733 cache_count(&c->cache, LIST_CLEAN) * DM_BUFIO_WRITEBACK_RATIO)
1734 __write_dirty_buffers_async(c, 1, write_list);
1735 }
1736
1737 /*
1738 *--------------------------------------------------------------
1739 * Getting a buffer
1740 *--------------------------------------------------------------
1741 */
1742
1743 static void cache_put_and_wake(struct dm_bufio_client *c, struct dm_buffer *b)
1744 {
1745 /*
1746 * Relying on waitqueue_active() is racy, but we sleep
1747 * with a timeout anyway (see __wait_for_free_buffer()).
1748 */
1749 if (cache_put(&c->cache, b) &&
1750 unlikely(waitqueue_active(&c->free_buffer_wait)))
1751 wake_up(&c->free_buffer_wait);
1752 }
1753
1754 /*
1755 * This assumes you have already checked the cache to see if the buffer
1756 * is already present (it will recheck after dropping the lock for allocation).
1757 */
1758 static struct dm_buffer *__bufio_new(struct dm_bufio_client *c, sector_t block,
1759 enum new_flag nf, int *need_submit,
1760 struct list_head *write_list)
1761 {
1762 struct dm_buffer *b, *new_b = NULL;
1763
1764 *need_submit = 0;
1765
1766 /* This can't be called with NF_GET */
1767 if (WARN_ON_ONCE(nf == NF_GET))
1768 return NULL;
1769
1770 new_b = __alloc_buffer_wait(c, nf);
1771 if (!new_b)
1772 return NULL;
1773
1774 /*
1775 * We've had a period where the mutex was unlocked, so need to
1776 * recheck the buffer tree.
1777 */
1778 b = cache_get(&c->cache, block);
1779 if (b) {
1780 __free_buffer_wake(new_b);
1781 goto found_buffer;
1782 }
1783
1784 __check_watermark(c, write_list);
1785
1786 b = new_b;
1787 atomic_set(&b->hold_count, 1);
1788 WRITE_ONCE(b->last_accessed, jiffies);
1789 b->block = block;
1790 b->read_error = 0;
1791 b->write_error = 0;
1792 b->list_mode = LIST_CLEAN;
1793
1794 if (nf == NF_FRESH)
1795 b->state = 0;
1796 else {
1797 b->state = 1 << B_READING;
1798 *need_submit = 1;
1799 }
1800
1801 /*
1802 * We mustn't insert into the cache until the B_READING state
1803 * is set. Otherwise another thread could get it and use
1804 * it before it had been read.
1805 */
1806 cache_insert(&c->cache, b);
1807
1808 return b;
1809
1810 found_buffer:
1811 if (nf == NF_PREFETCH) {
1812 cache_put_and_wake(c, b);
1813 return NULL;
1814 }
1815
1816 /*
1817 * Note: it is essential that we don't wait for the buffer to be
1818 * read if dm_bufio_get function is used. Both dm_bufio_get and
1819 * dm_bufio_prefetch can be used in the driver request routine.
1820 * If the user called both dm_bufio_prefetch and dm_bufio_get on
1821 * the same buffer, it would deadlock if we waited.
1822 */
1823 if (nf == NF_GET && unlikely(test_bit_acquire(B_READING, &b->state))) {
1824 cache_put_and_wake(c, b);
1825 return NULL;
1826 }
1827
1828 return b;
1829 }
1830
1831 /*
1832 * The endio routine for reading: set the error, clear the bit and wake up
1833 * anyone waiting on the buffer.
1834 */
1835 static void read_endio(struct dm_buffer *b, blk_status_t status)
1836 {
1837 b->read_error = status;
1838
1839 BUG_ON(!test_bit(B_READING, &b->state));
1840
1841 smp_mb__before_atomic();
1842 clear_bit(B_READING, &b->state);
1843 smp_mb__after_atomic();
1844
1845 wake_up_bit(&b->state, B_READING);
1846 }
1847
1848 /*
1849 * A common routine for dm_bufio_new and dm_bufio_read. Operation of these
1850 * functions is similar except that dm_bufio_new doesn't read the
1851 * buffer from the disk (assuming that the caller overwrites all the data
1852 * and uses dm_bufio_mark_buffer_dirty to write new data back).
1853 */
1854 static void *new_read(struct dm_bufio_client *c, sector_t block,
1855 enum new_flag nf, struct dm_buffer **bp)
1856 {
1857 int need_submit = 0;
1858 struct dm_buffer *b;
1859
1860 LIST_HEAD(write_list);
1861
1862 *bp = NULL;
1863
1864 /*
1865 * Fast path, hopefully the block is already in the cache. No need
1866 * to get the client lock for this.
1867 */
1868 b = cache_get(&c->cache, block);
1869 if (b) {
1870 if (nf == NF_PREFETCH) {
1871 cache_put_and_wake(c, b);
1872 return NULL;
1873 }
1874
1875 /*
1876 * Note: it is essential that we don't wait for the buffer to be
1877 * read if dm_bufio_get function is used. Both dm_bufio_get and
1878 * dm_bufio_prefetch can be used in the driver request routine.
1879 * If the user called both dm_bufio_prefetch and dm_bufio_get on
1880 * the same buffer, it would deadlock if we waited.
1881 */
1882 if (nf == NF_GET && unlikely(test_bit_acquire(B_READING, &b->state))) {
1883 cache_put_and_wake(c, b);
1884 return NULL;
1885 }
1886 }
1887
1888 if (!b) {
1889 if (nf == NF_GET)
1890 return NULL;
1891
1892 dm_bufio_lock(c);
1893 b = __bufio_new(c, block, nf, &need_submit, &write_list);
1894 dm_bufio_unlock(c);
1895 }
1896
1897 #ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
1898 if (b && (atomic_read(&b->hold_count) == 1))
1899 buffer_record_stack(b);
1900 #endif
1901
1902 __flush_write_list(&write_list);
1903
1904 if (!b)
1905 return NULL;
1906
1907 if (need_submit)
1908 submit_io(b, REQ_OP_READ, read_endio);
1909
1910 if (nf != NF_GET) /* we already tested this condition above */
1911 wait_on_bit_io(&b->state, B_READING, TASK_UNINTERRUPTIBLE);
1912
1913 if (b->read_error) {
1914 int error = blk_status_to_errno(b->read_error);
1915
1916 dm_bufio_release(b);
1917
1918 return ERR_PTR(error);
1919 }
1920
1921 *bp = b;
1922
1923 return b->data;
1924 }
1925
1926 void *dm_bufio_get(struct dm_bufio_client *c, sector_t block,
1927 struct dm_buffer **bp)
1928 {
1929 return new_read(c, block, NF_GET, bp);
1930 }
1931 EXPORT_SYMBOL_GPL(dm_bufio_get);
1932
1933 void *dm_bufio_read(struct dm_bufio_client *c, sector_t block,
1934 struct dm_buffer **bp)
1935 {
1936 if (WARN_ON_ONCE(dm_bufio_in_request()))
1937 return ERR_PTR(-EINVAL);
1938
1939 return new_read(c, block, NF_READ, bp);
1940 }
1941 EXPORT_SYMBOL_GPL(dm_bufio_read);
1942
1943 void *dm_bufio_new(struct dm_bufio_client *c, sector_t block,
1944 struct dm_buffer **bp)
1945 {
1946 if (WARN_ON_ONCE(dm_bufio_in_request()))
1947 return ERR_PTR(-EINVAL);
1948
1949 return new_read(c, block, NF_FRESH, bp);
1950 }
1951 EXPORT_SYMBOL_GPL(dm_bufio_new);
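/*
 * Illustrative sketch (not part of this driver): how a hypothetical caller
 * might read a block through dm-bufio. The names 'client', 'block' and 'buf'
 * are assumptions; 'client' would come from dm_bufio_client_create().
 *
 *	struct dm_buffer *buf;
 *	void *data;
 *
 *	data = dm_bufio_read(client, block, &buf);
 *	if (IS_ERR(data))
 *		return PTR_ERR(data);
 *	... use the block_size bytes at 'data' ...
 *	dm_bufio_release(buf);
 */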
1952
1953 void dm_bufio_prefetch(struct dm_bufio_client *c,
1954 sector_t block, unsigned int n_blocks)
1955 {
1956 struct blk_plug plug;
1957
1958 LIST_HEAD(write_list);
1959
1960 if (WARN_ON_ONCE(dm_bufio_in_request()))
1961 return; /* should never happen */
1962
1963 blk_start_plug(&plug);
1964
1965 for (; n_blocks--; block++) {
1966 int need_submit;
1967 struct dm_buffer *b;
1968
1969 b = cache_get(&c->cache, block);
1970 if (b) {
1971 /* already in cache */
1972 cache_put_and_wake(c, b);
1973 continue;
1974 }
1975
1976 dm_bufio_lock(c);
1977 b = __bufio_new(c, block, NF_PREFETCH, &need_submit,
1978 &write_list);
1979 if (unlikely(!list_empty(&write_list))) {
1980 dm_bufio_unlock(c);
1981 blk_finish_plug(&plug);
1982 __flush_write_list(&write_list);
1983 blk_start_plug(&plug);
1984 dm_bufio_lock(c);
1985 }
1986 if (unlikely(b != NULL)) {
1987 dm_bufio_unlock(c);
1988
1989 if (need_submit)
1990 submit_io(b, REQ_OP_READ, read_endio);
1991 dm_bufio_release(b);
1992
1993 cond_resched();
1994
1995 if (!n_blocks)
1996 goto flush_plug;
1997 dm_bufio_lock(c);
1998 }
1999 dm_bufio_unlock(c);
2000 }
2001
2002 flush_plug:
2003 blk_finish_plug(&plug);
2004 }
2005 EXPORT_SYMBOL_GPL(dm_bufio_prefetch);
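/*
 * Illustrative sketch (not part of this driver): the prefetch/get pattern
 * that the deadlock comments above refer to. dm_bufio_prefetch() starts the
 * reads without blocking; a later dm_bufio_get() returns the data only if
 * the block is already cached and fully read, otherwise NULL (or an ERR_PTR
 * if an earlier read failed). 'client' and 'buf' are assumed names.
 *
 *	dm_bufio_prefetch(client, first_block, n_blocks);
 *	...
 *	data = dm_bufio_get(client, first_block, &buf);
 *	if (!data)
 *		data = dm_bufio_read(client, first_block, &buf);  [may sleep]
 */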
2006
2007 void dm_bufio_release(struct dm_buffer *b)
2008 {
2009 struct dm_bufio_client *c = b->c;
2010
2011 /*
2012 * If there were errors on the buffer, and the buffer is not
2013 * to be written, free the buffer. There is no point in caching
2014 * an invalid buffer.
2015 */
2016 if ((b->read_error || b->write_error) &&
2017 !test_bit_acquire(B_READING, &b->state) &&
2018 !test_bit(B_WRITING, &b->state) &&
2019 !test_bit(B_DIRTY, &b->state)) {
2020 dm_bufio_lock(c);
2021
2022 /* cache remove can fail if there are other holders */
2023 if (cache_remove(&c->cache, b)) {
2024 __free_buffer_wake(b);
2025 dm_bufio_unlock(c);
2026 return;
2027 }
2028
2029 dm_bufio_unlock(c);
2030 }
2031
2032 cache_put_and_wake(c, b);
2033 }
2034 EXPORT_SYMBOL_GPL(dm_bufio_release);
2035
2036 void dm_bufio_mark_partial_buffer_dirty(struct dm_buffer *b,
2037 unsigned int start, unsigned int end)
2038 {
2039 struct dm_bufio_client *c = b->c;
2040
2041 BUG_ON(start >= end);
2042 BUG_ON(end > b->c->block_size);
2043
2044 dm_bufio_lock(c);
2045
2046 BUG_ON(test_bit(B_READING, &b->state));
2047
2048 if (!test_and_set_bit(B_DIRTY, &b->state)) {
2049 b->dirty_start = start;
2050 b->dirty_end = end;
2051 cache_mark(&c->cache, b, LIST_DIRTY);
2052 } else {
2053 if (start < b->dirty_start)
2054 b->dirty_start = start;
2055 if (end > b->dirty_end)
2056 b->dirty_end = end;
2057 }
2058
2059 dm_bufio_unlock(c);
2060 }
2061 EXPORT_SYMBOL_GPL(dm_bufio_mark_partial_buffer_dirty);
2062
2063 void dm_bufio_mark_buffer_dirty(struct dm_buffer *b)
2064 {
2065 dm_bufio_mark_partial_buffer_dirty(b, 0, b->c->block_size);
2066 }
2067 EXPORT_SYMBOL_GPL(dm_bufio_mark_buffer_dirty);
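/*
 * Illustrative sketch (not part of this driver): overwriting a block with
 * dm_bufio_new() and dirtying it. dm_bufio_new() does not read the old
 * contents, so the caller is expected to fill the whole buffer before
 * marking it dirty. 'client' and 'block' are assumed names.
 *
 *	data = dm_bufio_new(client, block, &buf);
 *	if (IS_ERR(data))
 *		return PTR_ERR(data);
 *	memset(data, 0, dm_bufio_get_block_size(client));
 *	dm_bufio_mark_buffer_dirty(buf);
 *	dm_bufio_release(buf);
 */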
2068
2069 void dm_bufio_write_dirty_buffers_async(struct dm_bufio_client *c)
2070 {
2071 LIST_HEAD(write_list);
2072
2073 if (WARN_ON_ONCE(dm_bufio_in_request()))
2074 return; /* should never happen */
2075
2076 dm_bufio_lock(c);
2077 __write_dirty_buffers_async(c, 0, &write_list);
2078 dm_bufio_unlock(c);
2079 __flush_write_list(&write_list);
2080 }
2081 EXPORT_SYMBOL_GPL(dm_bufio_write_dirty_buffers_async);
2082
2083 /*
2084 * For performance, it is essential that the buffers are written asynchronously
2085 * and simultaneously (so that the block layer can merge the writes) and then
2086 * waited upon.
2087 *
2088 * Finally, we flush the hardware disk cache.
2089 */
2090 static bool is_writing(struct lru_entry *e, void *context)
2091 {
2092 struct dm_buffer *b = le_to_buffer(e);
2093
2094 return test_bit(B_WRITING, &b->state);
2095 }
2096
2097 int dm_bufio_write_dirty_buffers(struct dm_bufio_client *c)
2098 {
2099 int a, f;
2100 unsigned long nr_buffers;
2101 struct lru_entry *e;
2102 struct lru_iter it;
2103
2104 LIST_HEAD(write_list);
2105
2106 dm_bufio_lock(c);
2107 __write_dirty_buffers_async(c, 0, &write_list);
2108 dm_bufio_unlock(c);
2109 __flush_write_list(&write_list);
2110 dm_bufio_lock(c);
2111
2112 nr_buffers = cache_count(&c->cache, LIST_DIRTY);
2113 lru_iter_begin(&c->cache.lru[LIST_DIRTY], &it);
2114 while ((e = lru_iter_next(&it, is_writing, c))) {
2115 struct dm_buffer *b = le_to_buffer(e);
2116 __cache_inc_buffer(b);
2117
2118 BUG_ON(test_bit(B_READING, &b->state));
2119
2120 if (nr_buffers) {
2121 nr_buffers--;
2122 dm_bufio_unlock(c);
2123 wait_on_bit_io(&b->state, B_WRITING, TASK_UNINTERRUPTIBLE);
2124 dm_bufio_lock(c);
2125 } else {
2126 wait_on_bit_io(&b->state, B_WRITING, TASK_UNINTERRUPTIBLE);
2127 }
2128
2129 if (!test_bit(B_DIRTY, &b->state) && !test_bit(B_WRITING, &b->state))
2130 cache_mark(&c->cache, b, LIST_CLEAN);
2131
2132 cache_put_and_wake(c, b);
2133
2134 cond_resched();
2135 }
2136 lru_iter_end(&it);
2137
2138 wake_up(&c->free_buffer_wait);
2139 dm_bufio_unlock(c);
2140
2141 a = xchg(&c->async_write_error, 0);
2142 f = dm_bufio_issue_flush(c);
2143 if (a)
2144 return a;
2145
2146 return f;
2147 }
2148 EXPORT_SYMBOL_GPL(dm_bufio_write_dirty_buffers);
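/*
 * Illustrative sketch (not part of this driver): a typical commit sequence.
 * All dirty buffers are submitted, waited upon and followed by a disk-cache
 * flush in one call; the return value carries both the asynchronous write
 * errors and the flush result. 'client' is an assumed handle.
 *
 *	dm_bufio_mark_buffer_dirty(buf);
 *	dm_bufio_release(buf);
 *	r = dm_bufio_write_dirty_buffers(client);
 *	if (r)
 *		... fail the transaction ...
 */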
2149
2150 /*
2151 * Use dm-io to send an empty barrier to flush the device.
2152 */
2153 int dm_bufio_issue_flush(struct dm_bufio_client *c)
2154 {
2155 struct dm_io_request io_req = {
2156 .bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC,
2157 .mem.type = DM_IO_KMEM,
2158 .mem.ptr.addr = NULL,
2159 .client = c->dm_io,
2160 };
2161 struct dm_io_region io_reg = {
2162 .bdev = c->bdev,
2163 .sector = 0,
2164 .count = 0,
2165 };
2166
2167 if (WARN_ON_ONCE(dm_bufio_in_request()))
2168 return -EINVAL;
2169
2170 return dm_io(&io_req, 1, &io_reg, NULL, IOPRIO_DEFAULT);
2171 }
2172 EXPORT_SYMBOL_GPL(dm_bufio_issue_flush);
2173
2174 /*
2175 * Use dm-io to send a discard request to the device.
2176 */
2177 int dm_bufio_issue_discard(struct dm_bufio_client *c, sector_t block, sector_t count)
2178 {
2179 struct dm_io_request io_req = {
2180 .bi_opf = REQ_OP_DISCARD | REQ_SYNC,
2181 .mem.type = DM_IO_KMEM,
2182 .mem.ptr.addr = NULL,
2183 .client = c->dm_io,
2184 };
2185 struct dm_io_region io_reg = {
2186 .bdev = c->bdev,
2187 .sector = block_to_sector(c, block),
2188 .count = block_to_sector(c, count),
2189 };
2190
2191 if (WARN_ON_ONCE(dm_bufio_in_request()))
2192 return -EINVAL; /* discards are optional */
2193
2194 return dm_io(&io_req, 1, &io_reg, NULL, IOPRIO_DEFAULT);
2195 }
2196 EXPORT_SYMBOL_GPL(dm_bufio_issue_discard);
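/*
 * Illustrative sketch (not part of this driver): discarding a range of
 * blocks the caller no longer needs. Discard failures are typically ignored
 * because discards are only optional hints to the device. The names below
 * are assumptions.
 *
 *	dm_bufio_forget_buffers(client, block, n_blocks);
 *	(void) dm_bufio_issue_discard(client, block, n_blocks);
 */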
2197
2198 static bool forget_buffer(struct dm_bufio_client *c, sector_t block)
2199 {
2200 struct dm_buffer *b;
2201
2202 b = cache_get(&c->cache, block);
2203 if (b) {
2204 if (likely(!smp_load_acquire(&b->state))) {
2205 if (cache_remove(&c->cache, b))
2206 __free_buffer_wake(b);
2207 else
2208 cache_put_and_wake(c, b);
2209 } else {
2210 cache_put_and_wake(c, b);
2211 }
2212 }
2213
2214 return b ? true : false;
2215 }
2216
2217 /*
2218 * Free the given buffer.
2219 *
2220 * This is just a hint; if the buffer is in use or dirty, this function
2221 * does nothing.
2222 */
2223 void dm_bufio_forget(struct dm_bufio_client *c, sector_t block)
2224 {
2225 dm_bufio_lock(c);
2226 forget_buffer(c, block);
2227 dm_bufio_unlock(c);
2228 }
2229 EXPORT_SYMBOL_GPL(dm_bufio_forget);
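/*
 * Illustrative sketch (not part of this driver): dropping the cached copy
 * of a block after the caller has invalidated it on disk, so that a later
 * read fetches fresh data instead of the stale cached contents.
 *
 *	... zero or discard the block on the device ...
 *	dm_bufio_forget(client, block);
 */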
2230
2231 static enum evict_result idle(struct dm_buffer *b, void *context)
2232 {
2233 return b->state ? ER_DONT_EVICT : ER_EVICT;
2234 }
2235
2236 void dm_bufio_forget_buffers(struct dm_bufio_client *c, sector_t block, sector_t n_blocks)
2237 {
2238 dm_bufio_lock(c);
2239 cache_remove_range(&c->cache, block, block + n_blocks, idle, __free_buffer_wake);
2240 dm_bufio_unlock(c);
2241 }
2242 EXPORT_SYMBOL_GPL(dm_bufio_forget_buffers);
2243
2244 void dm_bufio_set_minimum_buffers(struct dm_bufio_client *c, unsigned int n)
2245 {
2246 c->minimum_buffers = n;
2247 }
2248 EXPORT_SYMBOL_GPL(dm_bufio_set_minimum_buffers);
2249
2250 unsigned int dm_bufio_get_block_size(struct dm_bufio_client *c)
2251 {
2252 return c->block_size;
2253 }
2254 EXPORT_SYMBOL_GPL(dm_bufio_get_block_size);
2255
2256 sector_t dm_bufio_get_device_size(struct dm_bufio_client *c)
2257 {
2258 sector_t s = bdev_nr_sectors(c->bdev);
2259
2260 if (s >= c->start)
2261 s -= c->start;
2262 else
2263 s = 0;
2264 if (likely(c->sectors_per_block_bits >= 0))
2265 s >>= c->sectors_per_block_bits;
2266 else
2267 sector_div(s, c->block_size >> SECTOR_SHIFT);
2268 return s;
2269 }
2270 EXPORT_SYMBOL_GPL(dm_bufio_get_device_size);
2271
2272 struct dm_io_client *dm_bufio_get_dm_io_client(struct dm_bufio_client *c)
2273 {
2274 return c->dm_io;
2275 }
2276 EXPORT_SYMBOL_GPL(dm_bufio_get_dm_io_client);
2277
2278 sector_t dm_bufio_get_block_number(struct dm_buffer *b)
2279 {
2280 return b->block;
2281 }
2282 EXPORT_SYMBOL_GPL(dm_bufio_get_block_number);
2283
2284 void *dm_bufio_get_block_data(struct dm_buffer *b)
2285 {
2286 return b->data;
2287 }
2288 EXPORT_SYMBOL_GPL(dm_bufio_get_block_data);
2289
2290 void *dm_bufio_get_aux_data(struct dm_buffer *b)
2291 {
2292 return b + 1;
2293 }
2294 EXPORT_SYMBOL_GPL(dm_bufio_get_aux_data);
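/*
 * Illustrative sketch (not part of this driver): per-buffer auxiliary data.
 * The aux area lives immediately after struct dm_buffer and its size is the
 * aux_size passed to dm_bufio_client_create(); 'struct my_aux' below is a
 * hypothetical example.
 *
 *	struct my_aux { u32 csum; };
 *
 *	client = dm_bufio_client_create(bdev, 4096, 1, sizeof(struct my_aux),
 *					NULL, NULL, 0);
 *	...
 *	struct my_aux *aux = dm_bufio_get_aux_data(buf);
 *	aux->csum = 0;
 */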
2295
2296 struct dm_bufio_client *dm_bufio_get_client(struct dm_buffer *b)
2297 {
2298 return b->c;
2299 }
2300 EXPORT_SYMBOL_GPL(dm_bufio_get_client);
2301
2302 static enum it_action warn_leak(struct dm_buffer *b, void *context)
2303 {
2304 bool *warned = context;
2305
2306 WARN_ON(!(*warned));
2307 *warned = true;
2308 DMERR("leaked buffer %llx, hold count %u, list %d",
2309 (unsigned long long)b->block, atomic_read(&b->hold_count), b->list_mode);
2310 #ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
2311 stack_trace_print(b->stack_entries, b->stack_len, 1);
2312 /* mark unclaimed to avoid WARN_ON at end of drop_buffers() */
2313 atomic_set(&b->hold_count, 0);
2314 #endif
2315 return IT_NEXT;
2316 }
2317
2318 static void drop_buffers(struct dm_bufio_client *c)
2319 {
2320 int i;
2321 struct dm_buffer *b;
2322
2323 if (WARN_ON(dm_bufio_in_request()))
2324 return; /* should never happen */
2325
2326 /*
2327 * An optimization so that the buffers are not written one-by-one.
2328 */
2329 dm_bufio_write_dirty_buffers_async(c);
2330
2331 dm_bufio_lock(c);
2332
2333 while ((b = __get_unclaimed_buffer(c)))
2334 __free_buffer_wake(b);
2335
2336 for (i = 0; i < LIST_SIZE; i++) {
2337 bool warned = false;
2338
2339 cache_iterate(&c->cache, i, warn_leak, &warned);
2340 }
2341
2342 #ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
2343 while ((b = __get_unclaimed_buffer(c)))
2344 __free_buffer_wake(b);
2345 #endif
2346
2347 for (i = 0; i < LIST_SIZE; i++)
2348 WARN_ON(cache_count(&c->cache, i));
2349
2350 dm_bufio_unlock(c);
2351 }
2352
2353 static unsigned long get_retain_buffers(struct dm_bufio_client *c)
2354 {
2355 unsigned long retain_bytes = READ_ONCE(dm_bufio_retain_bytes);
2356
2357 if (likely(c->sectors_per_block_bits >= 0))
2358 retain_bytes >>= c->sectors_per_block_bits + SECTOR_SHIFT;
2359 else
2360 retain_bytes /= c->block_size;
2361
2362 return retain_bytes;
2363 }
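/*
 * Worked example (illustrative): with the default retain_bytes of 256 KiB
 * and a 4 KiB block size, sectors_per_block_bits is 3, so the retain target
 * is 262144 >> (3 + 9) = 64 buffers. Non-power-of-two block sizes take the
 * plain division path instead.
 */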
2364
2365 static void __scan(struct dm_bufio_client *c)
2366 {
2367 int l;
2368 struct dm_buffer *b;
2369 unsigned long freed = 0;
2370 unsigned long retain_target = get_retain_buffers(c);
2371 unsigned long count = cache_total(&c->cache);
2372
2373 for (l = 0; l < LIST_SIZE; l++) {
2374 while (true) {
2375 if (count - freed <= retain_target)
2376 atomic_long_set(&c->need_shrink, 0);
2377 if (!atomic_long_read(&c->need_shrink))
2378 break;
2379
2380 b = cache_evict(&c->cache, l,
2381 l == LIST_CLEAN ? is_clean : is_dirty, c);
2382 if (!b)
2383 break;
2384
2385 __make_buffer_clean(b);
2386 __free_buffer_wake(b);
2387
2388 atomic_long_dec(&c->need_shrink);
2389 freed++;
2390 cond_resched();
2391 }
2392 }
2393 }
2394
2395 static void shrink_work(struct work_struct *w)
2396 {
2397 struct dm_bufio_client *c = container_of(w, struct dm_bufio_client, shrink_work);
2398
2399 dm_bufio_lock(c);
2400 __scan(c);
2401 dm_bufio_unlock(c);
2402 }
2403
2404 static unsigned long dm_bufio_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
2405 {
2406 struct dm_bufio_client *c;
2407
2408 c = container_of(shrink, struct dm_bufio_client, shrinker);
2409 atomic_long_add(sc->nr_to_scan, &c->need_shrink);
2410 queue_work(dm_bufio_wq, &c->shrink_work);
2411
2412 return sc->nr_to_scan;
2413 }
2414
2415 static unsigned long dm_bufio_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
2416 {
2417 struct dm_bufio_client *c = container_of(shrink, struct dm_bufio_client, shrinker);
2418 unsigned long count = cache_total(&c->cache);
2419 unsigned long retain_target = get_retain_buffers(c);
2420 unsigned long queued_for_cleanup = atomic_long_read(&c->need_shrink);
2421
2422 if (unlikely(count < retain_target))
2423 count = 0;
2424 else
2425 count -= retain_target;
2426
2427 if (unlikely(count < queued_for_cleanup))
2428 count = 0;
2429 else
2430 count -= queued_for_cleanup;
2431
2432 return count;
2433 }
2434
2435 /*
2436 * Create the buffering interface
2437 */
2438 struct dm_bufio_client *dm_bufio_client_create(struct block_device *bdev, unsigned int block_size,
2439 unsigned int reserved_buffers, unsigned int aux_size,
2440 void (*alloc_callback)(struct dm_buffer *),
2441 void (*write_callback)(struct dm_buffer *),
2442 unsigned int flags)
2443 {
2444 int r;
2445 unsigned int num_locks;
2446 struct dm_bufio_client *c;
2447 char slab_name[64];
2448 static atomic_t seqno = ATOMIC_INIT(0);
2449
2450 if (!block_size || block_size & ((1 << SECTOR_SHIFT) - 1)) {
2451 DMERR("%s: block size not specified or is not a multiple of 512b", __func__);
2452 r = -EINVAL;
2453 goto bad_client;
2454 }
2455
2456 num_locks = dm_num_hash_locks();
2457 c = kzalloc(sizeof(*c) + (num_locks * sizeof(struct buffer_tree)), GFP_KERNEL);
2458 if (!c) {
2459 r = -ENOMEM;
2460 goto bad_client;
2461 }
2462 cache_init(&c->cache, num_locks, (flags & DM_BUFIO_CLIENT_NO_SLEEP) != 0);
2463
2464 c->bdev = bdev;
2465 c->block_size = block_size;
2466 if (is_power_of_2(block_size))
2467 c->sectors_per_block_bits = __ffs(block_size) - SECTOR_SHIFT;
2468 else
2469 c->sectors_per_block_bits = -1;
2470
2471 c->alloc_callback = alloc_callback;
2472 c->write_callback = write_callback;
2473
2474 if (flags & DM_BUFIO_CLIENT_NO_SLEEP) {
2475 c->no_sleep = true;
2476 static_branch_inc(&no_sleep_enabled);
2477 }
2478
2479 mutex_init(&c->lock);
2480 spin_lock_init(&c->spinlock);
2481 INIT_LIST_HEAD(&c->reserved_buffers);
2482 c->need_reserved_buffers = reserved_buffers;
2483
2484 dm_bufio_set_minimum_buffers(c, DM_BUFIO_MIN_BUFFERS);
2485
2486 init_waitqueue_head(&c->free_buffer_wait);
2487 c->async_write_error = 0;
2488
2489 c->dm_io = dm_io_client_create();
2490 if (IS_ERR(c->dm_io)) {
2491 r = PTR_ERR(c->dm_io);
2492 goto bad_dm_io;
2493 }
2494
2495 if (block_size <= KMALLOC_MAX_SIZE &&
2496 (block_size < PAGE_SIZE || !is_power_of_2(block_size))) {
2497 unsigned int align = min(1U << __ffs(block_size), (unsigned int)PAGE_SIZE);
2498
2499 snprintf(slab_name, sizeof(slab_name), "dm_bufio_cache-%u-%u",
2500 block_size, atomic_inc_return(&seqno));
2501 c->slab_cache = kmem_cache_create(slab_name, block_size, align,
2502 SLAB_RECLAIM_ACCOUNT, NULL);
2503 if (!c->slab_cache) {
2504 r = -ENOMEM;
2505 goto bad;
2506 }
2507 }
2508 if (aux_size)
2509 snprintf(slab_name, sizeof(slab_name), "dm_bufio_buffer-%u-%u",
2510 aux_size, atomic_inc_return(&seqno));
2511 else
2512 snprintf(slab_name, sizeof(slab_name), "dm_bufio_buffer-%u",
2513 atomic_inc_return(&seqno));
2514 c->slab_buffer = kmem_cache_create(slab_name, sizeof(struct dm_buffer) + aux_size,
2515 0, SLAB_RECLAIM_ACCOUNT, NULL);
2516 if (!c->slab_buffer) {
2517 r = -ENOMEM;
2518 goto bad;
2519 }
2520
2521 while (c->need_reserved_buffers) {
2522 struct dm_buffer *b = alloc_buffer(c, GFP_KERNEL);
2523
2524 if (!b) {
2525 r = -ENOMEM;
2526 goto bad;
2527 }
2528 __free_buffer_wake(b);
2529 }
2530
2531 INIT_WORK(&c->shrink_work, shrink_work);
2532 atomic_long_set(&c->need_shrink, 0);
2533
2534 c->shrinker.count_objects = dm_bufio_shrink_count;
2535 c->shrinker.scan_objects = dm_bufio_shrink_scan;
2536 c->shrinker.seeks = 1;
2537 c->shrinker.batch = 0;
2538 r = register_shrinker(&c->shrinker, "dm-bufio:(%u:%u)",
2539 MAJOR(bdev->bd_dev), MINOR(bdev->bd_dev));
2540 if (r)
2541 goto bad;
2542
2543 mutex_lock(&dm_bufio_clients_lock);
2544 dm_bufio_client_count++;
2545 list_add(&c->client_list, &dm_bufio_all_clients);
2546 __cache_size_refresh();
2547 mutex_unlock(&dm_bufio_clients_lock);
2548
2549 return c;
2550
2551 bad:
2552 while (!list_empty(&c->reserved_buffers)) {
2553 struct dm_buffer *b = list_to_buffer(c->reserved_buffers.next);
2554
2555 list_del(&b->lru.list);
2556 free_buffer(b);
2557 }
2558 kmem_cache_destroy(c->slab_cache);
2559 kmem_cache_destroy(c->slab_buffer);
2560 dm_io_client_destroy(c->dm_io);
2561 bad_dm_io:
2562 mutex_destroy(&c->lock);
2563 if (c->no_sleep)
2564 static_branch_dec(&no_sleep_enabled);
2565 kfree(c);
2566 bad_client:
2567 return ERR_PTR(r);
2568 }
2569 EXPORT_SYMBOL_GPL(dm_bufio_client_create);
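/*
 * Illustrative sketch (not part of this driver): creating and destroying a
 * client. The block size must be a multiple of 512 bytes; the names below
 * are assumptions for the sake of the example.
 *
 *	struct dm_bufio_client *client;
 *
 *	client = dm_bufio_client_create(bdev, 4096, 1, 0, NULL, NULL, 0);
 *	if (IS_ERR(client))
 *		return PTR_ERR(client);
 *	...
 *	dm_bufio_client_destroy(client);
 */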
2570
2571 /*
2572 * Free the buffering interface.
2573 * It is required that there are no references on any buffers.
2574 */
2575 void dm_bufio_client_destroy(struct dm_bufio_client *c)
2576 {
2577 unsigned int i;
2578
2579 drop_buffers(c);
2580
2581 unregister_shrinker(&c->shrinker);
2582 flush_work(&c->shrink_work);
2583
2584 mutex_lock(&dm_bufio_clients_lock);
2585
2586 list_del(&c->client_list);
2587 dm_bufio_client_count--;
2588 __cache_size_refresh();
2589
2590 mutex_unlock(&dm_bufio_clients_lock);
2591
2592 WARN_ON(c->need_reserved_buffers);
2593
2594 while (!list_empty(&c->reserved_buffers)) {
2595 struct dm_buffer *b = list_to_buffer(c->reserved_buffers.next);
2596
2597 list_del(&b->lru.list);
2598 free_buffer(b);
2599 }
2600
2601 for (i = 0; i < LIST_SIZE; i++)
2602 if (cache_count(&c->cache, i))
2603 DMERR("leaked buffer count %d: %lu", i, cache_count(&c->cache, i));
2604
2605 for (i = 0; i < LIST_SIZE; i++)
2606 WARN_ON(cache_count(&c->cache, i));
2607
2608 cache_destroy(&c->cache);
2609 kmem_cache_destroy(c->slab_cache);
2610 kmem_cache_destroy(c->slab_buffer);
2611 dm_io_client_destroy(c->dm_io);
2612 mutex_destroy(&c->lock);
2613 if (c->no_sleep)
2614 static_branch_dec(&no_sleep_enabled);
2615 kfree(c);
2616 }
2617 EXPORT_SYMBOL_GPL(dm_bufio_client_destroy);
2618
2619 void dm_bufio_client_reset(struct dm_bufio_client *c)
2620 {
2621 drop_buffers(c);
2622 flush_work(&c->shrink_work);
2623 }
2624 EXPORT_SYMBOL_GPL(dm_bufio_client_reset);
2625
2626 void dm_bufio_set_sector_offset(struct dm_bufio_client *c, sector_t start)
2627 {
2628 c->start = start;
2629 }
2630 EXPORT_SYMBOL_GPL(dm_bufio_set_sector_offset);
2631
2632 /*--------------------------------------------------------------*/
2633
2634 static unsigned int get_max_age_hz(void)
2635 {
2636 unsigned int max_age = READ_ONCE(dm_bufio_max_age);
2637
2638 if (max_age > UINT_MAX / HZ)
2639 max_age = UINT_MAX / HZ;
2640
2641 return max_age * HZ;
2642 }
2643
2644 static bool older_than(struct dm_buffer *b, unsigned long age_hz)
2645 {
2646 return time_after_eq(jiffies, READ_ONCE(b->last_accessed) + age_hz);
2647 }
2648
2649 struct evict_params {
2650 gfp_t gfp;
2651 unsigned long age_hz;
2652
2653 /*
2654 * This gets updated with the largest last_accessed (i.e. most
2655 * recently used) of the evicted buffers. It will not be reinitialised
2656 * by __evict_many(), so you can use it across multiple invocations.
2657 */
2658 unsigned long last_accessed;
2659 };
2660
2661 /*
2662 * We may not be able to evict this buffer if IO is pending or the client
2663 * is still using it.
2664 *
2665 * And if GFP_NOFS is used, we must not do any I/O because we hold
2666 * dm_bufio_clients_lock and we would risk deadlock if the I/O gets
2667 * rerouted to a different bufio client.
2668 */
2669 static enum evict_result select_for_evict(struct dm_buffer *b, void *context)
2670 {
2671 struct evict_params *params = context;
2672
2673 if (!(params->gfp & __GFP_FS) ||
2674 (static_branch_unlikely(&no_sleep_enabled) && b->c->no_sleep)) {
2675 if (test_bit_acquire(B_READING, &b->state) ||
2676 test_bit(B_WRITING, &b->state) ||
2677 test_bit(B_DIRTY, &b->state))
2678 return ER_DONT_EVICT;
2679 }
2680
2681 return older_than(b, params->age_hz) ? ER_EVICT : ER_STOP;
2682 }
2683
2684 static unsigned long __evict_many(struct dm_bufio_client *c,
2685 struct evict_params *params,
2686 int list_mode, unsigned long max_count)
2687 {
2688 unsigned long count;
2689 unsigned long last_accessed;
2690 struct dm_buffer *b;
2691
2692 for (count = 0; count < max_count; count++) {
2693 b = cache_evict(&c->cache, list_mode, select_for_evict, params);
2694 if (!b)
2695 break;
2696
2697 last_accessed = READ_ONCE(b->last_accessed);
2698 if (time_after_eq(params->last_accessed, last_accessed))
2699 params->last_accessed = last_accessed;
2700
2701 __make_buffer_clean(b);
2702 __free_buffer_wake(b);
2703
2704 cond_resched();
2705 }
2706
2707 return count;
2708 }
2709
2710 static void evict_old_buffers(struct dm_bufio_client *c, unsigned long age_hz)
2711 {
2712 struct evict_params params = {.gfp = 0, .age_hz = age_hz, .last_accessed = 0};
2713 unsigned long retain = get_retain_buffers(c);
2714 unsigned long count;
2715 LIST_HEAD(write_list);
2716
2717 dm_bufio_lock(c);
2718
2719 __check_watermark(c, &write_list);
2720 if (unlikely(!list_empty(&write_list))) {
2721 dm_bufio_unlock(c);
2722 __flush_write_list(&write_list);
2723 dm_bufio_lock(c);
2724 }
2725
2726 count = cache_total(&c->cache);
2727 if (count > retain)
2728 __evict_many(c, &params, LIST_CLEAN, count - retain);
2729
2730 dm_bufio_unlock(c);
2731 }
2732
2733 static void cleanup_old_buffers(void)
2734 {
2735 unsigned long max_age_hz = get_max_age_hz();
2736 struct dm_bufio_client *c;
2737
2738 mutex_lock(&dm_bufio_clients_lock);
2739
2740 __cache_size_refresh();
2741
2742 list_for_each_entry(c, &dm_bufio_all_clients, client_list)
2743 evict_old_buffers(c, max_age_hz);
2744
2745 mutex_unlock(&dm_bufio_clients_lock);
2746 }
2747
2748 static void work_fn(struct work_struct *w)
2749 {
2750 cleanup_old_buffers();
2751
2752 queue_delayed_work(dm_bufio_wq, &dm_bufio_cleanup_old_work,
2753 DM_BUFIO_WORK_TIMER_SECS * HZ);
2754 }
2755
2756 /*--------------------------------------------------------------*/
2757
2758 /*
2759 * Global cleanup tries to evict the oldest buffers from across _all_
2760 * the clients. It does this by repeatedly evicting a few buffers from
2761 * the client that holds the oldest buffer. It's approximate, but hopefully
2762 * good enough.
2763 */
2764 static struct dm_bufio_client *__pop_client(void)
2765 {
2766 struct list_head *h;
2767
2768 if (list_empty(&dm_bufio_all_clients))
2769 return NULL;
2770
2771 h = dm_bufio_all_clients.next;
2772 list_del(h);
2773 return container_of(h, struct dm_bufio_client, client_list);
2774 }
2775
2776 /*
2777 * Inserts the client in the global client list based on its
2778 * 'oldest_buffer' field.
2779 */
2780 static void __insert_client(struct dm_bufio_client *new_client)
2781 {
2782 struct dm_bufio_client *c;
2783 struct list_head *h = dm_bufio_all_clients.next;
2784
2785 while (h != &dm_bufio_all_clients) {
2786 c = container_of(h, struct dm_bufio_client, client_list);
2787 if (time_after_eq(c->oldest_buffer, new_client->oldest_buffer))
2788 break;
2789 h = h->next;
2790 }
2791
2792 list_add_tail(&new_client->client_list, h);
2793 }
2794
2795 static unsigned long __evict_a_few(unsigned long nr_buffers)
2796 {
2797 unsigned long count;
2798 struct dm_bufio_client *c;
2799 struct evict_params params = {
2800 .gfp = GFP_KERNEL,
2801 .age_hz = 0,
2802 /* set to jiffies in case there are no buffers in this client */
2803 .last_accessed = jiffies
2804 };
2805
2806 c = __pop_client();
2807 if (!c)
2808 return 0;
2809
2810 dm_bufio_lock(c);
2811 count = __evict_many(c, &params, LIST_CLEAN, nr_buffers);
2812 dm_bufio_unlock(c);
2813
2814 if (count)
2815 c->oldest_buffer = params.last_accessed;
2816 __insert_client(c);
2817
2818 return count;
2819 }
2820
2821 static void check_watermarks(void)
2822 {
2823 LIST_HEAD(write_list);
2824 struct dm_bufio_client *c;
2825
2826 mutex_lock(&dm_bufio_clients_lock);
2827 list_for_each_entry(c, &dm_bufio_all_clients, client_list) {
2828 dm_bufio_lock(c);
2829 __check_watermark(c, &write_list);
2830 dm_bufio_unlock(c);
2831 }
2832 mutex_unlock(&dm_bufio_clients_lock);
2833
2834 __flush_write_list(&write_list);
2835 }
2836
2837 static void evict_old(void)
2838 {
2839 unsigned long threshold = dm_bufio_cache_size -
2840 dm_bufio_cache_size / DM_BUFIO_LOW_WATERMARK_RATIO;
2841
2842 mutex_lock(&dm_bufio_clients_lock);
2843 while (dm_bufio_current_allocated > threshold) {
2844 if (!__evict_a_few(64))
2845 break;
2846 cond_resched();
2847 }
2848 mutex_unlock(&dm_bufio_clients_lock);
2849 }
2850
2851 static void do_global_cleanup(struct work_struct *w)
2852 {
2853 check_watermarks();
2854 evict_old();
2855 }
2856
2857 /*
2858 *--------------------------------------------------------------
2859 * Module setup
2860 *--------------------------------------------------------------
2861 */
2862
2863 /*
2864 * This is called only once for the whole dm_bufio module.
2865 * It initializes the memory limit.
2866 */
2867 static int __init dm_bufio_init(void)
2868 {
2869 __u64 mem;
2870
2871 dm_bufio_allocated_kmem_cache = 0;
2872 dm_bufio_allocated_get_free_pages = 0;
2873 dm_bufio_allocated_vmalloc = 0;
2874 dm_bufio_current_allocated = 0;
2875
2876 mem = (__u64)mult_frac(totalram_pages() - totalhigh_pages(),
2877 DM_BUFIO_MEMORY_PERCENT, 100) << PAGE_SHIFT;
2878
2879 if (mem > ULONG_MAX)
2880 mem = ULONG_MAX;
2881
2882 #ifdef CONFIG_MMU
2883 if (mem > mult_frac(VMALLOC_TOTAL, DM_BUFIO_VMALLOC_PERCENT, 100))
2884 mem = mult_frac(VMALLOC_TOTAL, DM_BUFIO_VMALLOC_PERCENT, 100);
2885 #endif
2886
2887 dm_bufio_default_cache_size = mem;
2888
2889 mutex_lock(&dm_bufio_clients_lock);
2890 __cache_size_refresh();
2891 mutex_unlock(&dm_bufio_clients_lock);
2892
2893 dm_bufio_wq = alloc_workqueue("dm_bufio_cache", WQ_MEM_RECLAIM, 0);
2894 if (!dm_bufio_wq)
2895 return -ENOMEM;
2896
2897 INIT_DELAYED_WORK(&dm_bufio_cleanup_old_work, work_fn);
2898 INIT_WORK(&dm_bufio_replacement_work, do_global_cleanup);
2899 queue_delayed_work(dm_bufio_wq, &dm_bufio_cleanup_old_work,
2900 DM_BUFIO_WORK_TIMER_SECS * HZ);
2901
2902 return 0;
2903 }
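/*
 * Worked example (illustrative): with 16 GiB of low memory,
 * DM_BUFIO_MEMORY_PERCENT = 2 yields a default cache size of roughly
 * 327 MB. On CONFIG_MMU systems the value is additionally capped at
 * DM_BUFIO_VMALLOC_PERCENT of the vmalloc arena, which mainly matters
 * on 32-bit machines.
 */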
2904
2905 /*
2906 * This is called once when unloading the dm_bufio module.
2907 */
2908 static void __exit dm_bufio_exit(void)
2909 {
2910 int bug = 0;
2911
2912 cancel_delayed_work_sync(&dm_bufio_cleanup_old_work);
2913 destroy_workqueue(dm_bufio_wq);
2914
2915 if (dm_bufio_client_count) {
2916 DMCRIT("%s: dm_bufio_client_count leaked: %d",
2917 __func__, dm_bufio_client_count);
2918 bug = 1;
2919 }
2920
2921 if (dm_bufio_current_allocated) {
2922 DMCRIT("%s: dm_bufio_current_allocated leaked: %lu",
2923 __func__, dm_bufio_current_allocated);
2924 bug = 1;
2925 }
2926
2927 if (dm_bufio_allocated_get_free_pages) {
2928 DMCRIT("%s: dm_bufio_allocated_get_free_pages leaked: %lu",
2929 __func__, dm_bufio_allocated_get_free_pages);
2930 bug = 1;
2931 }
2932
2933 if (dm_bufio_allocated_vmalloc) {
2934 DMCRIT("%s: dm_bufio_vmalloc leaked: %lu",
2935 __func__, dm_bufio_allocated_vmalloc);
2936 bug = 1;
2937 }
2938
2939 WARN_ON(bug); /* leaks are not worth crashing the system */
2940 }
2941
2942 module_init(dm_bufio_init)
2943 module_exit(dm_bufio_exit)
2944
2945 module_param_named(max_cache_size_bytes, dm_bufio_cache_size, ulong, 0644);
2946 MODULE_PARM_DESC(max_cache_size_bytes, "Size of metadata cache");
2947
2948 module_param_named(max_age_seconds, dm_bufio_max_age, uint, 0644);
2949 MODULE_PARM_DESC(max_age_seconds, "Max age of a buffer in seconds");
2950
2951 module_param_named(retain_bytes, dm_bufio_retain_bytes, ulong, 0644);
2952 MODULE_PARM_DESC(retain_bytes, "Try to keep at least this many bytes cached in memory");
2953
2954 module_param_named(peak_allocated_bytes, dm_bufio_peak_allocated, ulong, 0644);
2955 MODULE_PARM_DESC(peak_allocated_bytes, "Tracks the maximum allocated memory");
2956
2957 module_param_named(allocated_kmem_cache_bytes, dm_bufio_allocated_kmem_cache, ulong, 0444);
2958 MODULE_PARM_DESC(allocated_kmem_cache_bytes, "Memory allocated with kmem_cache_alloc");
2959
2960 module_param_named(allocated_get_free_pages_bytes, dm_bufio_allocated_get_free_pages, ulong, 0444);
2961 MODULE_PARM_DESC(allocated_get_free_pages_bytes, "Memory allocated with get_free_pages");
2962
2963 module_param_named(allocated_vmalloc_bytes, dm_bufio_allocated_vmalloc, ulong, 0444);
2964 MODULE_PARM_DESC(allocated_vmalloc_bytes, "Memory allocated with vmalloc");
2965
2966 module_param_named(current_allocated_bytes, dm_bufio_current_allocated, ulong, 0444);
2967 MODULE_PARM_DESC(current_allocated_bytes, "Memory currently used by the cache");
2968
2969 MODULE_AUTHOR("Mikulas Patocka <dm-devel@redhat.com>");
2970 MODULE_DESCRIPTION(DM_NAME " buffered I/O library");
2971 MODULE_LICENSE("GPL");
2972