xref: /openbmc/linux/drivers/md/dm-bufio.c (revision 4f6cce39)
1 /*
2  * Copyright (C) 2009-2011 Red Hat, Inc.
3  *
4  * Author: Mikulas Patocka <mpatocka@redhat.com>
5  *
6  * This file is released under the GPL.
7  */
8 
9 #include "dm-bufio.h"
10 
11 #include <linux/device-mapper.h>
12 #include <linux/dm-io.h>
13 #include <linux/slab.h>
14 #include <linux/sched/mm.h>
15 #include <linux/jiffies.h>
16 #include <linux/vmalloc.h>
17 #include <linux/shrinker.h>
18 #include <linux/module.h>
19 #include <linux/rbtree.h>
20 #include <linux/stacktrace.h>
21 
22 #define DM_MSG_PREFIX "bufio"
23 
24 /*
25  * Memory management policy:
26  *	Limit the number of buffers to DM_BUFIO_MEMORY_PERCENT of main memory
27  *	or DM_BUFIO_VMALLOC_PERCENT of vmalloc memory (whichever is lower).
28  *	Always allocate at least DM_BUFIO_MIN_BUFFERS buffers.
29  *	Start background writeback when there are DM_BUFIO_WRITEBACK_PERCENT
30  *	dirty buffers.
31  */
32 #define DM_BUFIO_MIN_BUFFERS		8
33 
34 #define DM_BUFIO_MEMORY_PERCENT		2
35 #define DM_BUFIO_VMALLOC_PERCENT	25
36 #define DM_BUFIO_WRITEBACK_PERCENT	75
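/*
 * Rough example (assuming a 64-bit machine with 8 GiB of low memory):
 * the 2% memory limit gives a default cache size of about 160 MiB, which
 * is far below 25% of vmalloc space, so the memory limit is what applies.
 * On 32-bit machines with little vmalloc space the vmalloc limit can be
 * the one that takes effect instead.
 */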
37 
38 /*
39  * Check buffer ages in this interval (seconds)
40  */
41 #define DM_BUFIO_WORK_TIMER_SECS	30
42 
43 /*
44  * Free buffers when they are older than this (seconds)
45  */
46 #define DM_BUFIO_DEFAULT_AGE_SECS	300
47 
48 /*
49  * The nr of bytes of cached data to keep around.
50  */
51 #define DM_BUFIO_DEFAULT_RETAIN_BYTES   (256 * 1024)
52 
53 /*
54  * The number of bvec entries that are embedded directly in the buffer.
55  * If the chunk size is larger, dm-io is used to do the io.
56  */
57 #define DM_BUFIO_INLINE_VECS		16
58 
59 /*
60  * Don't try to use kmem_cache_alloc for blocks larger than this.
61  * For explanation, see alloc_buffer_data below.
62  */
63 #define DM_BUFIO_BLOCK_SIZE_SLAB_LIMIT	(PAGE_SIZE >> 1)
64 #define DM_BUFIO_BLOCK_SIZE_GFP_LIMIT	(PAGE_SIZE << (MAX_ORDER - 1))
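/*
 * For example, with 4 KiB pages and a typical MAX_ORDER of 11, blocks of
 * up to 2 KiB come from a slab cache and blocks of up to 4 MiB can use
 * __get_free_pages(); anything larger must fall back to __vmalloc().
 */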
65 
66 /*
67  * dm_buffer->list_mode
68  */
69 #define LIST_CLEAN	0
70 #define LIST_DIRTY	1
71 #define LIST_SIZE	2
72 
73 /*
74  * Linking of buffers:
75  *	All buffers are indexed in the buffer_tree rb-tree with their node field.
76  *
77  *	Clean buffers that are not being written (B_WRITING not set)
78  *	are linked to lru[LIST_CLEAN] with their lru_list field.
79  *
80  *	Dirty and clean buffers that are being written are linked to
81  *	lru[LIST_DIRTY] with their lru_list field. When the write
82  *	finishes, the buffer cannot be relinked immediately (because we
83  *	are in an interrupt context and relinking requires process
84  *	context), so some clean-not-writing buffers can be held on
85  *	dirty_lru too.  They are relinked to the clean lru later, in
86  *	process context.
87  */
88 struct dm_bufio_client {
89 	struct mutex lock;
90 
91 	struct list_head lru[LIST_SIZE];
92 	unsigned long n_buffers[LIST_SIZE];
93 
94 	struct block_device *bdev;
95 	unsigned block_size;
96 	unsigned char sectors_per_block_bits;
97 	unsigned char pages_per_block_bits;
98 	unsigned char blocks_per_page_bits;
99 	unsigned aux_size;
100 	void (*alloc_callback)(struct dm_buffer *);
101 	void (*write_callback)(struct dm_buffer *);
102 
103 	struct dm_io_client *dm_io;
104 
105 	struct list_head reserved_buffers;
106 	unsigned need_reserved_buffers;
107 
108 	unsigned minimum_buffers;
109 
110 	struct rb_root buffer_tree;
111 	wait_queue_head_t free_buffer_wait;
112 
113 	int async_write_error;
114 
115 	struct list_head client_list;
116 	struct shrinker shrinker;
117 };
118 
119 /*
120  * Buffer state bits.
121  */
122 #define B_READING	0
123 #define B_WRITING	1
124 #define B_DIRTY		2
125 
126 /*
127  * Describes how the block was allocated:
128  * kmem_cache_alloc(), __get_free_pages() or vmalloc().
129  * See the comment at alloc_buffer_data.
130  */
131 enum data_mode {
132 	DATA_MODE_SLAB = 0,
133 	DATA_MODE_GET_FREE_PAGES = 1,
134 	DATA_MODE_VMALLOC = 2,
135 	DATA_MODE_LIMIT = 3
136 };
137 
138 struct dm_buffer {
139 	struct rb_node node;
140 	struct list_head lru_list;
141 	sector_t block;
142 	void *data;
143 	enum data_mode data_mode;
144 	unsigned char list_mode;		/* LIST_* */
145 	unsigned hold_count;
146 	int read_error;
147 	int write_error;
148 	unsigned long state;
149 	unsigned long last_accessed;
150 	struct dm_bufio_client *c;
151 	struct list_head write_list;
152 	struct bio bio;
153 	struct bio_vec bio_vec[DM_BUFIO_INLINE_VECS];
154 #ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
155 #define MAX_STACK 10
156 	struct stack_trace stack_trace;
157 	unsigned long stack_entries[MAX_STACK];
158 #endif
159 };
160 
161 /*----------------------------------------------------------------*/
162 
163 static struct kmem_cache *dm_bufio_caches[PAGE_SHIFT - SECTOR_SHIFT];
164 static char *dm_bufio_cache_names[PAGE_SHIFT - SECTOR_SHIFT];
165 
166 static inline int dm_bufio_cache_index(struct dm_bufio_client *c)
167 {
168 	unsigned ret = c->blocks_per_page_bits - 1;
169 
170 	BUG_ON(ret >= ARRAY_SIZE(dm_bufio_caches));
171 
172 	return ret;
173 }
174 
175 #define DM_BUFIO_CACHE(c)	(dm_bufio_caches[dm_bufio_cache_index(c)])
176 #define DM_BUFIO_CACHE_NAME(c)	(dm_bufio_cache_names[dm_bufio_cache_index(c)])
177 
178 #define dm_bufio_in_request()	(!!current->bio_list)
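/*
 * dm_bufio_in_request() is true when the current task is inside
 * generic_make_request() (current->bio_list is set), e.g. in a target's
 * map function.  It selects the lockdep subclass used below and is also
 * checked with BUG_ON() in the functions that must not be called from
 * that context because they may block on I/O.
 */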
179 
180 static void dm_bufio_lock(struct dm_bufio_client *c)
181 {
182 	mutex_lock_nested(&c->lock, dm_bufio_in_request());
183 }
184 
185 static int dm_bufio_trylock(struct dm_bufio_client *c)
186 {
187 	return mutex_trylock(&c->lock);
188 }
189 
190 static void dm_bufio_unlock(struct dm_bufio_client *c)
191 {
192 	mutex_unlock(&c->lock);
193 }
194 
195 /*----------------------------------------------------------------*/
196 
197 /*
198  * Default cache size: available memory divided by the ratio.
199  */
200 static unsigned long dm_bufio_default_cache_size;
201 
202 /*
203  * Total cache size set by the user.
204  */
205 static unsigned long dm_bufio_cache_size;
206 
207 /*
208  * A copy of dm_bufio_cache_size because dm_bufio_cache_size can change
209  * at any time.  If it disagrees, the user has changed cache size.
210  */
211 static unsigned long dm_bufio_cache_size_latch;
212 
213 static DEFINE_SPINLOCK(param_spinlock);
214 
215 /*
216  * Buffers are freed after this timeout
217  */
218 static unsigned dm_bufio_max_age = DM_BUFIO_DEFAULT_AGE_SECS;
219 static unsigned dm_bufio_retain_bytes = DM_BUFIO_DEFAULT_RETAIN_BYTES;
220 
221 static unsigned long dm_bufio_peak_allocated;
222 static unsigned long dm_bufio_allocated_kmem_cache;
223 static unsigned long dm_bufio_allocated_get_free_pages;
224 static unsigned long dm_bufio_allocated_vmalloc;
225 static unsigned long dm_bufio_current_allocated;
226 
227 /*----------------------------------------------------------------*/
228 
229 /*
230  * Per-client cache: dm_bufio_cache_size / dm_bufio_client_count
231  */
232 static unsigned long dm_bufio_cache_size_per_client;
233 
234 /*
235  * The current number of clients.
236  */
237 static int dm_bufio_client_count;
238 
239 /*
240  * The list of all clients.
241  */
242 static LIST_HEAD(dm_bufio_all_clients);
243 
244 /*
245  * This mutex protects dm_bufio_cache_size_latch,
246  * dm_bufio_cache_size_per_client and dm_bufio_client_count
247  */
248 static DEFINE_MUTEX(dm_bufio_clients_lock);
249 
250 #ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
251 static void buffer_record_stack(struct dm_buffer *b)
252 {
253 	b->stack_trace.nr_entries = 0;
254 	b->stack_trace.max_entries = MAX_STACK;
255 	b->stack_trace.entries = b->stack_entries;
256 	b->stack_trace.skip = 2;
257 	save_stack_trace(&b->stack_trace);
258 }
259 #endif
260 
261 /*----------------------------------------------------------------
262  * A red/black tree acts as an index for all the buffers.
263  *--------------------------------------------------------------*/
264 static struct dm_buffer *__find(struct dm_bufio_client *c, sector_t block)
265 {
266 	struct rb_node *n = c->buffer_tree.rb_node;
267 	struct dm_buffer *b;
268 
269 	while (n) {
270 		b = container_of(n, struct dm_buffer, node);
271 
272 		if (b->block == block)
273 			return b;
274 
275 		n = (b->block < block) ? n->rb_left : n->rb_right;
276 	}
277 
278 	return NULL;
279 }
280 
281 static void __insert(struct dm_bufio_client *c, struct dm_buffer *b)
282 {
283 	struct rb_node **new = &c->buffer_tree.rb_node, *parent = NULL;
284 	struct dm_buffer *found;
285 
286 	while (*new) {
287 		found = container_of(*new, struct dm_buffer, node);
288 
289 		if (found->block == b->block) {
290 			BUG_ON(found != b);
291 			return;
292 		}
293 
294 		parent = *new;
295 		new = (found->block < b->block) ?
296 			&((*new)->rb_left) : &((*new)->rb_right);
297 	}
298 
299 	rb_link_node(&b->node, parent, new);
300 	rb_insert_color(&b->node, &c->buffer_tree);
301 }
302 
303 static void __remove(struct dm_bufio_client *c, struct dm_buffer *b)
304 {
305 	rb_erase(&b->node, &c->buffer_tree);
306 }
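/*
 * Note that __find and __insert use the same comparison: a node whose
 * block number is lower than the key being looked up (or inserted) sends
 * the walk to its left child.  This is the reverse of the usual rb-tree
 * convention, but since both functions agree, lookups always find what
 * was inserted.
 */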
307 
308 /*----------------------------------------------------------------*/
309 
310 static void adjust_total_allocated(enum data_mode data_mode, long diff)
311 {
312 	static unsigned long * const class_ptr[DATA_MODE_LIMIT] = {
313 		&dm_bufio_allocated_kmem_cache,
314 		&dm_bufio_allocated_get_free_pages,
315 		&dm_bufio_allocated_vmalloc,
316 	};
317 
318 	spin_lock(&param_spinlock);
319 
320 	*class_ptr[data_mode] += diff;
321 
322 	dm_bufio_current_allocated += diff;
323 
324 	if (dm_bufio_current_allocated > dm_bufio_peak_allocated)
325 		dm_bufio_peak_allocated = dm_bufio_current_allocated;
326 
327 	spin_unlock(&param_spinlock);
328 }
329 
330 /*
331  * Change the number of clients and recalculate per-client limit.
332  */
333 static void __cache_size_refresh(void)
334 {
335 	BUG_ON(!mutex_is_locked(&dm_bufio_clients_lock));
336 	BUG_ON(dm_bufio_client_count < 0);
337 
338 	dm_bufio_cache_size_latch = ACCESS_ONCE(dm_bufio_cache_size);
339 
340 	/*
341 	 * Use default if set to 0 and report the actual cache size used.
342 	 */
343 	if (!dm_bufio_cache_size_latch) {
344 		(void)cmpxchg(&dm_bufio_cache_size, 0,
345 			      dm_bufio_default_cache_size);
346 		dm_bufio_cache_size_latch = dm_bufio_default_cache_size;
347 	}
348 
349 	dm_bufio_cache_size_per_client = dm_bufio_cache_size_latch /
350 					 (dm_bufio_client_count ? : 1);
351 }
352 
353 /*
354  * Allocating buffer data.
355  *
356  * Small buffers are allocated with kmem_cache, to use space optimally.
357  *
358  * For large buffers, we choose between get_free_pages and vmalloc.
359  * Each has advantages and disadvantages.
360  *
361  * __get_free_pages can randomly fail if the memory is fragmented.
362  * __vmalloc won't randomly fail, but vmalloc space is limited (it may be
363  * as low as 128M) so using it for caching is not appropriate.
364  *
365  * If the allocation may fail we use __get_free_pages. Memory fragmentation
366  * won't have a fatal effect here, but it just causes flushes of some other
367  * buffers and more I/O will be performed. Don't use __get_free_pages if it
368  * always fails (i.e. order >= MAX_ORDER).
369  *
370  * If the allocation shouldn't fail we use __vmalloc. This is only for the
371  * initial reserve allocation, so there's no risk of wasting all vmalloc
372  * space.
373  */
374 static void *alloc_buffer_data(struct dm_bufio_client *c, gfp_t gfp_mask,
375 			       enum data_mode *data_mode)
376 {
377 	unsigned noio_flag;
378 	void *ptr;
379 
380 	if (c->block_size <= DM_BUFIO_BLOCK_SIZE_SLAB_LIMIT) {
381 		*data_mode = DATA_MODE_SLAB;
382 		return kmem_cache_alloc(DM_BUFIO_CACHE(c), gfp_mask);
383 	}
384 
385 	if (c->block_size <= DM_BUFIO_BLOCK_SIZE_GFP_LIMIT &&
386 	    gfp_mask & __GFP_NORETRY) {
387 		*data_mode = DATA_MODE_GET_FREE_PAGES;
388 		return (void *)__get_free_pages(gfp_mask,
389 						c->pages_per_block_bits);
390 	}
391 
392 	*data_mode = DATA_MODE_VMALLOC;
393 
394 	/*
395 	 * __vmalloc allocates the data pages and auxiliary structures with
396 	 * gfp_flags that were specified, but pagetables are always allocated
397 	 * with GFP_KERNEL, no matter what was specified as gfp_mask.
398 	 *
399 	 * Consequently, we must set per-process flag PF_MEMALLOC_NOIO so that
400 	 * all allocations done by this process (including pagetables) are done
401 	 * as if GFP_NOIO was specified.
402 	 */
403 
404 	if (gfp_mask & __GFP_NORETRY)
405 		noio_flag = memalloc_noio_save();
406 
407 	ptr = __vmalloc(c->block_size, gfp_mask | __GFP_HIGHMEM, PAGE_KERNEL);
408 
409 	if (gfp_mask & __GFP_NORETRY)
410 		memalloc_noio_restore(noio_flag);
411 
412 	return ptr;
413 }
414 
415 /*
416  * Free buffer's data.
417  */
418 static void free_buffer_data(struct dm_bufio_client *c,
419 			     void *data, enum data_mode data_mode)
420 {
421 	switch (data_mode) {
422 	case DATA_MODE_SLAB:
423 		kmem_cache_free(DM_BUFIO_CACHE(c), data);
424 		break;
425 
426 	case DATA_MODE_GET_FREE_PAGES:
427 		free_pages((unsigned long)data, c->pages_per_block_bits);
428 		break;
429 
430 	case DATA_MODE_VMALLOC:
431 		vfree(data);
432 		break;
433 
434 	default:
435 		DMCRIT("dm_bufio_free_buffer_data: bad data mode: %d",
436 		       data_mode);
437 		BUG();
438 	}
439 }
440 
441 /*
442  * Allocate buffer and its data.
443  */
444 static struct dm_buffer *alloc_buffer(struct dm_bufio_client *c, gfp_t gfp_mask)
445 {
446 	struct dm_buffer *b = kmalloc(sizeof(struct dm_buffer) + c->aux_size,
447 				      gfp_mask);
448 
449 	if (!b)
450 		return NULL;
451 
452 	b->c = c;
453 
454 	b->data = alloc_buffer_data(c, gfp_mask, &b->data_mode);
455 	if (!b->data) {
456 		kfree(b);
457 		return NULL;
458 	}
459 
460 	adjust_total_allocated(b->data_mode, (long)c->block_size);
461 
462 #ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
463 	memset(&b->stack_trace, 0, sizeof(b->stack_trace));
464 #endif
465 	return b;
466 }
467 
468 /*
469  * Free buffer and its data.
470  */
471 static void free_buffer(struct dm_buffer *b)
472 {
473 	struct dm_bufio_client *c = b->c;
474 
475 	adjust_total_allocated(b->data_mode, -(long)c->block_size);
476 
477 	free_buffer_data(c, b->data, b->data_mode);
478 	kfree(b);
479 }
480 
481 /*
482  * Link buffer to the hash list and clean or dirty queue.
483  */
484 static void __link_buffer(struct dm_buffer *b, sector_t block, int dirty)
485 {
486 	struct dm_bufio_client *c = b->c;
487 
488 	c->n_buffers[dirty]++;
489 	b->block = block;
490 	b->list_mode = dirty;
491 	list_add(&b->lru_list, &c->lru[dirty]);
492 	__insert(b->c, b);
493 	b->last_accessed = jiffies;
494 }
495 
496 /*
497  * Unlink buffer from the hash list and dirty or clean queue.
498  */
499 static void __unlink_buffer(struct dm_buffer *b)
500 {
501 	struct dm_bufio_client *c = b->c;
502 
503 	BUG_ON(!c->n_buffers[b->list_mode]);
504 
505 	c->n_buffers[b->list_mode]--;
506 	__remove(b->c, b);
507 	list_del(&b->lru_list);
508 }
509 
510 /*
511  * Place the buffer to the head of dirty or clean LRU queue.
512  */
513 static void __relink_lru(struct dm_buffer *b, int dirty)
514 {
515 	struct dm_bufio_client *c = b->c;
516 
517 	BUG_ON(!c->n_buffers[b->list_mode]);
518 
519 	c->n_buffers[b->list_mode]--;
520 	c->n_buffers[dirty]++;
521 	b->list_mode = dirty;
522 	list_move(&b->lru_list, &c->lru[dirty]);
523 	b->last_accessed = jiffies;
524 }
525 
526 /*----------------------------------------------------------------
527  * Submit I/O on the buffer.
528  *
529  * Bio interface is faster but it has some problems:
530  *	the vector list is limited (increasing this limit increases
531  *	memory-consumption per buffer, so it is not viable);
532  *
533  *	the memory must be direct-mapped, not vmalloced;
534  *
535  *	the I/O driver can reject requests spuriously if it thinks that
536  *	the requests are too big for the device or if they cross a
537  *	controller-defined memory boundary.
538  *
539  * If the buffer is small enough (up to DM_BUFIO_INLINE_VECS pages) and
540  * it is not vmalloced, try using the bio interface.
541  *
542  * If the buffer is big, if it is vmalloced or if the underlying device
543  * rejects the bio because it is too large, use dm-io layer to do the I/O.
544  * The dm-io layer splits the I/O into multiple requests, avoiding the above
545  * shortcomings.
546  *--------------------------------------------------------------*/
547 
548 /*
549  * dm-io completion routine. It just calls b->bio.bi_end_io, pretending
550  * that the request was handled directly with bio interface.
551  */
552 static void dmio_complete(unsigned long error, void *context)
553 {
554 	struct dm_buffer *b = context;
555 
556 	b->bio.bi_error = error ? -EIO : 0;
557 	b->bio.bi_end_io(&b->bio);
558 }
559 
560 static void use_dmio(struct dm_buffer *b, int rw, sector_t block,
561 		     bio_end_io_t *end_io)
562 {
563 	int r;
564 	struct dm_io_request io_req = {
565 		.bi_op = rw,
566 		.bi_op_flags = 0,
567 		.notify.fn = dmio_complete,
568 		.notify.context = b,
569 		.client = b->c->dm_io,
570 	};
571 	struct dm_io_region region = {
572 		.bdev = b->c->bdev,
573 		.sector = block << b->c->sectors_per_block_bits,
574 		.count = b->c->block_size >> SECTOR_SHIFT,
575 	};
576 
577 	if (b->data_mode != DATA_MODE_VMALLOC) {
578 		io_req.mem.type = DM_IO_KMEM;
579 		io_req.mem.ptr.addr = b->data;
580 	} else {
581 		io_req.mem.type = DM_IO_VMA;
582 		io_req.mem.ptr.vma = b->data;
583 	}
584 
585 	b->bio.bi_end_io = end_io;
586 
587 	r = dm_io(&io_req, 1, &region, NULL);
588 	if (r) {
589 		b->bio.bi_error = r;
590 		end_io(&b->bio);
591 	}
592 }
593 
594 static void inline_endio(struct bio *bio)
595 {
596 	bio_end_io_t *end_fn = bio->bi_private;
597 	int error = bio->bi_error;
598 
599 	/*
600 	 * Reset the bio to free any attached resources
601 	 * (e.g. bio integrity profiles).
602 	 */
603 	bio_reset(bio);
604 
605 	bio->bi_error = error;
606 	end_fn(bio);
607 }
608 
609 static void use_inline_bio(struct dm_buffer *b, int rw, sector_t block,
610 			   bio_end_io_t *end_io)
611 {
612 	char *ptr;
613 	int len;
614 
615 	bio_init(&b->bio, b->bio_vec, DM_BUFIO_INLINE_VECS);
616 	b->bio.bi_iter.bi_sector = block << b->c->sectors_per_block_bits;
617 	b->bio.bi_bdev = b->c->bdev;
618 	b->bio.bi_end_io = inline_endio;
619 	/*
620 	 * Use of .bi_private isn't a problem here because
621 	 * the dm_buffer's inline bio is local to bufio.
622 	 */
623 	b->bio.bi_private = end_io;
624 	bio_set_op_attrs(&b->bio, rw, 0);
625 
626 	/*
627 	 * We assume that if len >= PAGE_SIZE ptr is page-aligned.
628  * If len < PAGE_SIZE the buffer doesn't cross a page boundary.
629 	 */
630 	ptr = b->data;
631 	len = b->c->block_size;
632 
633 	if (len >= PAGE_SIZE)
634 		BUG_ON((unsigned long)ptr & (PAGE_SIZE - 1));
635 	else
636 		BUG_ON((unsigned long)ptr & (len - 1));
637 
638 	do {
639 		if (!bio_add_page(&b->bio, virt_to_page(ptr),
640 				  len < PAGE_SIZE ? len : PAGE_SIZE,
641 				  offset_in_page(ptr))) {
642 			BUG_ON(b->c->block_size <= PAGE_SIZE);
643 			use_dmio(b, rw, block, end_io);
644 			return;
645 		}
646 
647 		len -= PAGE_SIZE;
648 		ptr += PAGE_SIZE;
649 	} while (len > 0);
650 
651 	submit_bio(&b->bio);
652 }
653 
654 static void submit_io(struct dm_buffer *b, int rw, sector_t block,
655 		      bio_end_io_t *end_io)
656 {
657 	if (rw == WRITE && b->c->write_callback)
658 		b->c->write_callback(b);
659 
660 	if (b->c->block_size <= DM_BUFIO_INLINE_VECS * PAGE_SIZE &&
661 	    b->data_mode != DATA_MODE_VMALLOC)
662 		use_inline_bio(b, rw, block, end_io);
663 	else
664 		use_dmio(b, rw, block, end_io);
665 }
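/*
 * With 4 KiB pages and DM_BUFIO_INLINE_VECS of 16, for example, blocks of
 * up to 64 KiB that are not vmalloc-backed take the inline bio path above;
 * larger or vmalloc-backed blocks always go through dm-io.
 */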
666 
667 /*----------------------------------------------------------------
668  * Writing dirty buffers
669  *--------------------------------------------------------------*/
670 
671 /*
672  * The endio routine for write.
673  *
674  * Set the error, clear B_WRITING bit and wake anyone who was waiting on
675  * it.
676  */
677 static void write_endio(struct bio *bio)
678 {
679 	struct dm_buffer *b = container_of(bio, struct dm_buffer, bio);
680 
681 	b->write_error = bio->bi_error;
682 	if (unlikely(bio->bi_error)) {
683 		struct dm_bufio_client *c = b->c;
684 		int error = bio->bi_error;
685 		(void)cmpxchg(&c->async_write_error, 0, error);
686 	}
687 
688 	BUG_ON(!test_bit(B_WRITING, &b->state));
689 
690 	smp_mb__before_atomic();
691 	clear_bit(B_WRITING, &b->state);
692 	smp_mb__after_atomic();
693 
694 	wake_up_bit(&b->state, B_WRITING);
695 }
696 
697 /*
698  * Initiate a write on a dirty buffer, but don't wait for it.
699  *
700  * - If the buffer is not dirty, exit.
701  * - If there is a previous write going on, wait for it to finish (we can't
702  *   have two writes on the same buffer simultaneously).
703  * - Submit our write and don't wait on it. We set B_WRITING indicating
704  *   that there is a write in progress.
705  */
706 static void __write_dirty_buffer(struct dm_buffer *b,
707 				 struct list_head *write_list)
708 {
709 	if (!test_bit(B_DIRTY, &b->state))
710 		return;
711 
712 	clear_bit(B_DIRTY, &b->state);
713 	wait_on_bit_lock_io(&b->state, B_WRITING, TASK_UNINTERRUPTIBLE);
714 
715 	if (!write_list)
716 		submit_io(b, WRITE, b->block, write_endio);
717 	else
718 		list_add_tail(&b->write_list, write_list);
719 }
720 
721 static void __flush_write_list(struct list_head *write_list)
722 {
723 	struct blk_plug plug;
724 	blk_start_plug(&plug);
725 	while (!list_empty(write_list)) {
726 		struct dm_buffer *b =
727 			list_entry(write_list->next, struct dm_buffer, write_list);
728 		list_del(&b->write_list);
729 		submit_io(b, WRITE, b->block, write_endio);
730 		cond_resched();
731 	}
732 	blk_finish_plug(&plug);
733 }
734 
735 /*
736  * Wait until any activity on the buffer finishes.  Possibly write the
737  * buffer if it is dirty.  When this function finishes, there is no I/O
738  * running on the buffer and the buffer is not dirty.
739  */
740 static void __make_buffer_clean(struct dm_buffer *b)
741 {
742 	BUG_ON(b->hold_count);
743 
744 	if (!b->state)	/* fast case */
745 		return;
746 
747 	wait_on_bit_io(&b->state, B_READING, TASK_UNINTERRUPTIBLE);
748 	__write_dirty_buffer(b, NULL);
749 	wait_on_bit_io(&b->state, B_WRITING, TASK_UNINTERRUPTIBLE);
750 }
751 
752 /*
753  * Find some buffer that is not held by anybody, clean it, unlink it and
754  * return it.
755  */
756 static struct dm_buffer *__get_unclaimed_buffer(struct dm_bufio_client *c)
757 {
758 	struct dm_buffer *b;
759 
760 	list_for_each_entry_reverse(b, &c->lru[LIST_CLEAN], lru_list) {
761 		BUG_ON(test_bit(B_WRITING, &b->state));
762 		BUG_ON(test_bit(B_DIRTY, &b->state));
763 
764 		if (!b->hold_count) {
765 			__make_buffer_clean(b);
766 			__unlink_buffer(b);
767 			return b;
768 		}
769 		cond_resched();
770 	}
771 
772 	list_for_each_entry_reverse(b, &c->lru[LIST_DIRTY], lru_list) {
773 		BUG_ON(test_bit(B_READING, &b->state));
774 
775 		if (!b->hold_count) {
776 			__make_buffer_clean(b);
777 			__unlink_buffer(b);
778 			return b;
779 		}
780 		cond_resched();
781 	}
782 
783 	return NULL;
784 }
785 
786 /*
787  * Wait until some other thread frees a buffer or releases its hold count
788  * on some buffer.
789  *
790  * This function is entered with c->lock held, drops it and regains it
791  * before exiting.
792  */
793 static void __wait_for_free_buffer(struct dm_bufio_client *c)
794 {
795 	DECLARE_WAITQUEUE(wait, current);
796 
797 	add_wait_queue(&c->free_buffer_wait, &wait);
798 	set_current_state(TASK_UNINTERRUPTIBLE);
799 	dm_bufio_unlock(c);
800 
801 	io_schedule();
802 
803 	remove_wait_queue(&c->free_buffer_wait, &wait);
804 
805 	dm_bufio_lock(c);
806 }
807 
808 enum new_flag {
809 	NF_FRESH = 0,
810 	NF_READ = 1,
811 	NF_GET = 2,
812 	NF_PREFETCH = 3
813 };
814 
815 /*
816  * Allocate a new buffer. If the allocation is not possible, wait until
817  * some other thread frees a buffer.
818  *
819  * May drop the lock and regain it.
820  */
821 static struct dm_buffer *__alloc_buffer_wait_no_callback(struct dm_bufio_client *c, enum new_flag nf)
822 {
823 	struct dm_buffer *b;
824 	bool tried_noio_alloc = false;
825 
826 	/*
827 	 * dm-bufio is resistant to allocation failures (it just keeps
828 	 * one buffer reserved in cases all the allocations fail).
829 	 * So set flags to not try too hard:
830 	 *	GFP_NOWAIT: don't wait; if we need to sleep we'll release our
831 	 *		    mutex and wait ourselves.
832 	 *	__GFP_NORETRY: don't retry and rather return failure
833 	 *	__GFP_NOMEMALLOC: don't use emergency reserves
834 	 *	__GFP_NOWARN: don't print a warning in case of failure
835 	 *
836 	 * For debugging, if we set the cache size to 1, no new buffers will
837 	 * be allocated.
838 	 */
839 	while (1) {
840 		if (dm_bufio_cache_size_latch != 1) {
841 			b = alloc_buffer(c, GFP_NOWAIT | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
842 			if (b)
843 				return b;
844 		}
845 
846 		if (nf == NF_PREFETCH)
847 			return NULL;
848 
849 		if (dm_bufio_cache_size_latch != 1 && !tried_noio_alloc) {
850 			dm_bufio_unlock(c);
851 			b = alloc_buffer(c, GFP_NOIO | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
852 			dm_bufio_lock(c);
853 			if (b)
854 				return b;
855 			tried_noio_alloc = true;
856 		}
857 
858 		if (!list_empty(&c->reserved_buffers)) {
859 			b = list_entry(c->reserved_buffers.next,
860 				       struct dm_buffer, lru_list);
861 			list_del(&b->lru_list);
862 			c->need_reserved_buffers++;
863 
864 			return b;
865 		}
866 
867 		b = __get_unclaimed_buffer(c);
868 		if (b)
869 			return b;
870 
871 		__wait_for_free_buffer(c);
872 	}
873 }
874 
875 static struct dm_buffer *__alloc_buffer_wait(struct dm_bufio_client *c, enum new_flag nf)
876 {
877 	struct dm_buffer *b = __alloc_buffer_wait_no_callback(c, nf);
878 
879 	if (!b)
880 		return NULL;
881 
882 	if (c->alloc_callback)
883 		c->alloc_callback(b);
884 
885 	return b;
886 }
887 
888 /*
889  * Free a buffer and wake other threads waiting for free buffers.
890  */
891 static void __free_buffer_wake(struct dm_buffer *b)
892 {
893 	struct dm_bufio_client *c = b->c;
894 
895 	if (!c->need_reserved_buffers)
896 		free_buffer(b);
897 	else {
898 		list_add(&b->lru_list, &c->reserved_buffers);
899 		c->need_reserved_buffers--;
900 	}
901 
902 	wake_up(&c->free_buffer_wait);
903 }
904 
905 static void __write_dirty_buffers_async(struct dm_bufio_client *c, int no_wait,
906 					struct list_head *write_list)
907 {
908 	struct dm_buffer *b, *tmp;
909 
910 	list_for_each_entry_safe_reverse(b, tmp, &c->lru[LIST_DIRTY], lru_list) {
911 		BUG_ON(test_bit(B_READING, &b->state));
912 
913 		if (!test_bit(B_DIRTY, &b->state) &&
914 		    !test_bit(B_WRITING, &b->state)) {
915 			__relink_lru(b, LIST_CLEAN);
916 			continue;
917 		}
918 
919 		if (no_wait && test_bit(B_WRITING, &b->state))
920 			return;
921 
922 		__write_dirty_buffer(b, write_list);
923 		cond_resched();
924 	}
925 }
926 
927 /*
928  * Get writeback threshold and buffer limit for a given client.
929  */
930 static void __get_memory_limit(struct dm_bufio_client *c,
931 			       unsigned long *threshold_buffers,
932 			       unsigned long *limit_buffers)
933 {
934 	unsigned long buffers;
935 
936 	if (ACCESS_ONCE(dm_bufio_cache_size) != dm_bufio_cache_size_latch) {
937 		mutex_lock(&dm_bufio_clients_lock);
938 		__cache_size_refresh();
939 		mutex_unlock(&dm_bufio_clients_lock);
940 	}
941 
942 	buffers = dm_bufio_cache_size_per_client >>
943 		  (c->sectors_per_block_bits + SECTOR_SHIFT);
944 
945 	if (buffers < c->minimum_buffers)
946 		buffers = c->minimum_buffers;
947 
948 	*limit_buffers = buffers;
949 	*threshold_buffers = buffers * DM_BUFIO_WRITEBACK_PERCENT / 100;
950 }
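/*
 * Worked example: with a per-client share of 100 MiB and 4 KiB blocks
 * (and ignoring the minimum_buffers floor), limit_buffers is 25600 and
 * threshold_buffers is 75% of that, i.e. 19200, so background writeback
 * starts once 19200 buffers are dirty.
 */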
951 
952 /*
953  * Check if we're over the watermark.  If we are over "limit_buffers", free
954  * unclaimed buffers until we are back under the limit; if the number of dirty
955  * buffers exceeds "threshold_buffers", start writing them back asynchronously.
956  */
957 static void __check_watermark(struct dm_bufio_client *c,
958 			      struct list_head *write_list)
959 {
960 	unsigned long threshold_buffers, limit_buffers;
961 
962 	__get_memory_limit(c, &threshold_buffers, &limit_buffers);
963 
964 	while (c->n_buffers[LIST_CLEAN] + c->n_buffers[LIST_DIRTY] >
965 	       limit_buffers) {
966 
967 		struct dm_buffer *b = __get_unclaimed_buffer(c);
968 
969 		if (!b)
970 			return;
971 
972 		__free_buffer_wake(b);
973 		cond_resched();
974 	}
975 
976 	if (c->n_buffers[LIST_DIRTY] > threshold_buffers)
977 		__write_dirty_buffers_async(c, 1, write_list);
978 }
979 
980 /*----------------------------------------------------------------
981  * Getting a buffer
982  *--------------------------------------------------------------*/
983 
984 static struct dm_buffer *__bufio_new(struct dm_bufio_client *c, sector_t block,
985 				     enum new_flag nf, int *need_submit,
986 				     struct list_head *write_list)
987 {
988 	struct dm_buffer *b, *new_b = NULL;
989 
990 	*need_submit = 0;
991 
992 	b = __find(c, block);
993 	if (b)
994 		goto found_buffer;
995 
996 	if (nf == NF_GET)
997 		return NULL;
998 
999 	new_b = __alloc_buffer_wait(c, nf);
1000 	if (!new_b)
1001 		return NULL;
1002 
1003 	/*
1004 	 * We've had a period where the mutex was unlocked, so need to
1005 	 * recheck the hash table.
1006 	 */
1007 	b = __find(c, block);
1008 	if (b) {
1009 		__free_buffer_wake(new_b);
1010 		goto found_buffer;
1011 	}
1012 
1013 	__check_watermark(c, write_list);
1014 
1015 	b = new_b;
1016 	b->hold_count = 1;
1017 	b->read_error = 0;
1018 	b->write_error = 0;
1019 	__link_buffer(b, block, LIST_CLEAN);
1020 
1021 	if (nf == NF_FRESH) {
1022 		b->state = 0;
1023 		return b;
1024 	}
1025 
1026 	b->state = 1 << B_READING;
1027 	*need_submit = 1;
1028 
1029 	return b;
1030 
1031 found_buffer:
1032 	if (nf == NF_PREFETCH)
1033 		return NULL;
1034 	/*
1035 	 * Note: it is essential that we don't wait for the buffer to be
1036  * read if the dm_bufio_get function is used. Both dm_bufio_get and
1037 	 * dm_bufio_prefetch can be used in the driver request routine.
1038 	 * If the user called both dm_bufio_prefetch and dm_bufio_get on
1039 	 * the same buffer, it would deadlock if we waited.
1040 	 */
1041 	if (nf == NF_GET && unlikely(test_bit(B_READING, &b->state)))
1042 		return NULL;
1043 
1044 	b->hold_count++;
1045 	__relink_lru(b, test_bit(B_DIRTY, &b->state) ||
1046 		     test_bit(B_WRITING, &b->state));
1047 	return b;
1048 }
1049 
1050 /*
1051  * The endio routine for reading: set the error, clear the bit and wake up
1052  * anyone waiting on the buffer.
1053  */
1054 static void read_endio(struct bio *bio)
1055 {
1056 	struct dm_buffer *b = container_of(bio, struct dm_buffer, bio);
1057 
1058 	b->read_error = bio->bi_error;
1059 
1060 	BUG_ON(!test_bit(B_READING, &b->state));
1061 
1062 	smp_mb__before_atomic();
1063 	clear_bit(B_READING, &b->state);
1064 	smp_mb__after_atomic();
1065 
1066 	wake_up_bit(&b->state, B_READING);
1067 }
1068 
1069 /*
1070  * A common routine for dm_bufio_new and dm_bufio_read.  Operation of these
1071  * functions is similar except that dm_bufio_new doesn't read the
1072  * buffer from the disk (assuming that the caller overwrites all the data
1073  * and uses dm_bufio_mark_buffer_dirty to write new data back).
1074  */
1075 static void *new_read(struct dm_bufio_client *c, sector_t block,
1076 		      enum new_flag nf, struct dm_buffer **bp)
1077 {
1078 	int need_submit;
1079 	struct dm_buffer *b;
1080 
1081 	LIST_HEAD(write_list);
1082 
1083 	dm_bufio_lock(c);
1084 	b = __bufio_new(c, block, nf, &need_submit, &write_list);
1085 #ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
1086 	if (b && b->hold_count == 1)
1087 		buffer_record_stack(b);
1088 #endif
1089 	dm_bufio_unlock(c);
1090 
1091 	__flush_write_list(&write_list);
1092 
1093 	if (!b)
1094 		return NULL;
1095 
1096 	if (need_submit)
1097 		submit_io(b, READ, b->block, read_endio);
1098 
1099 	wait_on_bit_io(&b->state, B_READING, TASK_UNINTERRUPTIBLE);
1100 
1101 	if (b->read_error) {
1102 		int error = b->read_error;
1103 
1104 		dm_bufio_release(b);
1105 
1106 		return ERR_PTR(error);
1107 	}
1108 
1109 	*bp = b;
1110 
1111 	return b->data;
1112 }
1113 
1114 void *dm_bufio_get(struct dm_bufio_client *c, sector_t block,
1115 		   struct dm_buffer **bp)
1116 {
1117 	return new_read(c, block, NF_GET, bp);
1118 }
1119 EXPORT_SYMBOL_GPL(dm_bufio_get);
1120 
1121 void *dm_bufio_read(struct dm_bufio_client *c, sector_t block,
1122 		    struct dm_buffer **bp)
1123 {
1124 	BUG_ON(dm_bufio_in_request());
1125 
1126 	return new_read(c, block, NF_READ, bp);
1127 }
1128 EXPORT_SYMBOL_GPL(dm_bufio_read);
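/*
 * Minimal usage sketch (not taken from a real caller; error handling is
 * trimmed and "c" and "block" are placeholders):
 *
 *	struct dm_buffer *bp;
 *	void *data = dm_bufio_read(c, block, &bp);
 *
 *	if (!IS_ERR(data)) {
 *		... examine or modify the block_size bytes at "data" ...
 *		dm_bufio_mark_buffer_dirty(bp);	(only if modified)
 *		dm_bufio_release(bp);
 *	}
 */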
1129 
1130 void *dm_bufio_new(struct dm_bufio_client *c, sector_t block,
1131 		   struct dm_buffer **bp)
1132 {
1133 	BUG_ON(dm_bufio_in_request());
1134 
1135 	return new_read(c, block, NF_FRESH, bp);
1136 }
1137 EXPORT_SYMBOL_GPL(dm_bufio_new);
1138 
1139 void dm_bufio_prefetch(struct dm_bufio_client *c,
1140 		       sector_t block, unsigned n_blocks)
1141 {
1142 	struct blk_plug plug;
1143 
1144 	LIST_HEAD(write_list);
1145 
1146 	BUG_ON(dm_bufio_in_request());
1147 
1148 	blk_start_plug(&plug);
1149 	dm_bufio_lock(c);
1150 
1151 	for (; n_blocks--; block++) {
1152 		int need_submit;
1153 		struct dm_buffer *b;
1154 		b = __bufio_new(c, block, NF_PREFETCH, &need_submit,
1155 				&write_list);
1156 		if (unlikely(!list_empty(&write_list))) {
1157 			dm_bufio_unlock(c);
1158 			blk_finish_plug(&plug);
1159 			__flush_write_list(&write_list);
1160 			blk_start_plug(&plug);
1161 			dm_bufio_lock(c);
1162 		}
1163 		if (unlikely(b != NULL)) {
1164 			dm_bufio_unlock(c);
1165 
1166 			if (need_submit)
1167 				submit_io(b, READ, b->block, read_endio);
1168 			dm_bufio_release(b);
1169 
1170 			cond_resched();
1171 
1172 			if (!n_blocks)
1173 				goto flush_plug;
1174 			dm_bufio_lock(c);
1175 		}
1176 	}
1177 
1178 	dm_bufio_unlock(c);
1179 
1180 flush_plug:
1181 	blk_finish_plug(&plug);
1182 }
1183 EXPORT_SYMBOL_GPL(dm_bufio_prefetch);
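/*
 * dm_bufio_prefetch() pairs naturally with dm_bufio_get(): the prefetch
 * starts the reads in the background, and a later dm_bufio_get() returns
 * the buffer only if its read has already completed (NULL otherwise), so
 * the caller never waits for the read itself.  See the note above about
 * why dm_bufio_get() must not wait for a buffer that is being read.
 */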
1184 
1185 void dm_bufio_release(struct dm_buffer *b)
1186 {
1187 	struct dm_bufio_client *c = b->c;
1188 
1189 	dm_bufio_lock(c);
1190 
1191 	BUG_ON(!b->hold_count);
1192 
1193 	b->hold_count--;
1194 	if (!b->hold_count) {
1195 		wake_up(&c->free_buffer_wait);
1196 
1197 		/*
1198 		 * If there were errors on the buffer, and the buffer is not
1199 		 * to be written, free the buffer. There is no point in caching
1200 		 * an invalid buffer.
1201 		 */
1202 		if ((b->read_error || b->write_error) &&
1203 		    !test_bit(B_READING, &b->state) &&
1204 		    !test_bit(B_WRITING, &b->state) &&
1205 		    !test_bit(B_DIRTY, &b->state)) {
1206 			__unlink_buffer(b);
1207 			__free_buffer_wake(b);
1208 		}
1209 	}
1210 
1211 	dm_bufio_unlock(c);
1212 }
1213 EXPORT_SYMBOL_GPL(dm_bufio_release);
1214 
1215 void dm_bufio_mark_buffer_dirty(struct dm_buffer *b)
1216 {
1217 	struct dm_bufio_client *c = b->c;
1218 
1219 	dm_bufio_lock(c);
1220 
1221 	BUG_ON(test_bit(B_READING, &b->state));
1222 
1223 	if (!test_and_set_bit(B_DIRTY, &b->state))
1224 		__relink_lru(b, LIST_DIRTY);
1225 
1226 	dm_bufio_unlock(c);
1227 }
1228 EXPORT_SYMBOL_GPL(dm_bufio_mark_buffer_dirty);
1229 
1230 void dm_bufio_write_dirty_buffers_async(struct dm_bufio_client *c)
1231 {
1232 	LIST_HEAD(write_list);
1233 
1234 	BUG_ON(dm_bufio_in_request());
1235 
1236 	dm_bufio_lock(c);
1237 	__write_dirty_buffers_async(c, 0, &write_list);
1238 	dm_bufio_unlock(c);
1239 	__flush_write_list(&write_list);
1240 }
1241 EXPORT_SYMBOL_GPL(dm_bufio_write_dirty_buffers_async);
1242 
1243 /*
1244  * For performance, it is essential that the buffers are written asynchronously
1245  * and simultaneously (so that the block layer can merge the writes) and then
1246  * waited upon.
1247  *
1248  * Finally, we flush hardware disk cache.
1249  */
1250 int dm_bufio_write_dirty_buffers(struct dm_bufio_client *c)
1251 {
1252 	int a, f;
1253 	unsigned long buffers_processed = 0;
1254 	struct dm_buffer *b, *tmp;
1255 
1256 	LIST_HEAD(write_list);
1257 
1258 	dm_bufio_lock(c);
1259 	__write_dirty_buffers_async(c, 0, &write_list);
1260 	dm_bufio_unlock(c);
1261 	__flush_write_list(&write_list);
1262 	dm_bufio_lock(c);
1263 
1264 again:
1265 	list_for_each_entry_safe_reverse(b, tmp, &c->lru[LIST_DIRTY], lru_list) {
1266 		int dropped_lock = 0;
1267 
1268 		if (buffers_processed < c->n_buffers[LIST_DIRTY])
1269 			buffers_processed++;
1270 
1271 		BUG_ON(test_bit(B_READING, &b->state));
1272 
1273 		if (test_bit(B_WRITING, &b->state)) {
1274 			if (buffers_processed < c->n_buffers[LIST_DIRTY]) {
1275 				dropped_lock = 1;
1276 				b->hold_count++;
1277 				dm_bufio_unlock(c);
1278 				wait_on_bit_io(&b->state, B_WRITING,
1279 					       TASK_UNINTERRUPTIBLE);
1280 				dm_bufio_lock(c);
1281 				b->hold_count--;
1282 			} else
1283 				wait_on_bit_io(&b->state, B_WRITING,
1284 					       TASK_UNINTERRUPTIBLE);
1285 		}
1286 
1287 		if (!test_bit(B_DIRTY, &b->state) &&
1288 		    !test_bit(B_WRITING, &b->state))
1289 			__relink_lru(b, LIST_CLEAN);
1290 
1291 		cond_resched();
1292 
1293 		/*
1294 		 * If we dropped the lock, the list is no longer consistent,
1295 		 * so we must restart the search.
1296 		 *
1297 		 * In the most common case, the buffer just processed is
1298 		 * relinked to the clean list, so we won't loop scanning the
1299 		 * same buffer again and again.
1300 		 *
1301 		 * This may livelock if there is another thread simultaneously
1302 		 * dirtying buffers, so we count the number of buffers walked
1303 		 * and if it exceeds the total number of buffers, it means that
1304 		 * someone is doing some writes simultaneously with us.  In
1305 		 * this case, stop, dropping the lock.
1306 		 */
1307 		if (dropped_lock)
1308 			goto again;
1309 	}
1310 	wake_up(&c->free_buffer_wait);
1311 	dm_bufio_unlock(c);
1312 
1313 	a = xchg(&c->async_write_error, 0);
1314 	f = dm_bufio_issue_flush(c);
1315 	if (a)
1316 		return a;
1317 
1318 	return f;
1319 }
1320 EXPORT_SYMBOL_GPL(dm_bufio_write_dirty_buffers);
1321 
1322 /*
1323  * Use dm-io to send an empty barrier and flush the device.
1324  */
1325 int dm_bufio_issue_flush(struct dm_bufio_client *c)
1326 {
1327 	struct dm_io_request io_req = {
1328 		.bi_op = REQ_OP_WRITE,
1329 		.bi_op_flags = REQ_PREFLUSH,
1330 		.mem.type = DM_IO_KMEM,
1331 		.mem.ptr.addr = NULL,
1332 		.client = c->dm_io,
1333 	};
1334 	struct dm_io_region io_reg = {
1335 		.bdev = c->bdev,
1336 		.sector = 0,
1337 		.count = 0,
1338 	};
1339 
1340 	BUG_ON(dm_bufio_in_request());
1341 
1342 	return dm_io(&io_req, 1, &io_reg, NULL);
1343 }
1344 EXPORT_SYMBOL_GPL(dm_bufio_issue_flush);
1345 
1346 /*
1347  * We first delete any other buffer that may be at that new location.
1348  *
1349  * Then, we write the buffer to the original location if it was dirty.
1350  *
1351  * Then, if we are the only one who is holding the buffer, relink the buffer
1352  * in the buffer tree for the new location.
1353  *
1354  * If there was someone else holding the buffer, we write it to the new
1355  * location but not relink it, because that other user needs to have the buffer
1356  * at the same place.
1357  */
1358 void dm_bufio_release_move(struct dm_buffer *b, sector_t new_block)
1359 {
1360 	struct dm_bufio_client *c = b->c;
1361 	struct dm_buffer *new;
1362 
1363 	BUG_ON(dm_bufio_in_request());
1364 
1365 	dm_bufio_lock(c);
1366 
1367 retry:
1368 	new = __find(c, new_block);
1369 	if (new) {
1370 		if (new->hold_count) {
1371 			__wait_for_free_buffer(c);
1372 			goto retry;
1373 		}
1374 
1375 		/*
1376 		 * FIXME: Is there any point waiting for a write that's going
1377 		 * to be overwritten in a bit?
1378 		 */
1379 		__make_buffer_clean(new);
1380 		__unlink_buffer(new);
1381 		__free_buffer_wake(new);
1382 	}
1383 
1384 	BUG_ON(!b->hold_count);
1385 	BUG_ON(test_bit(B_READING, &b->state));
1386 
1387 	__write_dirty_buffer(b, NULL);
1388 	if (b->hold_count == 1) {
1389 		wait_on_bit_io(&b->state, B_WRITING,
1390 			       TASK_UNINTERRUPTIBLE);
1391 		set_bit(B_DIRTY, &b->state);
1392 		__unlink_buffer(b);
1393 		__link_buffer(b, new_block, LIST_DIRTY);
1394 	} else {
1395 		sector_t old_block;
1396 		wait_on_bit_lock_io(&b->state, B_WRITING,
1397 				    TASK_UNINTERRUPTIBLE);
1398 		/*
1399 		 * Relink buffer to "new_block" so that write_callback
1400 		 * sees "new_block" as a block number.
1401 		 * After the write, link the buffer back to old_block.
1402 		 * All this must be done in bufio lock, so that block number
1403 		 * change isn't visible to other threads.
1404 		 */
1405 		old_block = b->block;
1406 		__unlink_buffer(b);
1407 		__link_buffer(b, new_block, b->list_mode);
1408 		submit_io(b, WRITE, new_block, write_endio);
1409 		wait_on_bit_io(&b->state, B_WRITING,
1410 			       TASK_UNINTERRUPTIBLE);
1411 		__unlink_buffer(b);
1412 		__link_buffer(b, old_block, b->list_mode);
1413 	}
1414 
1415 	dm_bufio_unlock(c);
1416 	dm_bufio_release(b);
1417 }
1418 EXPORT_SYMBOL_GPL(dm_bufio_release_move);
1419 
1420 /*
1421  * Free the given buffer.
1422  *
1423  * This is just a hint; if the buffer is in use or dirty, this function
1424  * does nothing.
1425  */
1426 void dm_bufio_forget(struct dm_bufio_client *c, sector_t block)
1427 {
1428 	struct dm_buffer *b;
1429 
1430 	dm_bufio_lock(c);
1431 
1432 	b = __find(c, block);
1433 	if (b && likely(!b->hold_count) && likely(!b->state)) {
1434 		__unlink_buffer(b);
1435 		__free_buffer_wake(b);
1436 	}
1437 
1438 	dm_bufio_unlock(c);
1439 }
1440 EXPORT_SYMBOL(dm_bufio_forget);
1441 
1442 void dm_bufio_set_minimum_buffers(struct dm_bufio_client *c, unsigned n)
1443 {
1444 	c->minimum_buffers = n;
1445 }
1446 EXPORT_SYMBOL(dm_bufio_set_minimum_buffers);
1447 
1448 unsigned dm_bufio_get_block_size(struct dm_bufio_client *c)
1449 {
1450 	return c->block_size;
1451 }
1452 EXPORT_SYMBOL_GPL(dm_bufio_get_block_size);
1453 
1454 sector_t dm_bufio_get_device_size(struct dm_bufio_client *c)
1455 {
1456 	return i_size_read(c->bdev->bd_inode) >>
1457 			   (SECTOR_SHIFT + c->sectors_per_block_bits);
1458 }
1459 EXPORT_SYMBOL_GPL(dm_bufio_get_device_size);
1460 
1461 sector_t dm_bufio_get_block_number(struct dm_buffer *b)
1462 {
1463 	return b->block;
1464 }
1465 EXPORT_SYMBOL_GPL(dm_bufio_get_block_number);
1466 
1467 void *dm_bufio_get_block_data(struct dm_buffer *b)
1468 {
1469 	return b->data;
1470 }
1471 EXPORT_SYMBOL_GPL(dm_bufio_get_block_data);
1472 
1473 void *dm_bufio_get_aux_data(struct dm_buffer *b)
1474 {
1475 	return b + 1;
1476 }
1477 EXPORT_SYMBOL_GPL(dm_bufio_get_aux_data);
1478 
1479 struct dm_bufio_client *dm_bufio_get_client(struct dm_buffer *b)
1480 {
1481 	return b->c;
1482 }
1483 EXPORT_SYMBOL_GPL(dm_bufio_get_client);
1484 
1485 static void drop_buffers(struct dm_bufio_client *c)
1486 {
1487 	struct dm_buffer *b;
1488 	int i;
1489 	bool warned = false;
1490 
1491 	BUG_ON(dm_bufio_in_request());
1492 
1493 	/*
1494 	 * An optimization so that the buffers are not written one-by-one.
1495 	 */
1496 	dm_bufio_write_dirty_buffers_async(c);
1497 
1498 	dm_bufio_lock(c);
1499 
1500 	while ((b = __get_unclaimed_buffer(c)))
1501 		__free_buffer_wake(b);
1502 
1503 	for (i = 0; i < LIST_SIZE; i++)
1504 		list_for_each_entry(b, &c->lru[i], lru_list) {
1505 			WARN_ON(!warned);
1506 			warned = true;
1507 			DMERR("leaked buffer %llx, hold count %u, list %d",
1508 			      (unsigned long long)b->block, b->hold_count, i);
1509 #ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
1510 			print_stack_trace(&b->stack_trace, 1);
1511 			b->hold_count = 0; /* mark unclaimed to avoid BUG_ON below */
1512 #endif
1513 		}
1514 
1515 #ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
1516 	while ((b = __get_unclaimed_buffer(c)))
1517 		__free_buffer_wake(b);
1518 #endif
1519 
1520 	for (i = 0; i < LIST_SIZE; i++)
1521 		BUG_ON(!list_empty(&c->lru[i]));
1522 
1523 	dm_bufio_unlock(c);
1524 }
1525 
1526 /*
1527  * We may not be able to evict this buffer if IO is pending or the client
1528  * is still using it.  The caller is expected to know the buffer is too old.
1529  *
1530  * And if GFP_NOFS is used, we must not do any I/O because we hold
1531  * dm_bufio_clients_lock and we would risk deadlock if the I/O gets
1532  * rerouted to different bufio client.
1533  */
1534 static bool __try_evict_buffer(struct dm_buffer *b, gfp_t gfp)
1535 {
1536 	if (!(gfp & __GFP_FS)) {
1537 		if (test_bit(B_READING, &b->state) ||
1538 		    test_bit(B_WRITING, &b->state) ||
1539 		    test_bit(B_DIRTY, &b->state))
1540 			return false;
1541 	}
1542 
1543 	if (b->hold_count)
1544 		return false;
1545 
1546 	__make_buffer_clean(b);
1547 	__unlink_buffer(b);
1548 	__free_buffer_wake(b);
1549 
1550 	return true;
1551 }
1552 
1553 static unsigned get_retain_buffers(struct dm_bufio_client *c)
1554 {
1555 	unsigned retain_bytes = ACCESS_ONCE(dm_bufio_retain_bytes);
1556 	return retain_bytes / c->block_size;
1557 }
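/*
 * With the defaults (retain_bytes of 256 KiB) and 4 KiB blocks this works
 * out to 64 buffers that the shrinker and the ageing timer try to keep
 * cached even under memory pressure.
 */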
1558 
1559 static unsigned long __scan(struct dm_bufio_client *c, unsigned long nr_to_scan,
1560 			    gfp_t gfp_mask)
1561 {
1562 	int l;
1563 	struct dm_buffer *b, *tmp;
1564 	unsigned long freed = 0;
1565 	unsigned long count = nr_to_scan;
1566 	unsigned retain_target = get_retain_buffers(c);
1567 
1568 	for (l = 0; l < LIST_SIZE; l++) {
1569 		list_for_each_entry_safe_reverse(b, tmp, &c->lru[l], lru_list) {
1570 			if (__try_evict_buffer(b, gfp_mask))
1571 				freed++;
1572 			if (!--nr_to_scan || ((count - freed) <= retain_target))
1573 				return freed;
1574 			cond_resched();
1575 		}
1576 	}
1577 	return freed;
1578 }
1579 
1580 static unsigned long
1581 dm_bufio_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
1582 {
1583 	struct dm_bufio_client *c;
1584 	unsigned long freed;
1585 
1586 	c = container_of(shrink, struct dm_bufio_client, shrinker);
1587 	if (sc->gfp_mask & __GFP_FS)
1588 		dm_bufio_lock(c);
1589 	else if (!dm_bufio_trylock(c))
1590 		return SHRINK_STOP;
1591 
1592 	freed  = __scan(c, sc->nr_to_scan, sc->gfp_mask);
1593 	dm_bufio_unlock(c);
1594 	return freed;
1595 }
1596 
1597 static unsigned long
1598 dm_bufio_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
1599 {
1600 	struct dm_bufio_client *c = container_of(shrink, struct dm_bufio_client, shrinker);
1601 
1602 	return ACCESS_ONCE(c->n_buffers[LIST_CLEAN]) + ACCESS_ONCE(c->n_buffers[LIST_DIRTY]);
1603 }
1604 
1605 /*
1606  * Create the buffering interface
1607  */
1608 struct dm_bufio_client *dm_bufio_client_create(struct block_device *bdev, unsigned block_size,
1609 					       unsigned reserved_buffers, unsigned aux_size,
1610 					       void (*alloc_callback)(struct dm_buffer *),
1611 					       void (*write_callback)(struct dm_buffer *))
1612 {
1613 	int r;
1614 	struct dm_bufio_client *c;
1615 	unsigned i;
1616 
1617 	BUG_ON(block_size < 1 << SECTOR_SHIFT ||
1618 	       (block_size & (block_size - 1)));
1619 
1620 	c = kzalloc(sizeof(*c), GFP_KERNEL);
1621 	if (!c) {
1622 		r = -ENOMEM;
1623 		goto bad_client;
1624 	}
1625 	c->buffer_tree = RB_ROOT;
1626 
1627 	c->bdev = bdev;
1628 	c->block_size = block_size;
1629 	c->sectors_per_block_bits = __ffs(block_size) - SECTOR_SHIFT;
1630 	c->pages_per_block_bits = (__ffs(block_size) >= PAGE_SHIFT) ?
1631 				  __ffs(block_size) - PAGE_SHIFT : 0;
1632 	c->blocks_per_page_bits = (__ffs(block_size) < PAGE_SHIFT ?
1633 				  PAGE_SHIFT - __ffs(block_size) : 0);
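	/*
	 * Example: for a 4 KiB block size on a 4 KiB-page machine this gives
	 * sectors_per_block_bits = 3 and both pages_per_block_bits and
	 * blocks_per_page_bits = 0; for a 512-byte block size it gives
	 * sectors_per_block_bits = 0 and blocks_per_page_bits = 3, which
	 * selects the per-block-size slab cache set up below.
	 */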
1634 
1635 	c->aux_size = aux_size;
1636 	c->alloc_callback = alloc_callback;
1637 	c->write_callback = write_callback;
1638 
1639 	for (i = 0; i < LIST_SIZE; i++) {
1640 		INIT_LIST_HEAD(&c->lru[i]);
1641 		c->n_buffers[i] = 0;
1642 	}
1643 
1644 	mutex_init(&c->lock);
1645 	INIT_LIST_HEAD(&c->reserved_buffers);
1646 	c->need_reserved_buffers = reserved_buffers;
1647 
1648 	c->minimum_buffers = DM_BUFIO_MIN_BUFFERS;
1649 
1650 	init_waitqueue_head(&c->free_buffer_wait);
1651 	c->async_write_error = 0;
1652 
1653 	c->dm_io = dm_io_client_create();
1654 	if (IS_ERR(c->dm_io)) {
1655 		r = PTR_ERR(c->dm_io);
1656 		goto bad_dm_io;
1657 	}
1658 
1659 	mutex_lock(&dm_bufio_clients_lock);
1660 	if (c->blocks_per_page_bits) {
1661 		if (!DM_BUFIO_CACHE_NAME(c)) {
1662 			DM_BUFIO_CACHE_NAME(c) = kasprintf(GFP_KERNEL, "dm_bufio_cache-%u", c->block_size);
1663 			if (!DM_BUFIO_CACHE_NAME(c)) {
1664 				r = -ENOMEM;
1665 				mutex_unlock(&dm_bufio_clients_lock);
1666 				goto bad_cache;
1667 			}
1668 		}
1669 
1670 		if (!DM_BUFIO_CACHE(c)) {
1671 			DM_BUFIO_CACHE(c) = kmem_cache_create(DM_BUFIO_CACHE_NAME(c),
1672 							      c->block_size,
1673 							      c->block_size, 0, NULL);
1674 			if (!DM_BUFIO_CACHE(c)) {
1675 				r = -ENOMEM;
1676 				mutex_unlock(&dm_bufio_clients_lock);
1677 				goto bad_cache;
1678 			}
1679 		}
1680 	}
1681 	mutex_unlock(&dm_bufio_clients_lock);
1682 
1683 	while (c->need_reserved_buffers) {
1684 		struct dm_buffer *b = alloc_buffer(c, GFP_KERNEL);
1685 
1686 		if (!b) {
1687 			r = -ENOMEM;
1688 			goto bad_buffer;
1689 		}
1690 		__free_buffer_wake(b);
1691 	}
1692 
1693 	mutex_lock(&dm_bufio_clients_lock);
1694 	dm_bufio_client_count++;
1695 	list_add(&c->client_list, &dm_bufio_all_clients);
1696 	__cache_size_refresh();
1697 	mutex_unlock(&dm_bufio_clients_lock);
1698 
1699 	c->shrinker.count_objects = dm_bufio_shrink_count;
1700 	c->shrinker.scan_objects = dm_bufio_shrink_scan;
1701 	c->shrinker.seeks = 1;
1702 	c->shrinker.batch = 0;
1703 	register_shrinker(&c->shrinker);
1704 
1705 	return c;
1706 
1707 bad_buffer:
1708 bad_cache:
1709 	while (!list_empty(&c->reserved_buffers)) {
1710 		struct dm_buffer *b = list_entry(c->reserved_buffers.next,
1711 						 struct dm_buffer, lru_list);
1712 		list_del(&b->lru_list);
1713 		free_buffer(b);
1714 	}
1715 	dm_io_client_destroy(c->dm_io);
1716 bad_dm_io:
1717 	kfree(c);
1718 bad_client:
1719 	return ERR_PTR(r);
1720 }
1721 EXPORT_SYMBOL_GPL(dm_bufio_client_create);
1722 
1723 /*
1724  * Free the buffering interface.
1725  * It is required that there are no references on any buffers.
1726  */
1727 void dm_bufio_client_destroy(struct dm_bufio_client *c)
1728 {
1729 	unsigned i;
1730 
1731 	drop_buffers(c);
1732 
1733 	unregister_shrinker(&c->shrinker);
1734 
1735 	mutex_lock(&dm_bufio_clients_lock);
1736 
1737 	list_del(&c->client_list);
1738 	dm_bufio_client_count--;
1739 	__cache_size_refresh();
1740 
1741 	mutex_unlock(&dm_bufio_clients_lock);
1742 
1743 	BUG_ON(!RB_EMPTY_ROOT(&c->buffer_tree));
1744 	BUG_ON(c->need_reserved_buffers);
1745 
1746 	while (!list_empty(&c->reserved_buffers)) {
1747 		struct dm_buffer *b = list_entry(c->reserved_buffers.next,
1748 						 struct dm_buffer, lru_list);
1749 		list_del(&b->lru_list);
1750 		free_buffer(b);
1751 	}
1752 
1753 	for (i = 0; i < LIST_SIZE; i++)
1754 		if (c->n_buffers[i])
1755 			DMERR("leaked buffer count %d: %ld", i, c->n_buffers[i]);
1756 
1757 	for (i = 0; i < LIST_SIZE; i++)
1758 		BUG_ON(c->n_buffers[i]);
1759 
1760 	dm_io_client_destroy(c->dm_io);
1761 	kfree(c);
1762 }
1763 EXPORT_SYMBOL_GPL(dm_bufio_client_destroy);
1764 
1765 static unsigned get_max_age_hz(void)
1766 {
1767 	unsigned max_age = ACCESS_ONCE(dm_bufio_max_age);
1768 
1769 	if (max_age > UINT_MAX / HZ)
1770 		max_age = UINT_MAX / HZ;
1771 
1772 	return max_age * HZ;
1773 }
1774 
1775 static bool older_than(struct dm_buffer *b, unsigned long age_hz)
1776 {
1777 	return time_after_eq(jiffies, b->last_accessed + age_hz);
1778 }
1779 
1780 static void __evict_old_buffers(struct dm_bufio_client *c, unsigned long age_hz)
1781 {
1782 	struct dm_buffer *b, *tmp;
1783 	unsigned retain_target = get_retain_buffers(c);
1784 	unsigned count;
1785 
1786 	dm_bufio_lock(c);
1787 
1788 	count = c->n_buffers[LIST_CLEAN] + c->n_buffers[LIST_DIRTY];
1789 	list_for_each_entry_safe_reverse(b, tmp, &c->lru[LIST_CLEAN], lru_list) {
1790 		if (count <= retain_target)
1791 			break;
1792 
1793 		if (!older_than(b, age_hz))
1794 			break;
1795 
1796 		if (__try_evict_buffer(b, 0))
1797 			count--;
1798 
1799 		cond_resched();
1800 	}
1801 
1802 	dm_bufio_unlock(c);
1803 }
1804 
1805 static void cleanup_old_buffers(void)
1806 {
1807 	unsigned long max_age_hz = get_max_age_hz();
1808 	struct dm_bufio_client *c;
1809 
1810 	mutex_lock(&dm_bufio_clients_lock);
1811 
1812 	list_for_each_entry(c, &dm_bufio_all_clients, client_list)
1813 		__evict_old_buffers(c, max_age_hz);
1814 
1815 	mutex_unlock(&dm_bufio_clients_lock);
1816 }
1817 
1818 static struct workqueue_struct *dm_bufio_wq;
1819 static struct delayed_work dm_bufio_work;
1820 
1821 static void work_fn(struct work_struct *w)
1822 {
1823 	cleanup_old_buffers();
1824 
1825 	queue_delayed_work(dm_bufio_wq, &dm_bufio_work,
1826 			   DM_BUFIO_WORK_TIMER_SECS * HZ);
1827 }
1828 
1829 /*----------------------------------------------------------------
1830  * Module setup
1831  *--------------------------------------------------------------*/
1832 
1833 /*
1834  * This is called only once for the whole dm_bufio module.
1835  * It initializes memory limit.
1836  */
1837 static int __init dm_bufio_init(void)
1838 {
1839 	__u64 mem;
1840 
1841 	dm_bufio_allocated_kmem_cache = 0;
1842 	dm_bufio_allocated_get_free_pages = 0;
1843 	dm_bufio_allocated_vmalloc = 0;
1844 	dm_bufio_current_allocated = 0;
1845 
1846 	memset(&dm_bufio_caches, 0, sizeof dm_bufio_caches);
1847 	memset(&dm_bufio_cache_names, 0, sizeof dm_bufio_cache_names);
1848 
1849 	mem = (__u64)((totalram_pages - totalhigh_pages) *
1850 		      DM_BUFIO_MEMORY_PERCENT / 100) << PAGE_SHIFT;
1851 
1852 	if (mem > ULONG_MAX)
1853 		mem = ULONG_MAX;
1854 
1855 #ifdef CONFIG_MMU
1856 	/*
1857 	 * Get the size of vmalloc space the same way as VMALLOC_TOTAL
1858 	 * in fs/proc/internal.h
1859 	 */
1860 	if (mem > (VMALLOC_END - VMALLOC_START) * DM_BUFIO_VMALLOC_PERCENT / 100)
1861 		mem = (VMALLOC_END - VMALLOC_START) * DM_BUFIO_VMALLOC_PERCENT / 100;
1862 #endif
1863 
1864 	dm_bufio_default_cache_size = mem;
1865 
1866 	mutex_lock(&dm_bufio_clients_lock);
1867 	__cache_size_refresh();
1868 	mutex_unlock(&dm_bufio_clients_lock);
1869 
1870 	dm_bufio_wq = alloc_workqueue("dm_bufio_cache", WQ_MEM_RECLAIM, 0);
1871 	if (!dm_bufio_wq)
1872 		return -ENOMEM;
1873 
1874 	INIT_DELAYED_WORK(&dm_bufio_work, work_fn);
1875 	queue_delayed_work(dm_bufio_wq, &dm_bufio_work,
1876 			   DM_BUFIO_WORK_TIMER_SECS * HZ);
1877 
1878 	return 0;
1879 }
1880 
1881 /*
1882  * This is called once when unloading the dm_bufio module.
1883  */
1884 static void __exit dm_bufio_exit(void)
1885 {
1886 	int bug = 0;
1887 	int i;
1888 
1889 	cancel_delayed_work_sync(&dm_bufio_work);
1890 	destroy_workqueue(dm_bufio_wq);
1891 
1892 	for (i = 0; i < ARRAY_SIZE(dm_bufio_caches); i++)
1893 		kmem_cache_destroy(dm_bufio_caches[i]);
1894 
1895 	for (i = 0; i < ARRAY_SIZE(dm_bufio_cache_names); i++)
1896 		kfree(dm_bufio_cache_names[i]);
1897 
1898 	if (dm_bufio_client_count) {
1899 		DMCRIT("%s: dm_bufio_client_count leaked: %d",
1900 			__func__, dm_bufio_client_count);
1901 		bug = 1;
1902 	}
1903 
1904 	if (dm_bufio_current_allocated) {
1905 		DMCRIT("%s: dm_bufio_current_allocated leaked: %lu",
1906 			__func__, dm_bufio_current_allocated);
1907 		bug = 1;
1908 	}
1909 
1910 	if (dm_bufio_allocated_get_free_pages) {
1911 		DMCRIT("%s: dm_bufio_allocated_get_free_pages leaked: %lu",
1912 		       __func__, dm_bufio_allocated_get_free_pages);
1913 		bug = 1;
1914 	}
1915 
1916 	if (dm_bufio_allocated_vmalloc) {
1917 		DMCRIT("%s: dm_bufio_allocated_vmalloc leaked: %lu",
1918 		       __func__, dm_bufio_allocated_vmalloc);
1919 		bug = 1;
1920 	}
1921 
1922 	BUG_ON(bug);
1923 }
1924 
1925 module_init(dm_bufio_init)
1926 module_exit(dm_bufio_exit)
1927 
1928 module_param_named(max_cache_size_bytes, dm_bufio_cache_size, ulong, S_IRUGO | S_IWUSR);
1929 MODULE_PARM_DESC(max_cache_size_bytes, "Size of metadata cache");
1930 
1931 module_param_named(max_age_seconds, dm_bufio_max_age, uint, S_IRUGO | S_IWUSR);
1932 MODULE_PARM_DESC(max_age_seconds, "Max age of a buffer in seconds");
1933 
1934 module_param_named(retain_bytes, dm_bufio_retain_bytes, uint, S_IRUGO | S_IWUSR);
1935 MODULE_PARM_DESC(retain_bytes, "Try to keep at least this many bytes cached in memory");
1936 
1937 module_param_named(peak_allocated_bytes, dm_bufio_peak_allocated, ulong, S_IRUGO | S_IWUSR);
1938 MODULE_PARM_DESC(peak_allocated_bytes, "Tracks the maximum allocated memory");
1939 
1940 module_param_named(allocated_kmem_cache_bytes, dm_bufio_allocated_kmem_cache, ulong, S_IRUGO);
1941 MODULE_PARM_DESC(allocated_kmem_cache_bytes, "Memory allocated with kmem_cache_alloc");
1942 
1943 module_param_named(allocated_get_free_pages_bytes, dm_bufio_allocated_get_free_pages, ulong, S_IRUGO);
1944 MODULE_PARM_DESC(allocated_get_free_pages_bytes, "Memory allocated with get_free_pages");
1945 
1946 module_param_named(allocated_vmalloc_bytes, dm_bufio_allocated_vmalloc, ulong, S_IRUGO);
1947 MODULE_PARM_DESC(allocated_vmalloc_bytes, "Memory allocated with vmalloc");
1948 
1949 module_param_named(current_allocated_bytes, dm_bufio_current_allocated, ulong, S_IRUGO);
1950 MODULE_PARM_DESC(current_allocated_bytes, "Memory currently used by the cache");
1951 
1952 MODULE_AUTHOR("Mikulas Patocka <dm-devel@redhat.com>");
1953 MODULE_DESCRIPTION(DM_NAME " buffered I/O library");
1954 MODULE_LICENSE("GPL");
1955