/* xref: /openbmc/linux/drivers/md/dm-bufio.c (revision ce932d0c5589e9766e089c22c66890dfc48fbd94) */

/*
 * Copyright (C) 2009-2011 Red Hat, Inc.
 *
 * Author: Mikulas Patocka <mpatocka@redhat.com>
 *
 * This file is released under the GPL.
 */

#include "dm-bufio.h"

#include <linux/device-mapper.h>
#include <linux/dm-io.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/shrinker.h>
#include <linux/module.h>

#define DM_MSG_PREFIX "bufio"

/*
 * Memory management policy:
 *	Limit the number of buffers to DM_BUFIO_MEMORY_PERCENT of main memory
 *	or DM_BUFIO_VMALLOC_PERCENT of vmalloc memory (whichever is lower).
 *	Always allocate at least DM_BUFIO_MIN_BUFFERS buffers.
 *	Start background writeback when there are DM_BUFIO_WRITEBACK_PERCENT
 *	dirty buffers.
 */
#define DM_BUFIO_MIN_BUFFERS		8

#define DM_BUFIO_MEMORY_PERCENT		2
#define DM_BUFIO_VMALLOC_PERCENT	25
#define DM_BUFIO_WRITEBACK_PERCENT	75
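
/*
 * Worked example (illustrative, not normative): on a machine with 4 GiB
 * of directly mapped memory, DM_BUFIO_MEMORY_PERCENT gives a default
 * cache of 4 GiB * 2 / 100 ~= 82 MiB, shared by all clients.  With
 * DM_BUFIO_WRITEBACK_PERCENT at 75, background writeback of a client's
 * dirty buffers starts once they exceed three quarters of that client's
 * share.
 */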

/*
 * Check buffer ages in this interval (seconds)
 */
#define DM_BUFIO_WORK_TIMER_SECS	10

/*
 * Free buffers when they are older than this (seconds)
 */
#define DM_BUFIO_DEFAULT_AGE_SECS	60

/*
 * The number of bvec entries that are embedded directly in the buffer.
 * If the chunk size is larger, dm-io is used to do the I/O.
 */
#define DM_BUFIO_INLINE_VECS		16

/*
 * Buffer hash
 */
#define DM_BUFIO_HASH_BITS	20
#define DM_BUFIO_HASH(block) \
	((((block) >> DM_BUFIO_HASH_BITS) ^ (block)) & \
	 ((1 << DM_BUFIO_HASH_BITS) - 1))
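
/*
 * Worked example (illustrative only): for block 0x12345678,
 * (0x12345678 >> 20) = 0x123 and 0x12345678 ^ 0x123 = 0x1234575b;
 * masking with (1 << 20) - 1 = 0xfffff yields bucket 0x4575b.  Folding
 * the high bits in keeps blocks that differ only above bit 20 from
 * always colliding.
 */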

/*
 * Don't try to use kmem_cache_alloc for blocks larger than this.
 * For explanation, see alloc_buffer_data below.
 */
#define DM_BUFIO_BLOCK_SIZE_SLAB_LIMIT	(PAGE_SIZE >> 1)
#define DM_BUFIO_BLOCK_SIZE_GFP_LIMIT	(PAGE_SIZE << (MAX_ORDER - 1))
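
/*
 * Worked example (illustrative, assuming 4 KiB pages and MAX_ORDER 11):
 * blocks up to PAGE_SIZE >> 1 = 2 KiB come from a slab cache, blocks up
 * to PAGE_SIZE << 10 = 4 MiB may come from __get_free_pages (when the
 * allocation is allowed to fail), and anything larger, or any allocation
 * that must not fail, falls back to __vmalloc.
 */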

/*
 * dm_buffer->list_mode
 */
#define LIST_CLEAN	0
#define LIST_DIRTY	1
#define LIST_SIZE	2

/*
 * Linking of buffers:
 *	All buffers are linked to cache_hash with their hash_list field.
 *
 *	Clean buffers that are not being written (B_WRITING not set)
 *	are linked to lru[LIST_CLEAN] with their lru_list field.
 *
 *	Dirty and clean buffers that are being written are linked to
 *	lru[LIST_DIRTY] with their lru_list field. When the write
 *	finishes, the buffer cannot be relinked immediately (because we
 *	are in an interrupt context and relinking requires process
 *	context), so some clean-not-writing buffers can be held on
 *	dirty_lru too.  They are later added to lru in the process
 *	context.
 */
struct dm_bufio_client {
	struct mutex lock;

	struct list_head lru[LIST_SIZE];
	unsigned long n_buffers[LIST_SIZE];

	struct block_device *bdev;
	unsigned block_size;
	unsigned char sectors_per_block_bits;
	unsigned char pages_per_block_bits;
	unsigned char blocks_per_page_bits;
	unsigned aux_size;
	void (*alloc_callback)(struct dm_buffer *);
	void (*write_callback)(struct dm_buffer *);

	struct dm_io_client *dm_io;

	struct list_head reserved_buffers;
	unsigned need_reserved_buffers;

	struct hlist_head *cache_hash;
	wait_queue_head_t free_buffer_wait;

	int async_write_error;

	struct list_head client_list;
	struct shrinker shrinker;
};

/*
 * Buffer state bits.
 */
#define B_READING	0
#define B_WRITING	1
#define B_DIRTY		2

/*
 * Describes how the block was allocated:
 * kmem_cache_alloc(), __get_free_pages() or vmalloc().
 * See the comment at alloc_buffer_data.
 */
enum data_mode {
	DATA_MODE_SLAB = 0,
	DATA_MODE_GET_FREE_PAGES = 1,
	DATA_MODE_VMALLOC = 2,
	DATA_MODE_LIMIT = 3
};

struct dm_buffer {
	struct hlist_node hash_list;
	struct list_head lru_list;
	sector_t block;
	void *data;
	enum data_mode data_mode;
	unsigned char list_mode;		/* LIST_* */
	unsigned hold_count;
	int read_error;
	int write_error;
	unsigned long state;
	unsigned long last_accessed;
	struct dm_bufio_client *c;
	struct bio bio;
	struct bio_vec bio_vec[DM_BUFIO_INLINE_VECS];
};

/*----------------------------------------------------------------*/

static struct kmem_cache *dm_bufio_caches[PAGE_SHIFT - SECTOR_SHIFT];
static char *dm_bufio_cache_names[PAGE_SHIFT - SECTOR_SHIFT];

static inline int dm_bufio_cache_index(struct dm_bufio_client *c)
{
	unsigned ret = c->blocks_per_page_bits - 1;

	BUG_ON(ret >= ARRAY_SIZE(dm_bufio_caches));

	return ret;
}

#define DM_BUFIO_CACHE(c)	(dm_bufio_caches[dm_bufio_cache_index(c)])
#define DM_BUFIO_CACHE_NAME(c)	(dm_bufio_cache_names[dm_bufio_cache_index(c)])

#define dm_bufio_in_request()	(!!current->bio_list)

static void dm_bufio_lock(struct dm_bufio_client *c)
{
	mutex_lock_nested(&c->lock, dm_bufio_in_request());
}

static int dm_bufio_trylock(struct dm_bufio_client *c)
{
	return mutex_trylock(&c->lock);
}

static void dm_bufio_unlock(struct dm_bufio_client *c)
{
	mutex_unlock(&c->lock);
}

/*
 * FIXME Move to sched.h?
 */
#ifdef CONFIG_PREEMPT_VOLUNTARY
#  define dm_bufio_cond_resched()		\
do {						\
	if (unlikely(need_resched()))		\
		_cond_resched();		\
} while (0)
#else
#  define dm_bufio_cond_resched()                do { } while (0)
#endif

/*----------------------------------------------------------------*/

/*
 * Default cache size: available memory divided by the ratio.
 */
static unsigned long dm_bufio_default_cache_size;

/*
 * Total cache size set by the user.
 */
static unsigned long dm_bufio_cache_size;

/*
 * A copy of dm_bufio_cache_size because dm_bufio_cache_size can change
 * at any time.  If it disagrees, the user has changed cache size.
 */
static unsigned long dm_bufio_cache_size_latch;

static DEFINE_SPINLOCK(param_spinlock);

/*
 * Buffers are freed after this timeout
 */
static unsigned dm_bufio_max_age = DM_BUFIO_DEFAULT_AGE_SECS;

static unsigned long dm_bufio_peak_allocated;
static unsigned long dm_bufio_allocated_kmem_cache;
static unsigned long dm_bufio_allocated_get_free_pages;
static unsigned long dm_bufio_allocated_vmalloc;
static unsigned long dm_bufio_current_allocated;

/*----------------------------------------------------------------*/

/*
 * Per-client cache: dm_bufio_cache_size / dm_bufio_client_count
 */
static unsigned long dm_bufio_cache_size_per_client;

/*
 * The current number of clients.
 */
static int dm_bufio_client_count;

/*
 * The list of all clients.
 */
static LIST_HEAD(dm_bufio_all_clients);

/*
 * This mutex protects dm_bufio_cache_size_latch,
 * dm_bufio_cache_size_per_client and dm_bufio_client_count
 */
static DEFINE_MUTEX(dm_bufio_clients_lock);

/*----------------------------------------------------------------*/

static void adjust_total_allocated(enum data_mode data_mode, long diff)
{
	static unsigned long * const class_ptr[DATA_MODE_LIMIT] = {
		&dm_bufio_allocated_kmem_cache,
		&dm_bufio_allocated_get_free_pages,
		&dm_bufio_allocated_vmalloc,
	};

	spin_lock(&param_spinlock);

	*class_ptr[data_mode] += diff;

	dm_bufio_current_allocated += diff;

	if (dm_bufio_current_allocated > dm_bufio_peak_allocated)
		dm_bufio_peak_allocated = dm_bufio_current_allocated;

	spin_unlock(&param_spinlock);
}

/*
 * Change the number of clients and recalculate per-client limit.
 */
static void __cache_size_refresh(void)
{
	BUG_ON(!mutex_is_locked(&dm_bufio_clients_lock));
	BUG_ON(dm_bufio_client_count < 0);

	dm_bufio_cache_size_latch = dm_bufio_cache_size;

	barrier();

	/*
	 * Use default if set to 0 and report the actual cache size used.
	 */
	if (!dm_bufio_cache_size_latch) {
		(void)cmpxchg(&dm_bufio_cache_size, 0,
			      dm_bufio_default_cache_size);
		dm_bufio_cache_size_latch = dm_bufio_default_cache_size;
	}

	dm_bufio_cache_size_per_client = dm_bufio_cache_size_latch /
					 (dm_bufio_client_count ? : 1);
}

/*
 * Allocating buffer data.
 *
 * Small buffers are allocated with kmem_cache, to use space optimally.
 *
 * For large buffers, we choose between get_free_pages and vmalloc.
 * Each has advantages and disadvantages.
 *
 * __get_free_pages can randomly fail if the memory is fragmented.
 * __vmalloc won't randomly fail, but vmalloc space is limited (it may be
 * as low as 128M) so using it for caching is not appropriate.
 *
 * If the allocation may fail, we use __get_free_pages. Memory fragmentation
 * won't have a fatal effect here; it just causes flushes of some other
 * buffers, so more I/O will be performed. Don't use __get_free_pages if it
 * always fails (i.e. order >= MAX_ORDER).
 *
 * If the allocation shouldn't fail, we use __vmalloc. This is only for the
 * initial reserve allocation, so there's no risk of wasting all vmalloc
 * space.
 */
static void *alloc_buffer_data(struct dm_bufio_client *c, gfp_t gfp_mask,
			       enum data_mode *data_mode)
{
	if (c->block_size <= DM_BUFIO_BLOCK_SIZE_SLAB_LIMIT) {
		*data_mode = DATA_MODE_SLAB;
		return kmem_cache_alloc(DM_BUFIO_CACHE(c), gfp_mask);
	}

	if (c->block_size <= DM_BUFIO_BLOCK_SIZE_GFP_LIMIT &&
	    gfp_mask & __GFP_NORETRY) {
		*data_mode = DATA_MODE_GET_FREE_PAGES;
		return (void *)__get_free_pages(gfp_mask,
						c->pages_per_block_bits);
	}

	*data_mode = DATA_MODE_VMALLOC;
	return __vmalloc(c->block_size, gfp_mask, PAGE_KERNEL);
}

/*
 * Free buffer's data.
 */
static void free_buffer_data(struct dm_bufio_client *c,
			     void *data, enum data_mode data_mode)
{
	switch (data_mode) {
	case DATA_MODE_SLAB:
		kmem_cache_free(DM_BUFIO_CACHE(c), data);
		break;

	case DATA_MODE_GET_FREE_PAGES:
		free_pages((unsigned long)data, c->pages_per_block_bits);
		break;

	case DATA_MODE_VMALLOC:
		vfree(data);
		break;

	default:
		DMCRIT("dm_bufio_free_buffer_data: bad data mode: %d",
		       data_mode);
		BUG();
	}
}

/*
 * Allocate buffer and its data.
 */
static struct dm_buffer *alloc_buffer(struct dm_bufio_client *c, gfp_t gfp_mask)
{
	struct dm_buffer *b = kmalloc(sizeof(struct dm_buffer) + c->aux_size,
				      gfp_mask);

	if (!b)
		return NULL;

	b->c = c;

	b->data = alloc_buffer_data(c, gfp_mask, &b->data_mode);
	if (!b->data) {
		kfree(b);
		return NULL;
	}

	adjust_total_allocated(b->data_mode, (long)c->block_size);

	return b;
}

/*
 * Free buffer and its data.
 */
static void free_buffer(struct dm_buffer *b)
{
	struct dm_bufio_client *c = b->c;

	adjust_total_allocated(b->data_mode, -(long)c->block_size);

	free_buffer_data(c, b->data, b->data_mode);
	kfree(b);
}

/*
 * Link buffer to the hash list and clean or dirty queue.
 */
static void __link_buffer(struct dm_buffer *b, sector_t block, int dirty)
{
	struct dm_bufio_client *c = b->c;

	c->n_buffers[dirty]++;
	b->block = block;
	b->list_mode = dirty;
	list_add(&b->lru_list, &c->lru[dirty]);
	hlist_add_head(&b->hash_list, &c->cache_hash[DM_BUFIO_HASH(block)]);
	b->last_accessed = jiffies;
}

/*
 * Unlink buffer from the hash list and dirty or clean queue.
 */
static void __unlink_buffer(struct dm_buffer *b)
{
	struct dm_bufio_client *c = b->c;

	BUG_ON(!c->n_buffers[b->list_mode]);

	c->n_buffers[b->list_mode]--;
	hlist_del(&b->hash_list);
	list_del(&b->lru_list);
}

/*
 * Place the buffer to the head of dirty or clean LRU queue.
 */
static void __relink_lru(struct dm_buffer *b, int dirty)
{
	struct dm_bufio_client *c = b->c;

	BUG_ON(!c->n_buffers[b->list_mode]);

	c->n_buffers[b->list_mode]--;
	c->n_buffers[dirty]++;
	b->list_mode = dirty;
	list_del(&b->lru_list);
	list_add(&b->lru_list, &c->lru[dirty]);
}

/*----------------------------------------------------------------
 * Submit I/O on the buffer.
 *
 * The bio interface is faster, but it has some problems:
 *	the vector list is limited (increasing this limit increases
 *	memory consumption per buffer, so it is not viable);
 *
 *	the memory must be direct-mapped, not vmalloced;
 *
 *	the I/O driver can reject requests spuriously if it thinks that
 *	the requests are too big for the device or if they cross a
 *	controller-defined memory boundary.
 *
 * If the buffer is small enough (up to DM_BUFIO_INLINE_VECS pages) and
 * it is not vmalloced, try using the bio interface.
 *
 * If the buffer is big, if it is vmalloced or if the underlying device
 * rejects the bio because it is too large, use the dm-io layer to do the
 * I/O.  The dm-io layer splits the I/O into multiple requests, avoiding
 * the above shortcomings.
 *--------------------------------------------------------------*/

/*
 * dm-io completion routine. It just calls b->bio.bi_end_io, pretending
 * that the request was handled directly with bio interface.
 */
static void dmio_complete(unsigned long error, void *context)
{
	struct dm_buffer *b = context;

	b->bio.bi_end_io(&b->bio, error ? -EIO : 0);
}

static void use_dmio(struct dm_buffer *b, int rw, sector_t block,
		     bio_end_io_t *end_io)
{
	int r;
	struct dm_io_request io_req = {
		.bi_rw = rw,
		.notify.fn = dmio_complete,
		.notify.context = b,
		.client = b->c->dm_io,
	};
	struct dm_io_region region = {
		.bdev = b->c->bdev,
		.sector = block << b->c->sectors_per_block_bits,
		.count = b->c->block_size >> SECTOR_SHIFT,
	};

	if (b->data_mode != DATA_MODE_VMALLOC) {
		io_req.mem.type = DM_IO_KMEM;
		io_req.mem.ptr.addr = b->data;
	} else {
		io_req.mem.type = DM_IO_VMA;
		io_req.mem.ptr.vma = b->data;
	}

	b->bio.bi_end_io = end_io;

	r = dm_io(&io_req, 1, &region, NULL);
	if (r)
		end_io(&b->bio, r);
}

static void use_inline_bio(struct dm_buffer *b, int rw, sector_t block,
			   bio_end_io_t *end_io)
{
	char *ptr;
	int len;

	bio_init(&b->bio);
	b->bio.bi_io_vec = b->bio_vec;
	b->bio.bi_max_vecs = DM_BUFIO_INLINE_VECS;
	b->bio.bi_sector = block << b->c->sectors_per_block_bits;
	b->bio.bi_bdev = b->c->bdev;
	b->bio.bi_end_io = end_io;

	/*
	 * We assume that if len >= PAGE_SIZE ptr is page-aligned.
	 * If len < PAGE_SIZE the buffer doesn't cross page boundary.
	 */
	ptr = b->data;
	len = b->c->block_size;

	if (len >= PAGE_SIZE)
		BUG_ON((unsigned long)ptr & (PAGE_SIZE - 1));
	else
		BUG_ON((unsigned long)ptr & (len - 1));

	do {
		if (!bio_add_page(&b->bio, virt_to_page(ptr),
				  len < PAGE_SIZE ? len : PAGE_SIZE,
				  virt_to_phys(ptr) & (PAGE_SIZE - 1))) {
			BUG_ON(b->c->block_size <= PAGE_SIZE);
			use_dmio(b, rw, block, end_io);
			return;
		}

		len -= PAGE_SIZE;
		ptr += PAGE_SIZE;
	} while (len > 0);

	submit_bio(rw, &b->bio);
}

static void submit_io(struct dm_buffer *b, int rw, sector_t block,
		      bio_end_io_t *end_io)
{
	if (rw == WRITE && b->c->write_callback)
		b->c->write_callback(b);

	if (b->c->block_size <= DM_BUFIO_INLINE_VECS * PAGE_SIZE &&
	    b->data_mode != DATA_MODE_VMALLOC)
		use_inline_bio(b, rw, block, end_io);
	else
		use_dmio(b, rw, block, end_io);
}

/*----------------------------------------------------------------
 * Writing dirty buffers
 *--------------------------------------------------------------*/

/*
 * The endio routine for write.
 *
 * Set the error, clear B_WRITING bit and wake anyone who was waiting on
 * it.
 */
static void write_endio(struct bio *bio, int error)
{
	struct dm_buffer *b = container_of(bio, struct dm_buffer, bio);

	b->write_error = error;
	if (unlikely(error)) {
		struct dm_bufio_client *c = b->c;
		(void)cmpxchg(&c->async_write_error, 0, error);
	}

	BUG_ON(!test_bit(B_WRITING, &b->state));

	smp_mb__before_clear_bit();
	clear_bit(B_WRITING, &b->state);
	smp_mb__after_clear_bit();

	wake_up_bit(&b->state, B_WRITING);
}

/*
 * This function is called when wait_on_bit is actually waiting.
 */
static int do_io_schedule(void *word)
{
	io_schedule();

	return 0;
}

/*
 * Initiate a write on a dirty buffer, but don't wait for it.
 *
 * - If the buffer is not dirty, exit.
 * - If there is a previous write going on, wait for it to finish (we can't
 *   have two writes on the same buffer simultaneously).
 * - Submit our write and don't wait on it. We set B_WRITING to indicate
 *   that there is a write in progress.
 */
static void __write_dirty_buffer(struct dm_buffer *b)
{
	if (!test_bit(B_DIRTY, &b->state))
		return;

	clear_bit(B_DIRTY, &b->state);
	wait_on_bit_lock(&b->state, B_WRITING,
			 do_io_schedule, TASK_UNINTERRUPTIBLE);

	submit_io(b, WRITE, b->block, write_endio);
}

/*
 * Wait until any activity on the buffer finishes.  Possibly write the
 * buffer if it is dirty.  When this function finishes, there is no I/O
 * running on the buffer and the buffer is not dirty.
 */
static void __make_buffer_clean(struct dm_buffer *b)
{
	BUG_ON(b->hold_count);

	if (!b->state)	/* fast case */
		return;

	wait_on_bit(&b->state, B_READING, do_io_schedule, TASK_UNINTERRUPTIBLE);
	__write_dirty_buffer(b);
	wait_on_bit(&b->state, B_WRITING, do_io_schedule, TASK_UNINTERRUPTIBLE);
}

/*
 * Find some buffer that is not held by anybody, clean it, unlink it and
 * return it.
 */
static struct dm_buffer *__get_unclaimed_buffer(struct dm_bufio_client *c)
{
	struct dm_buffer *b;

	list_for_each_entry_reverse(b, &c->lru[LIST_CLEAN], lru_list) {
		BUG_ON(test_bit(B_WRITING, &b->state));
		BUG_ON(test_bit(B_DIRTY, &b->state));

		if (!b->hold_count) {
			__make_buffer_clean(b);
			__unlink_buffer(b);
			return b;
		}
		dm_bufio_cond_resched();
	}

	list_for_each_entry_reverse(b, &c->lru[LIST_DIRTY], lru_list) {
		BUG_ON(test_bit(B_READING, &b->state));

		if (!b->hold_count) {
			__make_buffer_clean(b);
			__unlink_buffer(b);
			return b;
		}
		dm_bufio_cond_resched();
	}

	return NULL;
}

/*
 * Wait until some other thread frees a buffer or releases its hold
 * count on one.
 *
 * This function is entered with c->lock held, drops it and regains it
 * before exiting.
 */
static void __wait_for_free_buffer(struct dm_bufio_client *c)
{
	DECLARE_WAITQUEUE(wait, current);

	add_wait_queue(&c->free_buffer_wait, &wait);
	set_task_state(current, TASK_UNINTERRUPTIBLE);
	dm_bufio_unlock(c);

	io_schedule();

	set_task_state(current, TASK_RUNNING);
	remove_wait_queue(&c->free_buffer_wait, &wait);

	dm_bufio_lock(c);
}

enum new_flag {
	NF_FRESH = 0,
	NF_READ = 1,
	NF_GET = 2,
	NF_PREFETCH = 3
};

/*
 * Allocate a new buffer. If the allocation is not possible, wait until
 * some other thread frees a buffer.
 *
 * May drop the lock and regain it.
 */
static struct dm_buffer *__alloc_buffer_wait_no_callback(struct dm_bufio_client *c, enum new_flag nf)
{
	struct dm_buffer *b;

	/*
	 * dm-bufio is resistant to allocation failures (it just keeps
	 * one buffer reserved in case all the allocations fail).
	 * So set flags to not try too hard:
	 *	GFP_NOIO: don't recurse into the I/O layer
	 *	__GFP_NORETRY: don't retry but rather return failure
	 *	__GFP_NOMEMALLOC: don't use emergency reserves
	 *	__GFP_NOWARN: don't print a warning in case of failure
	 *
	 * For debugging, if we set the cache size to 1, no new buffers will
	 * be allocated.
	 */
	while (1) {
		if (dm_bufio_cache_size_latch != 1) {
			b = alloc_buffer(c, GFP_NOIO | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
			if (b)
				return b;
		}

		if (nf == NF_PREFETCH)
			return NULL;

		if (!list_empty(&c->reserved_buffers)) {
			b = list_entry(c->reserved_buffers.next,
				       struct dm_buffer, lru_list);
			list_del(&b->lru_list);
			c->need_reserved_buffers++;

			return b;
		}

		b = __get_unclaimed_buffer(c);
		if (b)
			return b;

		__wait_for_free_buffer(c);
	}
}

static struct dm_buffer *__alloc_buffer_wait(struct dm_bufio_client *c, enum new_flag nf)
{
	struct dm_buffer *b = __alloc_buffer_wait_no_callback(c, nf);

	if (!b)
		return NULL;

	if (c->alloc_callback)
		c->alloc_callback(b);

	return b;
}

/*
 * Free a buffer and wake other threads waiting for free buffers.
 */
static void __free_buffer_wake(struct dm_buffer *b)
{
	struct dm_bufio_client *c = b->c;

	if (!c->need_reserved_buffers)
		free_buffer(b);
	else {
		list_add(&b->lru_list, &c->reserved_buffers);
		c->need_reserved_buffers--;
	}

	wake_up(&c->free_buffer_wait);
}

static void __write_dirty_buffers_async(struct dm_bufio_client *c, int no_wait)
{
	struct dm_buffer *b, *tmp;

	list_for_each_entry_safe_reverse(b, tmp, &c->lru[LIST_DIRTY], lru_list) {
		BUG_ON(test_bit(B_READING, &b->state));

		if (!test_bit(B_DIRTY, &b->state) &&
		    !test_bit(B_WRITING, &b->state)) {
			__relink_lru(b, LIST_CLEAN);
			continue;
		}

		if (no_wait && test_bit(B_WRITING, &b->state))
			return;

		__write_dirty_buffer(b);
		dm_bufio_cond_resched();
	}
}

/*
 * Get writeback threshold and buffer limit for a given client.
 */
static void __get_memory_limit(struct dm_bufio_client *c,
			       unsigned long *threshold_buffers,
			       unsigned long *limit_buffers)
{
	unsigned long buffers;

	if (dm_bufio_cache_size != dm_bufio_cache_size_latch) {
		mutex_lock(&dm_bufio_clients_lock);
		__cache_size_refresh();
		mutex_unlock(&dm_bufio_clients_lock);
	}

	buffers = dm_bufio_cache_size_per_client >>
		  (c->sectors_per_block_bits + SECTOR_SHIFT);

	if (buffers < DM_BUFIO_MIN_BUFFERS)
		buffers = DM_BUFIO_MIN_BUFFERS;

	*limit_buffers = buffers;
	*threshold_buffers = buffers * DM_BUFIO_WRITEBACK_PERCENT / 100;
}
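
/*
 * Worked example (illustrative only): with a 100 MiB per-client share and
 * 4 KiB blocks, sectors_per_block_bits is 3, so the shift above is
 * 3 + 9 = 12; the limit is 100 MiB >> 12 = 25600 buffers and the
 * writeback threshold is 25600 * 75 / 100 = 19200 dirty buffers.
 */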

/*
 * Check if we're over the watermark.
 * If we are over threshold_buffers, start freeing buffers.
 * If we're over limit_buffers, block until we get under the limit.
 */
static void __check_watermark(struct dm_bufio_client *c)
{
	unsigned long threshold_buffers, limit_buffers;

	__get_memory_limit(c, &threshold_buffers, &limit_buffers);

	while (c->n_buffers[LIST_CLEAN] + c->n_buffers[LIST_DIRTY] >
	       limit_buffers) {

		struct dm_buffer *b = __get_unclaimed_buffer(c);

		if (!b)
			return;

		__free_buffer_wake(b);
		dm_bufio_cond_resched();
	}

	if (c->n_buffers[LIST_DIRTY] > threshold_buffers)
		__write_dirty_buffers_async(c, 1);
}

/*
 * Find a buffer in the hash.
 */
static struct dm_buffer *__find(struct dm_bufio_client *c, sector_t block)
{
	struct dm_buffer *b;
	struct hlist_node *hn;

	hlist_for_each_entry(b, hn, &c->cache_hash[DM_BUFIO_HASH(block)],
			     hash_list) {
		dm_bufio_cond_resched();
		if (b->block == block)
			return b;
	}

	return NULL;
}

/*----------------------------------------------------------------
 * Getting a buffer
 *--------------------------------------------------------------*/

static struct dm_buffer *__bufio_new(struct dm_bufio_client *c, sector_t block,
				     enum new_flag nf, int *need_submit)
{
	struct dm_buffer *b, *new_b = NULL;

	*need_submit = 0;

	b = __find(c, block);
	if (b)
		goto found_buffer;

	if (nf == NF_GET)
		return NULL;

	new_b = __alloc_buffer_wait(c, nf);
	if (!new_b)
		return NULL;

	/*
	 * We've had a period where the mutex was unlocked, so need to
	 * recheck the hash table.
	 */
	b = __find(c, block);
	if (b) {
		__free_buffer_wake(new_b);
		goto found_buffer;
	}

	__check_watermark(c);

	b = new_b;
	b->hold_count = 1;
	b->read_error = 0;
	b->write_error = 0;
	__link_buffer(b, block, LIST_CLEAN);

	if (nf == NF_FRESH) {
		b->state = 0;
		return b;
	}

	b->state = 1 << B_READING;
	*need_submit = 1;

	return b;

found_buffer:
	if (nf == NF_PREFETCH)
		return NULL;
	/*
	 * Note: it is essential that we don't wait for the buffer to be
	 * read if the dm_bufio_get function is used. Both dm_bufio_get and
	 * dm_bufio_prefetch can be used in the driver request routine.
	 * If the user called both dm_bufio_prefetch and dm_bufio_get on
	 * the same buffer, it would deadlock if we waited.
	 */
	if (nf == NF_GET && unlikely(test_bit(B_READING, &b->state)))
		return NULL;

	b->hold_count++;
	__relink_lru(b, test_bit(B_DIRTY, &b->state) ||
		     test_bit(B_WRITING, &b->state));
	return b;
}

/*
 * The endio routine for reading: set the error, clear the bit and wake up
 * anyone waiting on the buffer.
 */
static void read_endio(struct bio *bio, int error)
{
	struct dm_buffer *b = container_of(bio, struct dm_buffer, bio);

	b->read_error = error;

	BUG_ON(!test_bit(B_READING, &b->state));

	smp_mb__before_clear_bit();
	clear_bit(B_READING, &b->state);
	smp_mb__after_clear_bit();

	wake_up_bit(&b->state, B_READING);
}

/*
 * A common routine for dm_bufio_new and dm_bufio_read.  Operation of these
 * functions is similar except that dm_bufio_new doesn't read the
 * buffer from the disk (assuming that the caller overwrites all the data
 * and uses dm_bufio_mark_buffer_dirty to write new data back).
 */
static void *new_read(struct dm_bufio_client *c, sector_t block,
		      enum new_flag nf, struct dm_buffer **bp)
{
	int need_submit;
	struct dm_buffer *b;

	dm_bufio_lock(c);
	b = __bufio_new(c, block, nf, &need_submit);
	dm_bufio_unlock(c);

	if (!b)
		return b;

	if (need_submit)
		submit_io(b, READ, b->block, read_endio);

	wait_on_bit(&b->state, B_READING, do_io_schedule, TASK_UNINTERRUPTIBLE);

	if (b->read_error) {
		int error = b->read_error;

		dm_bufio_release(b);

		return ERR_PTR(error);
	}

	*bp = b;

	return b->data;
}

void *dm_bufio_get(struct dm_bufio_client *c, sector_t block,
		   struct dm_buffer **bp)
{
	return new_read(c, block, NF_GET, bp);
}
EXPORT_SYMBOL_GPL(dm_bufio_get);

void *dm_bufio_read(struct dm_bufio_client *c, sector_t block,
		    struct dm_buffer **bp)
{
	BUG_ON(dm_bufio_in_request());

	return new_read(c, block, NF_READ, bp);
}
EXPORT_SYMBOL_GPL(dm_bufio_read);

void *dm_bufio_new(struct dm_bufio_client *c, sector_t block,
		   struct dm_buffer **bp)
{
	BUG_ON(dm_bufio_in_request());

	return new_read(c, block, NF_FRESH, bp);
}
EXPORT_SYMBOL_GPL(dm_bufio_new);
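
/*
 * Usage sketch (illustrative, not part of the driver; the error handling
 * shown is the minimum a real caller would need): read a block, overwrite
 * it and queue it for writeback.  "c" is a client obtained from
 * dm_bufio_client_create.
 *
 *	struct dm_buffer *bp;
 *	void *data = dm_bufio_read(c, block, &bp);
 *
 *	if (IS_ERR(data))
 *		return PTR_ERR(data);
 *	memset(data, 0, dm_bufio_get_block_size(c));
 *	dm_bufio_mark_buffer_dirty(bp);
 *	dm_bufio_release(bp);
 */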

void dm_bufio_prefetch(struct dm_bufio_client *c,
		       sector_t block, unsigned n_blocks)
{
	struct blk_plug plug;

	blk_start_plug(&plug);
	dm_bufio_lock(c);

	for (; n_blocks--; block++) {
		int need_submit;
		struct dm_buffer *b;
		b = __bufio_new(c, block, NF_PREFETCH, &need_submit);
		if (unlikely(b != NULL)) {
			dm_bufio_unlock(c);

			if (need_submit)
				submit_io(b, READ, b->block, read_endio);
			dm_bufio_release(b);

			dm_bufio_cond_resched();

			if (!n_blocks)
				goto flush_plug;
			dm_bufio_lock(c);
		}
	}

	dm_bufio_unlock(c);

flush_plug:
	blk_finish_plug(&plug);
}
EXPORT_SYMBOL_GPL(dm_bufio_prefetch);

void dm_bufio_release(struct dm_buffer *b)
{
	struct dm_bufio_client *c = b->c;

	dm_bufio_lock(c);

	BUG_ON(!b->hold_count);

	b->hold_count--;
	if (!b->hold_count) {
		wake_up(&c->free_buffer_wait);

		/*
		 * If there were errors on the buffer, and the buffer is not
		 * to be written, free the buffer. There is no point in
		 * caching an invalid buffer.
		 */
		if ((b->read_error || b->write_error) &&
		    !test_bit(B_READING, &b->state) &&
		    !test_bit(B_WRITING, &b->state) &&
		    !test_bit(B_DIRTY, &b->state)) {
			__unlink_buffer(b);
			__free_buffer_wake(b);
		}
	}

	dm_bufio_unlock(c);
}
EXPORT_SYMBOL_GPL(dm_bufio_release);

void dm_bufio_mark_buffer_dirty(struct dm_buffer *b)
{
	struct dm_bufio_client *c = b->c;

	dm_bufio_lock(c);

	BUG_ON(test_bit(B_READING, &b->state));

	if (!test_and_set_bit(B_DIRTY, &b->state))
		__relink_lru(b, LIST_DIRTY);

	dm_bufio_unlock(c);
}
EXPORT_SYMBOL_GPL(dm_bufio_mark_buffer_dirty);

void dm_bufio_write_dirty_buffers_async(struct dm_bufio_client *c)
{
	BUG_ON(dm_bufio_in_request());

	dm_bufio_lock(c);
	__write_dirty_buffers_async(c, 0);
	dm_bufio_unlock(c);
}
EXPORT_SYMBOL_GPL(dm_bufio_write_dirty_buffers_async);

/*
 * For performance, it is essential that the buffers are written asynchronously
 * and simultaneously (so that the block layer can merge the writes) and then
 * waited upon.
 *
 * Finally, we flush the hardware disk cache.
 */
int dm_bufio_write_dirty_buffers(struct dm_bufio_client *c)
{
	int a, f;
	unsigned long buffers_processed = 0;
	struct dm_buffer *b, *tmp;

	dm_bufio_lock(c);
	__write_dirty_buffers_async(c, 0);

again:
	list_for_each_entry_safe_reverse(b, tmp, &c->lru[LIST_DIRTY], lru_list) {
		int dropped_lock = 0;

		if (buffers_processed < c->n_buffers[LIST_DIRTY])
			buffers_processed++;

		BUG_ON(test_bit(B_READING, &b->state));

		if (test_bit(B_WRITING, &b->state)) {
			if (buffers_processed < c->n_buffers[LIST_DIRTY]) {
				dropped_lock = 1;
				b->hold_count++;
				dm_bufio_unlock(c);
				wait_on_bit(&b->state, B_WRITING,
					    do_io_schedule,
					    TASK_UNINTERRUPTIBLE);
				dm_bufio_lock(c);
				b->hold_count--;
			} else
				wait_on_bit(&b->state, B_WRITING,
					    do_io_schedule,
					    TASK_UNINTERRUPTIBLE);
		}

		if (!test_bit(B_DIRTY, &b->state) &&
		    !test_bit(B_WRITING, &b->state))
			__relink_lru(b, LIST_CLEAN);

		dm_bufio_cond_resched();

		/*
		 * If we dropped the lock, the list is no longer consistent,
		 * so we must restart the search.
		 *
		 * In the most common case, the buffer just processed is
		 * relinked to the clean list, so we won't loop scanning the
		 * same buffer again and again.
		 *
		 * This may livelock if there is another thread simultaneously
		 * dirtying buffers, so we count the number of buffers walked
		 * and if it exceeds the total number of buffers, it means that
		 * someone is doing some writes simultaneously with us.  In
		 * this case, stop, dropping the lock.
		 */
		if (dropped_lock)
			goto again;
	}
	wake_up(&c->free_buffer_wait);
	dm_bufio_unlock(c);

	a = xchg(&c->async_write_error, 0);
	f = dm_bufio_issue_flush(c);
	if (a)
		return a;

	return f;
}
EXPORT_SYMBOL_GPL(dm_bufio_write_dirty_buffers);

/*
 * Use dm-io to send an empty barrier and flush the device.
 */
int dm_bufio_issue_flush(struct dm_bufio_client *c)
{
	struct dm_io_request io_req = {
		.bi_rw = REQ_FLUSH,
		.mem.type = DM_IO_KMEM,
		.mem.ptr.addr = NULL,
		.client = c->dm_io,
	};
	struct dm_io_region io_reg = {
		.bdev = c->bdev,
		.sector = 0,
		.count = 0,
	};

	BUG_ON(dm_bufio_in_request());

	return dm_io(&io_req, 1, &io_reg, NULL);
}
EXPORT_SYMBOL_GPL(dm_bufio_issue_flush);

/*
 * We first delete any other buffer that may be at that new location.
 *
 * Then, we write the buffer to the original location if it was dirty.
 *
 * Then, if we are the only one who is holding the buffer, relink the buffer
 * in the hash queue for the new location.
 *
 * If there was someone else holding the buffer, we write it to the new
 * location but do not relink it, because that other user needs to have the
 * buffer at the same place.
 */
void dm_bufio_release_move(struct dm_buffer *b, sector_t new_block)
{
	struct dm_bufio_client *c = b->c;
	struct dm_buffer *new;

	BUG_ON(dm_bufio_in_request());

	dm_bufio_lock(c);

retry:
	new = __find(c, new_block);
	if (new) {
		if (new->hold_count) {
			__wait_for_free_buffer(c);
			goto retry;
		}

		/*
		 * FIXME: Is there any point waiting for a write that's going
		 * to be overwritten in a bit?
		 */
		__make_buffer_clean(new);
		__unlink_buffer(new);
		__free_buffer_wake(new);
	}

	BUG_ON(!b->hold_count);
	BUG_ON(test_bit(B_READING, &b->state));

	__write_dirty_buffer(b);
	if (b->hold_count == 1) {
		wait_on_bit(&b->state, B_WRITING,
			    do_io_schedule, TASK_UNINTERRUPTIBLE);
		set_bit(B_DIRTY, &b->state);
		__unlink_buffer(b);
		__link_buffer(b, new_block, LIST_DIRTY);
	} else {
		sector_t old_block;
		wait_on_bit_lock(&b->state, B_WRITING,
				 do_io_schedule, TASK_UNINTERRUPTIBLE);
		/*
		 * Relink buffer to "new_block" so that write_callback
		 * sees "new_block" as the block number.
		 * After the write, link the buffer back to old_block.
		 * All this must be done under the bufio lock, so that the
		 * block number change isn't visible to other threads.
		 */
		old_block = b->block;
		__unlink_buffer(b);
		__link_buffer(b, new_block, b->list_mode);
		submit_io(b, WRITE, new_block, write_endio);
		wait_on_bit(&b->state, B_WRITING,
			    do_io_schedule, TASK_UNINTERRUPTIBLE);
		__unlink_buffer(b);
		__link_buffer(b, old_block, b->list_mode);
	}

	dm_bufio_unlock(c);
	dm_bufio_release(b);
}
EXPORT_SYMBOL_GPL(dm_bufio_release_move);

unsigned dm_bufio_get_block_size(struct dm_bufio_client *c)
{
	return c->block_size;
}
EXPORT_SYMBOL_GPL(dm_bufio_get_block_size);

sector_t dm_bufio_get_device_size(struct dm_bufio_client *c)
{
	return i_size_read(c->bdev->bd_inode) >>
			   (SECTOR_SHIFT + c->sectors_per_block_bits);
}
EXPORT_SYMBOL_GPL(dm_bufio_get_device_size);

sector_t dm_bufio_get_block_number(struct dm_buffer *b)
{
	return b->block;
}
EXPORT_SYMBOL_GPL(dm_bufio_get_block_number);

void *dm_bufio_get_block_data(struct dm_buffer *b)
{
	return b->data;
}
EXPORT_SYMBOL_GPL(dm_bufio_get_block_data);

void *dm_bufio_get_aux_data(struct dm_buffer *b)
{
	return b + 1;
}
EXPORT_SYMBOL_GPL(dm_bufio_get_aux_data);
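
/*
 * Illustrative sketch: the aux area lives directly behind struct dm_buffer
 * (see alloc_buffer), so a client that passed aux_size = sizeof(struct
 * my_aux) to dm_bufio_client_create can retrieve its per-buffer state with:
 *
 *	struct my_aux *aux = dm_bufio_get_aux_data(b);
 *
 * "struct my_aux" is a hypothetical client-side type, not something defined
 * in this file.
 */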

struct dm_bufio_client *dm_bufio_get_client(struct dm_buffer *b)
{
	return b->c;
}
EXPORT_SYMBOL_GPL(dm_bufio_get_client);

static void drop_buffers(struct dm_bufio_client *c)
{
	struct dm_buffer *b;
	int i;

	BUG_ON(dm_bufio_in_request());

	/*
	 * An optimization so that the buffers are not written one-by-one.
	 */
	dm_bufio_write_dirty_buffers_async(c);

	dm_bufio_lock(c);

	while ((b = __get_unclaimed_buffer(c)))
		__free_buffer_wake(b);

	for (i = 0; i < LIST_SIZE; i++)
		list_for_each_entry(b, &c->lru[i], lru_list)
			DMERR("leaked buffer %llx, hold count %u, list %d",
			      (unsigned long long)b->block, b->hold_count, i);

	for (i = 0; i < LIST_SIZE; i++)
		BUG_ON(!list_empty(&c->lru[i]));

	dm_bufio_unlock(c);
}

/*
 * Test if the buffer is unused and too old, and commit it.
 * If noio is set, we must not do any I/O because we hold
 * dm_bufio_clients_lock and we would risk deadlock if the I/O gets rerouted
 * to a different bufio client.
 */
static int __cleanup_old_buffer(struct dm_buffer *b, gfp_t gfp,
				unsigned long max_jiffies)
{
	if (jiffies - b->last_accessed < max_jiffies)
		return 1;

	if (!(gfp & __GFP_IO)) {
		if (test_bit(B_READING, &b->state) ||
		    test_bit(B_WRITING, &b->state) ||
		    test_bit(B_DIRTY, &b->state))
			return 1;
	}

	if (b->hold_count)
		return 1;

	__make_buffer_clean(b);
	__unlink_buffer(b);
	__free_buffer_wake(b);

	return 0;
}

static void __scan(struct dm_bufio_client *c, unsigned long nr_to_scan,
		   struct shrink_control *sc)
{
	int l;
	struct dm_buffer *b, *tmp;

	for (l = 0; l < LIST_SIZE; l++) {
		list_for_each_entry_safe_reverse(b, tmp, &c->lru[l], lru_list)
			if (!__cleanup_old_buffer(b, sc->gfp_mask, 0) &&
			    !--nr_to_scan)
				return;
		dm_bufio_cond_resched();
	}
}

static int shrink(struct shrinker *shrinker, struct shrink_control *sc)
{
	struct dm_bufio_client *c =
	    container_of(shrinker, struct dm_bufio_client, shrinker);
	unsigned long r;
	unsigned long nr_to_scan = sc->nr_to_scan;

	if (sc->gfp_mask & __GFP_IO)
		dm_bufio_lock(c);
	else if (!dm_bufio_trylock(c))
		return !nr_to_scan ? 0 : -1;

	if (nr_to_scan)
		__scan(c, nr_to_scan, sc);

	r = c->n_buffers[LIST_CLEAN] + c->n_buffers[LIST_DIRTY];
	if (r > INT_MAX)
		r = INT_MAX;

	dm_bufio_unlock(c);

	return r;
}

/*
 * Create the buffering interface
 */
struct dm_bufio_client *dm_bufio_client_create(struct block_device *bdev, unsigned block_size,
					       unsigned reserved_buffers, unsigned aux_size,
					       void (*alloc_callback)(struct dm_buffer *),
					       void (*write_callback)(struct dm_buffer *))
{
	int r;
	struct dm_bufio_client *c;
	unsigned i;

	BUG_ON(block_size < 1 << SECTOR_SHIFT ||
	       (block_size & (block_size - 1)));

	c = kmalloc(sizeof(*c), GFP_KERNEL);
	if (!c) {
		r = -ENOMEM;
		goto bad_client;
	}
	c->cache_hash = vmalloc(sizeof(struct hlist_head) << DM_BUFIO_HASH_BITS);
	if (!c->cache_hash) {
		r = -ENOMEM;
		goto bad_hash;
	}

	c->bdev = bdev;
	c->block_size = block_size;
	c->sectors_per_block_bits = ffs(block_size) - 1 - SECTOR_SHIFT;
	c->pages_per_block_bits = (ffs(block_size) - 1 >= PAGE_SHIFT) ?
				  ffs(block_size) - 1 - PAGE_SHIFT : 0;
	c->blocks_per_page_bits = (ffs(block_size) - 1 < PAGE_SHIFT ?
				  PAGE_SHIFT - (ffs(block_size) - 1) : 0);

	c->aux_size = aux_size;
	c->alloc_callback = alloc_callback;
	c->write_callback = write_callback;

	for (i = 0; i < LIST_SIZE; i++) {
		INIT_LIST_HEAD(&c->lru[i]);
		c->n_buffers[i] = 0;
	}

	for (i = 0; i < 1 << DM_BUFIO_HASH_BITS; i++)
		INIT_HLIST_HEAD(&c->cache_hash[i]);

	mutex_init(&c->lock);
	INIT_LIST_HEAD(&c->reserved_buffers);
	c->need_reserved_buffers = reserved_buffers;

	init_waitqueue_head(&c->free_buffer_wait);
	c->async_write_error = 0;

	c->dm_io = dm_io_client_create();
	if (IS_ERR(c->dm_io)) {
		r = PTR_ERR(c->dm_io);
		goto bad_dm_io;
	}

	mutex_lock(&dm_bufio_clients_lock);
	if (c->blocks_per_page_bits) {
		if (!DM_BUFIO_CACHE_NAME(c)) {
			DM_BUFIO_CACHE_NAME(c) = kasprintf(GFP_KERNEL, "dm_bufio_cache-%u", c->block_size);
			if (!DM_BUFIO_CACHE_NAME(c)) {
				r = -ENOMEM;
				mutex_unlock(&dm_bufio_clients_lock);
				goto bad_cache;
			}
		}

		if (!DM_BUFIO_CACHE(c)) {
			DM_BUFIO_CACHE(c) = kmem_cache_create(DM_BUFIO_CACHE_NAME(c),
							      c->block_size,
							      c->block_size, 0, NULL);
			if (!DM_BUFIO_CACHE(c)) {
				r = -ENOMEM;
				mutex_unlock(&dm_bufio_clients_lock);
				goto bad_cache;
			}
		}
	}
	mutex_unlock(&dm_bufio_clients_lock);

	while (c->need_reserved_buffers) {
		struct dm_buffer *b = alloc_buffer(c, GFP_KERNEL);

		if (!b) {
			r = -ENOMEM;
			goto bad_buffer;
		}
		__free_buffer_wake(b);
	}

	mutex_lock(&dm_bufio_clients_lock);
	dm_bufio_client_count++;
	list_add(&c->client_list, &dm_bufio_all_clients);
	__cache_size_refresh();
	mutex_unlock(&dm_bufio_clients_lock);

	c->shrinker.shrink = shrink;
	c->shrinker.seeks = 1;
	c->shrinker.batch = 0;
	register_shrinker(&c->shrinker);

	return c;

bad_buffer:
bad_cache:
	while (!list_empty(&c->reserved_buffers)) {
		struct dm_buffer *b = list_entry(c->reserved_buffers.next,
						 struct dm_buffer, lru_list);
		list_del(&b->lru_list);
		free_buffer(b);
	}
	dm_io_client_destroy(c->dm_io);
bad_dm_io:
	vfree(c->cache_hash);
bad_hash:
	kfree(c);
bad_client:
	return ERR_PTR(r);
}
EXPORT_SYMBOL_GPL(dm_bufio_client_create);
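
/*
 * Lifecycle sketch (illustrative; "my_bdev" and the single reserved buffer
 * are assumptions for the example): a client with 4 KiB blocks and no aux
 * data or callbacks.
 *
 *	struct dm_bufio_client *c;
 *
 *	c = dm_bufio_client_create(my_bdev, 4096, 1, 0, NULL, NULL);
 *	if (IS_ERR(c))
 *		return PTR_ERR(c);
 *	...
 *	dm_bufio_write_dirty_buffers(c);
 *	dm_bufio_client_destroy(c);
 */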

/*
 * Free the buffering interface.
 * It is required that there are no references on any buffers.
 */
void dm_bufio_client_destroy(struct dm_bufio_client *c)
{
	unsigned i;

	drop_buffers(c);

	unregister_shrinker(&c->shrinker);

	mutex_lock(&dm_bufio_clients_lock);

	list_del(&c->client_list);
	dm_bufio_client_count--;
	__cache_size_refresh();

	mutex_unlock(&dm_bufio_clients_lock);

	for (i = 0; i < 1 << DM_BUFIO_HASH_BITS; i++)
		BUG_ON(!hlist_empty(&c->cache_hash[i]));

	BUG_ON(c->need_reserved_buffers);

	while (!list_empty(&c->reserved_buffers)) {
		struct dm_buffer *b = list_entry(c->reserved_buffers.next,
						 struct dm_buffer, lru_list);
		list_del(&b->lru_list);
		free_buffer(b);
	}

	for (i = 0; i < LIST_SIZE; i++)
		if (c->n_buffers[i])
			DMERR("leaked buffer count %d: %ld", i, c->n_buffers[i]);

	for (i = 0; i < LIST_SIZE; i++)
		BUG_ON(c->n_buffers[i]);

	dm_io_client_destroy(c->dm_io);
	vfree(c->cache_hash);
	kfree(c);
}
EXPORT_SYMBOL_GPL(dm_bufio_client_destroy);

static void cleanup_old_buffers(void)
{
	unsigned long max_age = dm_bufio_max_age;
	struct dm_bufio_client *c;

	barrier();

	if (max_age > ULONG_MAX / HZ)
		max_age = ULONG_MAX / HZ;

	mutex_lock(&dm_bufio_clients_lock);
	list_for_each_entry(c, &dm_bufio_all_clients, client_list) {
		if (!dm_bufio_trylock(c))
			continue;

		while (!list_empty(&c->lru[LIST_CLEAN])) {
			struct dm_buffer *b;
			b = list_entry(c->lru[LIST_CLEAN].prev,
				       struct dm_buffer, lru_list);
			if (__cleanup_old_buffer(b, 0, max_age * HZ))
				break;
			dm_bufio_cond_resched();
		}

		dm_bufio_unlock(c);
		dm_bufio_cond_resched();
	}
	mutex_unlock(&dm_bufio_clients_lock);
}

static struct workqueue_struct *dm_bufio_wq;
static struct delayed_work dm_bufio_work;

static void work_fn(struct work_struct *w)
{
	cleanup_old_buffers();

	queue_delayed_work(dm_bufio_wq, &dm_bufio_work,
			   DM_BUFIO_WORK_TIMER_SECS * HZ);
}

/*----------------------------------------------------------------
 * Module setup
 *--------------------------------------------------------------*/

/*
 * This is called only once for the whole dm_bufio module.
 * It initializes the memory limit.
 */
static int __init dm_bufio_init(void)
{
	__u64 mem;

	memset(&dm_bufio_caches, 0, sizeof dm_bufio_caches);
	memset(&dm_bufio_cache_names, 0, sizeof dm_bufio_cache_names);

	mem = (__u64)((totalram_pages - totalhigh_pages) *
		      DM_BUFIO_MEMORY_PERCENT / 100) << PAGE_SHIFT;

	if (mem > ULONG_MAX)
		mem = ULONG_MAX;

#ifdef CONFIG_MMU
	/*
	 * Get the size of vmalloc space the same way as VMALLOC_TOTAL
	 * in fs/proc/internal.h
	 */
	if (mem > (VMALLOC_END - VMALLOC_START) * DM_BUFIO_VMALLOC_PERCENT / 100)
		mem = (VMALLOC_END - VMALLOC_START) * DM_BUFIO_VMALLOC_PERCENT / 100;
#endif

	dm_bufio_default_cache_size = mem;

	mutex_lock(&dm_bufio_clients_lock);
	__cache_size_refresh();
	mutex_unlock(&dm_bufio_clients_lock);

	dm_bufio_wq = create_singlethread_workqueue("dm_bufio_cache");
	if (!dm_bufio_wq)
		return -ENOMEM;

	INIT_DELAYED_WORK(&dm_bufio_work, work_fn);
	queue_delayed_work(dm_bufio_wq, &dm_bufio_work,
			   DM_BUFIO_WORK_TIMER_SECS * HZ);

	return 0;
}

/*
 * This is called once when unloading the dm_bufio module.
 */
static void __exit dm_bufio_exit(void)
{
	int bug = 0;
	int i;

	cancel_delayed_work_sync(&dm_bufio_work);
	destroy_workqueue(dm_bufio_wq);

	for (i = 0; i < ARRAY_SIZE(dm_bufio_caches); i++) {
		struct kmem_cache *kc = dm_bufio_caches[i];

		if (kc)
			kmem_cache_destroy(kc);
	}

	for (i = 0; i < ARRAY_SIZE(dm_bufio_cache_names); i++)
		kfree(dm_bufio_cache_names[i]);

	if (dm_bufio_client_count) {
		DMCRIT("%s: dm_bufio_client_count leaked: %d",
			__func__, dm_bufio_client_count);
		bug = 1;
	}

	if (dm_bufio_current_allocated) {
		DMCRIT("%s: dm_bufio_current_allocated leaked: %lu",
			__func__, dm_bufio_current_allocated);
		bug = 1;
	}

	if (dm_bufio_allocated_get_free_pages) {
		DMCRIT("%s: dm_bufio_allocated_get_free_pages leaked: %lu",
		       __func__, dm_bufio_allocated_get_free_pages);
		bug = 1;
	}

	if (dm_bufio_allocated_vmalloc) {
		DMCRIT("%s: dm_bufio_vmalloc leaked: %lu",
		       __func__, dm_bufio_allocated_vmalloc);
		bug = 1;
	}

	if (bug)
		BUG();
}

module_init(dm_bufio_init)
module_exit(dm_bufio_exit)

module_param_named(max_cache_size_bytes, dm_bufio_cache_size, ulong, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(max_cache_size_bytes, "Size of metadata cache");

module_param_named(max_age_seconds, dm_bufio_max_age, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(max_age_seconds, "Max age of a buffer in seconds");

module_param_named(peak_allocated_bytes, dm_bufio_peak_allocated, ulong, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(peak_allocated_bytes, "Tracks the maximum allocated memory");

module_param_named(allocated_kmem_cache_bytes, dm_bufio_allocated_kmem_cache, ulong, S_IRUGO);
MODULE_PARM_DESC(allocated_kmem_cache_bytes, "Memory allocated with kmem_cache_alloc");

module_param_named(allocated_get_free_pages_bytes, dm_bufio_allocated_get_free_pages, ulong, S_IRUGO);
MODULE_PARM_DESC(allocated_get_free_pages_bytes, "Memory allocated with get_free_pages");

module_param_named(allocated_vmalloc_bytes, dm_bufio_allocated_vmalloc, ulong, S_IRUGO);
MODULE_PARM_DESC(allocated_vmalloc_bytes, "Memory allocated with vmalloc");

module_param_named(current_allocated_bytes, dm_bufio_current_allocated, ulong, S_IRUGO);
MODULE_PARM_DESC(current_allocated_bytes, "Memory currently used by the cache");

MODULE_AUTHOR("Mikulas Patocka <dm-devel@redhat.com>");
MODULE_DESCRIPTION(DM_NAME " buffered I/O library");
MODULE_LICENSE("GPL");