xref: /openbmc/linux/drivers/md/dm-io.c (revision 92b19ff5)
/*
 * Copyright (C) 2003 Sistina Software
 * Copyright (C) 2006 Red Hat GmbH
 *
 * This file is released under the GPL.
 */

#include "dm.h"

#include <linux/device-mapper.h>

#include <linux/bio.h>
#include <linux/completion.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/dm-io.h>

#define DM_MSG_PREFIX "io"

#define DM_IO_MAX_REGIONS	BITS_PER_LONG

struct dm_io_client {
	mempool_t *pool;
	struct bio_set *bios;
};

/*
 * Aligning 'struct io' reduces the number of bits required to store
 * its address.  Refer to store_io_and_region_in_bio() below.
 */
struct io {
	unsigned long error_bits;
	atomic_t count;
	struct dm_io_client *client;
	io_notify_fn callback;
	void *context;
	void *vma_invalidate_address;
	unsigned long vma_invalidate_size;
} __attribute__((aligned(DM_IO_MAX_REGIONS)));

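/*
 * Since DM_IO_MAX_REGIONS is BITS_PER_LONG, 'struct io' is 64-byte
 * aligned on 64-bit builds (32-byte on 32-bit builds), so the low
 * log2(BITS_PER_LONG) bits of any 'struct io' address are always zero.
 * Those spare bits are what let a region index share bi_private with
 * the pointer in store_io_and_region_in_bio() below.
 */
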
static struct kmem_cache *_dm_io_cache;

/*
 * Create a client with mempool and bioset.
 */
struct dm_io_client *dm_io_client_create(void)
{
	struct dm_io_client *client;
	unsigned min_ios = dm_get_reserved_bio_based_ios();

	client = kmalloc(sizeof(*client), GFP_KERNEL);
	if (!client)
		return ERR_PTR(-ENOMEM);

	client->pool = mempool_create_slab_pool(min_ios, _dm_io_cache);
	if (!client->pool)
		goto bad;

	client->bios = bioset_create(min_ios, 0);
	if (!client->bios)
		goto bad;

	return client;

   bad:
	if (client->pool)
		mempool_destroy(client->pool);
	kfree(client);
	return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL(dm_io_client_create);

void dm_io_client_destroy(struct dm_io_client *client)
{
	mempool_destroy(client->pool);
	bioset_free(client->bios);
	kfree(client);
}
EXPORT_SYMBOL(dm_io_client_destroy);

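/*
 * Typical caller usage, as a minimal sketch (a dm target would normally
 * do this from its ctr/dtr; the 'ioc' variable is hypothetical):
 *
 *	struct dm_io_client *ioc;
 *
 *	ioc = dm_io_client_create();
 *	if (IS_ERR(ioc))
 *		return PTR_ERR(ioc);
 *	...
 *	dm_io_client_destroy(ioc);
 */
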
/*-----------------------------------------------------------------
 * We need to keep track of which region a bio is doing io for.
 * To avoid a memory allocation to store just 5 or 6 bits, we
 * ensure the 'struct io' pointer is aligned so enough low bits are
 * always zero and then combine it with the region number directly in
 * bi_private.
 *---------------------------------------------------------------*/
static void store_io_and_region_in_bio(struct bio *bio, struct io *io,
				       unsigned region)
{
	if (unlikely(!IS_ALIGNED((unsigned long)io, DM_IO_MAX_REGIONS))) {
		DMCRIT("Unaligned struct io pointer %p", io);
		BUG();
	}

	bio->bi_private = (void *)((unsigned long)io | region);
}

static void retrieve_io_and_region_from_bio(struct bio *bio, struct io **io,
				       unsigned *region)
{
	unsigned long val = (unsigned long)bio->bi_private;

	*io = (void *)(val & -(unsigned long)DM_IO_MAX_REGIONS);
	*region = val & (DM_IO_MAX_REGIONS - 1);
}

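/*
 * Worked example of the packing above (illustrative, assuming a 64-bit
 * build where DM_IO_MAX_REGIONS == 64): an io at address A has its low
 * six bits clear, so storing region 5 yields bi_private == A | 5.
 * Retrieval recovers both values because -64UL == ~63UL:
 *
 *	*io     = (void *)((A | 5) & ~63UL);	// == A
 *	*region = (A | 5) & 63;			// == 5
 */
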
/*-----------------------------------------------------------------
 * We need an io object to keep track of the number of bios that
 * have been dispatched for a particular io.
 *---------------------------------------------------------------*/
static void complete_io(struct io *io)
{
	unsigned long error_bits = io->error_bits;
	io_notify_fn fn = io->callback;
	void *context = io->context;

	if (io->vma_invalidate_size)
		invalidate_kernel_vmap_range(io->vma_invalidate_address,
					     io->vma_invalidate_size);

	mempool_free(io, io->client->pool);
	fn(error_bits, context);
}

static void dec_count(struct io *io, unsigned int region, int error)
{
	if (error)
		set_bit(region, &io->error_bits);

	if (atomic_dec_and_test(&io->count))
		complete_io(io);
}

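/*
 * error_bits accumulates one bit per failed region, which is why
 * num_regions may not exceed DM_IO_MAX_REGIONS (BITS_PER_LONG).  The
 * final bitmap reaches the caller either through the notify callback's
 * first argument or through dm_io()'s sync_error_bits pointer.
 */
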
static void endio(struct bio *bio, int error)
{
	struct io *io;
	unsigned region;

	if (error && bio_data_dir(bio) == READ)
		zero_fill_bio(bio);

	/*
	 * The bio destructor in bio_put() may use the io object.
	 */
	retrieve_io_and_region_from_bio(bio, &io, &region);

	bio_put(bio);

	dec_count(io, region, error);
}

/*-----------------------------------------------------------------
 * These little objects provide an abstraction for getting a new
 * destination page for io.
 *---------------------------------------------------------------*/
struct dpages {
	void (*get_page)(struct dpages *dp,
			 struct page **p, unsigned long *len, unsigned *offset);
	void (*next_page)(struct dpages *dp);

	unsigned context_u;
	void *context_ptr;

	void *vma_invalidate_address;
	unsigned long vma_invalidate_size;
};

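/*
 * A dpages object is consumed roughly as below (a minimal sketch of the
 * loop do_region() runs further down; 'bytes_left' is a stand-in for
 * the caller's remaining byte count):
 *
 *	struct page *page;
 *	unsigned long len;
 *	unsigned offset;
 *
 *	while (bytes_left) {
 *		dp->get_page(dp, &page, &len, &offset);
 *		len = min(len, bytes_left);
 *		... transfer 'len' bytes of 'page' starting at 'offset' ...
 *		bytes_left -= len;
 *		dp->next_page(dp);
 *	}
 */
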
/*
 * Functions for getting the pages from a list.
 */
static void list_get_page(struct dpages *dp,
		  struct page **p, unsigned long *len, unsigned *offset)
{
	unsigned o = dp->context_u;
	struct page_list *pl = (struct page_list *) dp->context_ptr;

	*p = pl->page;
	*len = PAGE_SIZE - o;
	*offset = o;
}

static void list_next_page(struct dpages *dp)
{
	struct page_list *pl = (struct page_list *) dp->context_ptr;
	dp->context_ptr = pl->next;
	dp->context_u = 0;
}

static void list_dp_init(struct dpages *dp, struct page_list *pl, unsigned offset)
{
	dp->get_page = list_get_page;
	dp->next_page = list_next_page;
	dp->context_u = offset;
	dp->context_ptr = pl;
}

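/*
 * For the list case, context_ptr walks a singly linked struct page_list
 * (declared in <linux/dm-io.h>) and context_u holds the byte offset
 * into the current page; only the first page can start at a non-zero
 * offset, since list_next_page() resets the offset to zero.
 */
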
/*
 * Functions for getting the pages from a bvec.
 */
static void bio_get_page(struct dpages *dp, struct page **p,
			 unsigned long *len, unsigned *offset)
{
	struct bio_vec *bvec = dp->context_ptr;
	*p = bvec->bv_page;
	*len = bvec->bv_len - dp->context_u;
	*offset = bvec->bv_offset + dp->context_u;
}

static void bio_next_page(struct dpages *dp)
{
	struct bio_vec *bvec = dp->context_ptr;
	dp->context_ptr = bvec + 1;
	dp->context_u = 0;
}

static void bio_dp_init(struct dpages *dp, struct bio *bio)
{
	dp->get_page = bio_get_page;
	dp->next_page = bio_next_page;
	dp->context_ptr = __bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter);
	dp->context_u = bio->bi_iter.bi_bvec_done;
}

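/*
 * For the bio case, __bvec_iter_bvec() points context_ptr at the bvec
 * the bio's iterator currently sits in, and bi_bvec_done is the number
 * of bytes of that bvec already completed, so bio_get_page() hands out
 * only the unprocessed remainder of the current segment.
 */
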
/*
 * Functions for getting the pages from a VMA.
 */
static void vm_get_page(struct dpages *dp,
		 struct page **p, unsigned long *len, unsigned *offset)
{
	*p = vmalloc_to_page(dp->context_ptr);
	*offset = dp->context_u;
	*len = PAGE_SIZE - dp->context_u;
}

static void vm_next_page(struct dpages *dp)
{
	dp->context_ptr += PAGE_SIZE - dp->context_u;
	dp->context_u = 0;
}

static void vm_dp_init(struct dpages *dp, void *data)
{
	dp->get_page = vm_get_page;
	dp->next_page = vm_next_page;
	dp->context_u = ((unsigned long) data) & (PAGE_SIZE - 1);
	dp->context_ptr = data;
}

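/*
 * Vmalloc'd buffers are virtually but not physically contiguous, hence
 * the per-page vmalloc_to_page() lookup above.  dp_init() below pairs
 * this with flush_kernel_vmap_range() before submission and, for reads,
 * invalidate_kernel_vmap_range() on completion, which matters on
 * architectures with aliasing data caches.
 */
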
/*
 * Functions for getting the pages from kernel memory.
 */
static void km_get_page(struct dpages *dp, struct page **p, unsigned long *len,
			unsigned *offset)
{
	*p = virt_to_page(dp->context_ptr);
	*offset = dp->context_u;
	*len = PAGE_SIZE - dp->context_u;
}

static void km_next_page(struct dpages *dp)
{
	dp->context_ptr += PAGE_SIZE - dp->context_u;
	dp->context_u = 0;
}

static void km_dp_init(struct dpages *dp, void *data)
{
	dp->get_page = km_get_page;
	dp->next_page = km_next_page;
	dp->context_u = ((unsigned long) data) & (PAGE_SIZE - 1);
	dp->context_ptr = data;
}

/*-----------------------------------------------------------------
 * IO routines that accept a list of pages.
 *---------------------------------------------------------------*/
static void do_region(int rw, unsigned region, struct dm_io_region *where,
		      struct dpages *dp, struct io *io)
{
	struct bio *bio;
	struct page *page;
	unsigned long len;
	unsigned offset;
	unsigned num_bvecs;
	sector_t remaining = where->count;
	struct request_queue *q = bdev_get_queue(where->bdev);
	unsigned short logical_block_size = queue_logical_block_size(q);
	sector_t num_sectors;
	unsigned int uninitialized_var(special_cmd_max_sectors);

	/*
	 * Reject unsupported discard and write same requests.
	 */
	if (rw & REQ_DISCARD)
		special_cmd_max_sectors = q->limits.max_discard_sectors;
	else if (rw & REQ_WRITE_SAME)
		special_cmd_max_sectors = q->limits.max_write_same_sectors;
	if ((rw & (REQ_DISCARD | REQ_WRITE_SAME)) && special_cmd_max_sectors == 0) {
		dec_count(io, region, -EOPNOTSUPP);
		return;
	}

	/*
	 * where->count may be zero if rw holds a flush and we need to
	 * send a zero-sized flush.
	 */
	do {
		/*
		 * Allocate a suitably sized bio.
		 */
		if ((rw & REQ_DISCARD) || (rw & REQ_WRITE_SAME))
			num_bvecs = 1;
		else
			num_bvecs = min_t(int, bio_get_nr_vecs(where->bdev),
					  dm_sector_div_up(remaining, (PAGE_SIZE >> SECTOR_SHIFT)));

		bio = bio_alloc_bioset(GFP_NOIO, num_bvecs, io->client->bios);
		bio->bi_iter.bi_sector = where->sector + (where->count - remaining);
		bio->bi_bdev = where->bdev;
		bio->bi_end_io = endio;
		store_io_and_region_in_bio(bio, io, region);

		if (rw & REQ_DISCARD) {
			num_sectors = min_t(sector_t, special_cmd_max_sectors, remaining);
			bio->bi_iter.bi_size = num_sectors << SECTOR_SHIFT;
			remaining -= num_sectors;
		} else if (rw & REQ_WRITE_SAME) {
			/*
			 * WRITE SAME only uses a single page.
			 */
			dp->get_page(dp, &page, &len, &offset);
			bio_add_page(bio, page, logical_block_size, offset);
			num_sectors = min_t(sector_t, special_cmd_max_sectors, remaining);
			bio->bi_iter.bi_size = num_sectors << SECTOR_SHIFT;

			offset = 0;
			remaining -= num_sectors;
			dp->next_page(dp);
		} else while (remaining) {
			/*
			 * Try to add as many pages as possible.
			 */
			dp->get_page(dp, &page, &len, &offset);
			len = min(len, to_bytes(remaining));
			if (!bio_add_page(bio, page, len, offset))
				break;

			offset = 0;
			remaining -= to_sector(len);
			dp->next_page(dp);
		}

		atomic_inc(&io->count);
		submit_bio(rw, bio);
	} while (remaining);
}

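/*
 * Each pass through the loop above builds and submits one bio: ordinary
 * reads and writes pack as many dpages as bio_add_page() will accept,
 * discards carry no data pages, and write-same repeats a single page;
 * the latter two are capped per bio at the queue's advertised limit.
 * The atomic_inc() of io->count before every submit_bio() keeps the
 * parent io alive until each of those bios has passed through endio().
 */
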
static void dispatch_io(int rw, unsigned int num_regions,
			struct dm_io_region *where, struct dpages *dp,
			struct io *io, int sync)
{
	int i;
	struct dpages old_pages = *dp;

	BUG_ON(num_regions > DM_IO_MAX_REGIONS);

	if (sync)
		rw |= REQ_SYNC;

	/*
	 * For multiple regions we need to be careful to rewind
	 * the dp object for each call to do_region.
	 */
	for (i = 0; i < num_regions; i++) {
		*dp = old_pages;
		if (where[i].count || (rw & REQ_FLUSH))
			do_region(rw, i, where + i, dp, io);
	}

	/*
	 * Drop the extra reference that we were holding to avoid
	 * the io being completed too early.
	 */
	dec_count(io, 0, 0);
}

struct sync_io {
	unsigned long error_bits;
	struct completion wait;
};

static void sync_io_complete(unsigned long error, void *context)
{
	struct sync_io *sio = context;

	sio->error_bits = error;
	complete(&sio->wait);
}

static int sync_io(struct dm_io_client *client, unsigned int num_regions,
		   struct dm_io_region *where, int rw, struct dpages *dp,
		   unsigned long *error_bits)
{
	struct io *io;
	struct sync_io sio;

	if (num_regions > 1 && (rw & RW_MASK) != WRITE) {
		WARN_ON(1);
		return -EIO;
	}

	init_completion(&sio.wait);

	io = mempool_alloc(client->pool, GFP_NOIO);
	io->error_bits = 0;
	atomic_set(&io->count, 1); /* see dispatch_io() */
	io->client = client;
	io->callback = sync_io_complete;
	io->context = &sio;

	io->vma_invalidate_address = dp->vma_invalidate_address;
	io->vma_invalidate_size = dp->vma_invalidate_size;

	dispatch_io(rw, num_regions, where, dp, io, 1);

	wait_for_completion_io(&sio.wait);

	if (error_bits)
		*error_bits = sio.error_bits;

	return sio.error_bits ? -EIO : 0;
}

static int async_io(struct dm_io_client *client, unsigned int num_regions,
		    struct dm_io_region *where, int rw, struct dpages *dp,
		    io_notify_fn fn, void *context)
{
	struct io *io;

	if (num_regions > 1 && (rw & RW_MASK) != WRITE) {
		WARN_ON(1);
		fn(1, context);
		return -EIO;
	}

	io = mempool_alloc(client->pool, GFP_NOIO);
	io->error_bits = 0;
	atomic_set(&io->count, 1); /* see dispatch_io() */
	io->client = client;
	io->callback = fn;
	io->context = context;

	io->vma_invalidate_address = dp->vma_invalidate_address;
	io->vma_invalidate_size = dp->vma_invalidate_size;

	dispatch_io(rw, num_regions, where, dp, io, 0);
	return 0;
}

static int dp_init(struct dm_io_request *io_req, struct dpages *dp,
		   unsigned long size)
{
	/* Set up dpages based on memory type */

	dp->vma_invalidate_address = NULL;
	dp->vma_invalidate_size = 0;

	switch (io_req->mem.type) {
	case DM_IO_PAGE_LIST:
		list_dp_init(dp, io_req->mem.ptr.pl, io_req->mem.offset);
		break;

	case DM_IO_BIO:
		bio_dp_init(dp, io_req->mem.ptr.bio);
		break;

	case DM_IO_VMA:
		flush_kernel_vmap_range(io_req->mem.ptr.vma, size);
		if ((io_req->bi_rw & RW_MASK) == READ) {
			dp->vma_invalidate_address = io_req->mem.ptr.vma;
			dp->vma_invalidate_size = size;
		}
		vm_dp_init(dp, io_req->mem.ptr.vma);
		break;

	case DM_IO_KMEM:
		km_dp_init(dp, io_req->mem.ptr.addr);
		break;

	default:
		return -EINVAL;
	}

	return 0;
}

/*
 * New collapsed (a)synchronous interface.
 *
 * If notify.fn is set, the IO is asynchronous: dm_io() returns once the
 * bios have been submitted and notify.fn is called with the per-region
 * error bitmap when all regions complete.  If notify.fn is NULL the
 * call is synchronous and only returns after the IO has completed.
 */
int dm_io(struct dm_io_request *io_req, unsigned num_regions,
	  struct dm_io_region *where, unsigned long *sync_error_bits)
{
	int r;
	struct dpages dp;

	r = dp_init(io_req, &dp, (unsigned long)where->count << SECTOR_SHIFT);
	if (r)
		return r;

	if (!io_req->notify.fn)
		return sync_io(io_req->client, num_regions, where,
			       io_req->bi_rw, &dp, sync_error_bits);

	return async_io(io_req->client, num_regions, where, io_req->bi_rw,
			&dp, io_req->notify.fn, io_req->notify.context);
}
EXPORT_SYMBOL(dm_io);

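/*
 * Minimal usage sketch (illustrative; 'client', 'bdev' and the
 * kmalloc'd 'buffer' are assumed to exist and error handling is
 * elided): a synchronous read of eight sectors into kernel memory.
 *
 *	struct dm_io_region where = {
 *		.bdev   = bdev,
 *		.sector = 0,
 *		.count  = 8,
 *	};
 *	struct dm_io_request req = {
 *		.bi_rw  = READ,
 *		.mem    = { .type = DM_IO_KMEM, .ptr.addr = buffer },
 *		.notify = { .fn = NULL },	// NULL fn => sync_io() path
 *		.client = client,
 *	};
 *	unsigned long error_bits;
 *	int r = dm_io(&req, 1, &where, &error_bits);
 *
 * With a notify.fn supplied, the same call returns immediately and the
 * callback receives the error bitmap when all regions complete.
 */
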
int __init dm_io_init(void)
{
	_dm_io_cache = KMEM_CACHE(io, 0);
	if (!_dm_io_cache)
		return -ENOMEM;

	return 0;
}

void dm_io_exit(void)
{
	kmem_cache_destroy(_dm_io_cache);
	_dm_io_cache = NULL;
}