/*
 * Copyright (C) 2003 Sistina Software
 * Copyright (C) 2006 Red Hat GmbH
 *
 * This file is released under the GPL.
 */

#include "dm.h"

#include <linux/device-mapper.h>

#include <linux/bio.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/dm-io.h>

#define DM_MSG_PREFIX "io"

#define DM_IO_MAX_REGIONS	BITS_PER_LONG
#define MIN_IOS		16
#define MIN_BIOS	16

struct dm_io_client {
	mempool_t *pool;
	struct bio_set *bios;
};

/*
 * Aligning 'struct io' reduces the number of bits required to store
 * its address.  Refer to store_io_and_region_in_bio() below.
 */
struct io {
	unsigned long error_bits;
	atomic_t count;
	struct task_struct *sleeper;
	struct dm_io_client *client;
	io_notify_fn callback;
	void *context;
} __attribute__((aligned(DM_IO_MAX_REGIONS)));

static struct kmem_cache *_dm_io_cache;

/*
 * Create a client with mempool and bioset.
 */
struct dm_io_client *dm_io_client_create(void)
{
	struct dm_io_client *client;

	client = kmalloc(sizeof(*client), GFP_KERNEL);
	if (!client)
		return ERR_PTR(-ENOMEM);

	client->pool = mempool_create_slab_pool(MIN_IOS, _dm_io_cache);
	if (!client->pool)
		goto bad;

	client->bios = bioset_create(MIN_BIOS, 0);
	if (!client->bios)
		goto bad;

	return client;

   bad:
	if (client->pool)
		mempool_destroy(client->pool);
	kfree(client);
	return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL(dm_io_client_create);

void dm_io_client_destroy(struct dm_io_client *client)
{
	mempool_destroy(client->pool);
	bioset_free(client->bios);
	kfree(client);
}
EXPORT_SYMBOL(dm_io_client_destroy);

/*-----------------------------------------------------------------
 * We need to keep track of which region a bio is doing io for.
 * To avoid a memory allocation to store just 5 or 6 bits, we
 * ensure the 'struct io' pointer is aligned so enough low bits are
 * always zero and then combine it with the region number directly in
 * bi_private.
 *---------------------------------------------------------------*/
static void store_io_and_region_in_bio(struct bio *bio, struct io *io,
				       unsigned region)
{
	if (unlikely(!IS_ALIGNED((unsigned long)io, DM_IO_MAX_REGIONS))) {
		DMCRIT("Unaligned struct io pointer %p", io);
		BUG();
	}

	bio->bi_private = (void *)((unsigned long)io | region);
}

static void retrieve_io_and_region_from_bio(struct bio *bio, struct io **io,
				       unsigned *region)
{
	unsigned long val = (unsigned long)bio->bi_private;

	*io = (void *)(val & -(unsigned long)DM_IO_MAX_REGIONS);
	*region = val & (DM_IO_MAX_REGIONS - 1);
}
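
/*
 * Example: on a 64-bit kernel DM_IO_MAX_REGIONS is 64, so 'struct io'
 * is 64-byte aligned and the low six bits of its address are always
 * zero.  A region number in the range 0..63 fits into those bits: a
 * hypothetical io at 0x...f40 combined with region 5 is stored as
 * bi_private 0x...f45, and the masks above recover both values.
 */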

/*-----------------------------------------------------------------
 * We need an io object to keep track of the number of bios that
 * have been dispatched for a particular io.
 *---------------------------------------------------------------*/
static void dec_count(struct io *io, unsigned int region, int error)
{
	if (error)
		set_bit(region, &io->error_bits);

	if (atomic_dec_and_test(&io->count)) {
		if (io->sleeper)
			wake_up_process(io->sleeper);

		else {
			unsigned long r = io->error_bits;
			io_notify_fn fn = io->callback;
			void *context = io->context;

			mempool_free(io, io->client->pool);
			fn(r, context);
		}
	}
}
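
/*
 * A non-NULL io->sleeper marks a synchronous request (see sync_io()
 * below), in which case the waiting task is woken here; otherwise the
 * request is asynchronous and is completed via the stored callback.
 */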

static void endio(struct bio *bio, int error)
{
	struct io *io;
	unsigned region;

	if (error && bio_data_dir(bio) == READ)
		zero_fill_bio(bio);

	/*
	 * The bio destructor in bio_put() may use the io object.
	 */
	retrieve_io_and_region_from_bio(bio, &io, &region);

	bio_put(bio);

	dec_count(io, region, error);
}

/*-----------------------------------------------------------------
 * These little objects provide an abstraction for getting a new
 * destination page for io.
 *---------------------------------------------------------------*/
struct dpages {
	void (*get_page)(struct dpages *dp,
			 struct page **p, unsigned long *len, unsigned *offset);
	void (*next_page)(struct dpages *dp);

	unsigned context_u;
	void *context_ptr;
};
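
/*
 * do_region() below drives this iterator: get_page() reports the current
 * page, the usable length within it and the starting offset; next_page()
 * advances to the following page once the current one has been added to
 * a bio.
 */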

/*
 * Functions for getting the pages from a list.
 */
static void list_get_page(struct dpages *dp,
		  struct page **p, unsigned long *len, unsigned *offset)
{
	unsigned o = dp->context_u;
	struct page_list *pl = (struct page_list *) dp->context_ptr;

	*p = pl->page;
	*len = PAGE_SIZE - o;
	*offset = o;
}

static void list_next_page(struct dpages *dp)
{
	struct page_list *pl = (struct page_list *) dp->context_ptr;
	dp->context_ptr = pl->next;
	dp->context_u = 0;
}

static void list_dp_init(struct dpages *dp, struct page_list *pl, unsigned offset)
{
	dp->get_page = list_get_page;
	dp->next_page = list_next_page;
	dp->context_u = offset;
	dp->context_ptr = pl;
}
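
/*
 * Note that the 'offset' passed to list_dp_init() applies only to the
 * first page of the chain; list_next_page() resets it to zero for every
 * subsequent page.
 */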

/*
 * Functions for getting the pages from a bvec.
 */
static void bvec_get_page(struct dpages *dp,
		  struct page **p, unsigned long *len, unsigned *offset)
{
	struct bio_vec *bvec = (struct bio_vec *) dp->context_ptr;
	*p = bvec->bv_page;
	*len = bvec->bv_len;
	*offset = bvec->bv_offset;
}

static void bvec_next_page(struct dpages *dp)
{
	struct bio_vec *bvec = (struct bio_vec *) dp->context_ptr;
	dp->context_ptr = bvec + 1;
}

static void bvec_dp_init(struct dpages *dp, struct bio_vec *bvec)
{
	dp->get_page = bvec_get_page;
	dp->next_page = bvec_next_page;
	dp->context_ptr = bvec;
}

/*
 * Functions for getting the pages from a VMA.
 */
static void vm_get_page(struct dpages *dp,
		 struct page **p, unsigned long *len, unsigned *offset)
{
	*p = vmalloc_to_page(dp->context_ptr);
	*offset = dp->context_u;
	*len = PAGE_SIZE - dp->context_u;
}

static void vm_next_page(struct dpages *dp)
{
	dp->context_ptr += PAGE_SIZE - dp->context_u;
	dp->context_u = 0;
}

static void vm_dp_init(struct dpages *dp, void *data)
{
	dp->get_page = vm_get_page;
	dp->next_page = vm_next_page;
	dp->context_u = ((unsigned long) data) & (PAGE_SIZE - 1);
	dp->context_ptr = data;
}

static void dm_bio_destructor(struct bio *bio)
{
	unsigned region;
	struct io *io;

	retrieve_io_and_region_from_bio(bio, &io, &region);

	bio_free(bio, io->client->bios);
}

/*
 * Functions for getting the pages from kernel memory.
 */
static void km_get_page(struct dpages *dp, struct page **p, unsigned long *len,
			unsigned *offset)
{
	*p = virt_to_page(dp->context_ptr);
	*offset = dp->context_u;
	*len = PAGE_SIZE - dp->context_u;
}

static void km_next_page(struct dpages *dp)
{
	dp->context_ptr += PAGE_SIZE - dp->context_u;
	dp->context_u = 0;
}

static void km_dp_init(struct dpages *dp, void *data)
{
	dp->get_page = km_get_page;
	dp->next_page = km_next_page;
	dp->context_u = ((unsigned long) data) & (PAGE_SIZE - 1);
	dp->context_ptr = data;
}
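
/*
 * The vm_* and km_* variants differ only in how they translate a buffer
 * address to a struct page: vm_get_page() uses vmalloc_to_page() for
 * vmalloc()ed memory, km_get_page() uses virt_to_page() for directly
 * mapped (kmalloc-style) memory.  Both walk the buffer in page-sized
 * steps, starting at the buffer's offset within its first page.
 */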

/*-----------------------------------------------------------------
 * IO routines that accept a list of pages.
 *---------------------------------------------------------------*/
static void do_region(int rw, unsigned region, struct dm_io_region *where,
		      struct dpages *dp, struct io *io)
{
	struct bio *bio;
	struct page *page;
	unsigned long len;
	unsigned offset;
	unsigned num_bvecs;
	sector_t remaining = where->count;

	/*
	 * where->count may be zero if rw holds a flush and we need to
	 * send a zero-sized flush.
	 */
	do {
		/*
		 * Allocate a suitably sized bio.
		 */
		num_bvecs = dm_sector_div_up(remaining,
					     (PAGE_SIZE >> SECTOR_SHIFT));
		num_bvecs = min_t(int, bio_get_nr_vecs(where->bdev), num_bvecs);
		bio = bio_alloc_bioset(GFP_NOIO, num_bvecs, io->client->bios);
		bio->bi_sector = where->sector + (where->count - remaining);
		bio->bi_bdev = where->bdev;
		bio->bi_end_io = endio;
		bio->bi_destructor = dm_bio_destructor;
		store_io_and_region_in_bio(bio, io, region);

		/*
		 * Try and add as many pages as possible.
		 */
		while (remaining) {
			dp->get_page(dp, &page, &len, &offset);
			len = min(len, to_bytes(remaining));
			if (!bio_add_page(bio, page, len, offset))
				break;

			offset = 0;
			remaining -= to_sector(len);
			dp->next_page(dp);
		}

		atomic_inc(&io->count);
		submit_bio(rw, bio);
	} while (remaining);
}
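
/*
 * When bio_add_page() cannot take the next page, do_region() loops and
 * issues an additional bio for the remainder of the region.  Each bio
 * takes a reference on io->count before submission, which endio() drops
 * via dec_count(), so the io completes only after every bio has finished.
 */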

static void dispatch_io(int rw, unsigned int num_regions,
			struct dm_io_region *where, struct dpages *dp,
			struct io *io, int sync)
{
	int i;
	struct dpages old_pages = *dp;

	BUG_ON(num_regions > DM_IO_MAX_REGIONS);

	if (sync)
		rw |= REQ_SYNC;

	/*
	 * For multiple regions we need to be careful to rewind
	 * the dp object for each call to do_region.
	 */
	for (i = 0; i < num_regions; i++) {
		*dp = old_pages;
		if (where[i].count || (rw & REQ_FLUSH))
			do_region(rw, i, where + i, dp, io);
	}

	/*
	 * Drop the extra reference that we were holding to avoid
	 * the io being completed too early.
	 */
	dec_count(io, 0, 0);
}

static int sync_io(struct dm_io_client *client, unsigned int num_regions,
		   struct dm_io_region *where, int rw, struct dpages *dp,
		   unsigned long *error_bits)
{
	/*
	 * gcc <= 4.3 can't do the alignment for stack variables, so we must
	 * align it on our own.
	 * volatile prevents the optimizer from removing the "io_" variable
	 * from the stack frame or reusing its slot (which ANSI C would allow).
	 */
	volatile char io_[sizeof(struct io) + __alignof__(struct io) - 1];
	struct io *io = (struct io *)PTR_ALIGN(&io_, __alignof__(struct io));

	if (num_regions > 1 && (rw & RW_MASK) != WRITE) {
		WARN_ON(1);
		return -EIO;
	}

	io->error_bits = 0;
	atomic_set(&io->count, 1); /* see dispatch_io() */
	io->sleeper = current;
	io->client = client;

	dispatch_io(rw, num_regions, where, dp, io, 1);

	while (1) {
		set_current_state(TASK_UNINTERRUPTIBLE);

		if (!atomic_read(&io->count))
			break;

		io_schedule();
	}
	set_current_state(TASK_RUNNING);

	if (error_bits)
		*error_bits = io->error_bits;

	return io->error_bits ? -EIO : 0;
}
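
/*
 * sync_io() keeps its io object on the stack (manually aligned above) and
 * sleeps in TASK_UNINTERRUPTIBLE until dec_count() wakes it once the last
 * bio has completed; async_io() below allocates the io from the client's
 * mempool instead and returns immediately after dispatching.
 */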

static int async_io(struct dm_io_client *client, unsigned int num_regions,
		    struct dm_io_region *where, int rw, struct dpages *dp,
		    io_notify_fn fn, void *context)
{
	struct io *io;

	if (num_regions > 1 && (rw & RW_MASK) != WRITE) {
		WARN_ON(1);
		fn(1, context);
		return -EIO;
	}

	io = mempool_alloc(client->pool, GFP_NOIO);
	io->error_bits = 0;
	atomic_set(&io->count, 1); /* see dispatch_io() */
	io->sleeper = NULL;
	io->client = client;
	io->callback = fn;
	io->context = context;

	dispatch_io(rw, num_regions, where, dp, io, 0);
	return 0;
}
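
/*
 * Unlike sync_io(), async_io() returns as soon as the bios have been
 * dispatched; completion is reported later through 'fn', which dec_count()
 * calls with the accumulated error bits and the 'context' stored above,
 * after returning the io to the client's mempool.
 */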

static int dp_init(struct dm_io_request *io_req, struct dpages *dp)
{
	/* Set up dpages based on memory type */
	switch (io_req->mem.type) {
	case DM_IO_PAGE_LIST:
		list_dp_init(dp, io_req->mem.ptr.pl, io_req->mem.offset);
		break;

	case DM_IO_BVEC:
		bvec_dp_init(dp, io_req->mem.ptr.bvec);
		break;

	case DM_IO_VMA:
		vm_dp_init(dp, io_req->mem.ptr.vma);
		break;

	case DM_IO_KMEM:
		km_dp_init(dp, io_req->mem.ptr.addr);
		break;

	default:
		return -EINVAL;
	}

	return 0;
}

/*
 * New collapsed (a)synchronous interface.
 *
 * If the IO is asynchronous (i.e. it has notify.fn), you must either unplug
 * the queue with blk_unplug() some time later or set REQ_SYNC in
 * io_req->bi_rw. If you fail to do one of these, the IO will be submitted to
 * the disk after q->unplug_delay, which defaults to 3ms in blk-settings.c.
 */
int dm_io(struct dm_io_request *io_req, unsigned num_regions,
	  struct dm_io_region *where, unsigned long *sync_error_bits)
{
	int r;
	struct dpages dp;

	r = dp_init(io_req, &dp);
	if (r)
		return r;

	if (!io_req->notify.fn)
		return sync_io(io_req->client, num_regions, where,
			       io_req->bi_rw, &dp, sync_error_bits);

	return async_io(io_req->client, num_regions, where, io_req->bi_rw,
			&dp, io_req->notify.fn, io_req->notify.context);
}
EXPORT_SYMBOL(dm_io);
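
/*
 * Example (an illustrative sketch; 'client', 'buffer' and 'bdev' are
 * assumed to have been set up by the caller): a synchronous single-region
 * read of 4KiB into a page-aligned kmalloc()ed buffer could look like
 *
 *	struct dm_io_request io_req = {
 *		.bi_rw = READ,
 *		.mem.type = DM_IO_KMEM,
 *		.mem.ptr.addr = buffer,
 *		.notify.fn = NULL,
 *		.client = client,
 *	};
 *	struct dm_io_region region = {
 *		.bdev = bdev,
 *		.sector = 0,
 *		.count = 8,
 *	};
 *	unsigned long error_bits;
 *	int r = dm_io(&io_req, 1, &region, &error_bits);
 *
 * A NULL notify.fn selects the synchronous sync_io() path; supplying a
 * callback instead makes the same call return immediately and report the
 * per-region error bits through the callback.
 */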

int __init dm_io_init(void)
{
	_dm_io_cache = KMEM_CACHE(io, 0);
	if (!_dm_io_cache)
		return -ENOMEM;

	return 0;
}

void dm_io_exit(void)
{
	kmem_cache_destroy(_dm_io_cache);
	_dm_io_cache = NULL;
}