/*
 * Copyright (C) 2003 Sistina Software
 * Copyright (C) 2006 Red Hat GmbH
 *
 * This file is released under the GPL.
 */

#include "dm-io.h"

#include <linux/bio.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>

struct dm_io_client {
	mempool_t *pool;
	struct bio_set *bios;
};

/* FIXME: can we shrink this ? */
struct io {
	unsigned long error;
	atomic_t count;
	struct task_struct *sleeper;
	struct dm_io_client *client;
	io_notify_fn callback;
	void *context;
};
/*
 * io contexts are only dynamically allocated for asynchronous
 * io.  Since async io is likely to be the majority of io, we'll
 * have the same number of io contexts as bios! (FIXME: must reduce this).
 */

static unsigned int pages_to_ios(unsigned int pages)
{
	return 4 * pages;	/* too many ? */
}

/*
 * Create a client with mempool and bioset.
 */
struct dm_io_client *dm_io_client_create(unsigned num_pages)
{
	unsigned ios = pages_to_ios(num_pages);
	struct dm_io_client *client;

	client = kmalloc(sizeof(*client), GFP_KERNEL);
	if (!client)
		return ERR_PTR(-ENOMEM);

	client->pool = mempool_create_kmalloc_pool(ios, sizeof(struct io));
	if (!client->pool)
		goto bad;

	client->bios = bioset_create(16, 16);
	if (!client->bios)
		goto bad;

	return client;

   bad:
	if (client->pool)
		mempool_destroy(client->pool);
	kfree(client);
	return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL(dm_io_client_create);
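
/*
 * Example (a hypothetical caller sketch, not part of this file): a
 * target expecting to keep a handful of pages of io in flight might
 * set up and tear down a client like this; the "16" is arbitrary.
 *
 *	struct dm_io_client *c = dm_io_client_create(16);
 *	if (IS_ERR(c))
 *		return PTR_ERR(c);
 *	...
 *	dm_io_client_destroy(c);
 */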

int dm_io_client_resize(unsigned num_pages, struct dm_io_client *client)
{
	return mempool_resize(client->pool, pages_to_ios(num_pages),
			      GFP_KERNEL);
}
EXPORT_SYMBOL(dm_io_client_resize);

void dm_io_client_destroy(struct dm_io_client *client)
{
	mempool_destroy(client->pool);
	bioset_free(client->bios);
	kfree(client);
}
EXPORT_SYMBOL(dm_io_client_destroy);

/*-----------------------------------------------------------------
 * We need to keep track of which region a bio is doing io for.
 * In order to save a memory allocation we store this in the last
 * bvec which we know is unused (blech).
 * XXX This is ugly and can OOPS with some configs... find another way.
 *---------------------------------------------------------------*/
static inline void bio_set_region(struct bio *bio, unsigned region)
{
	bio->bi_io_vec[bio->bi_max_vecs].bv_len = region;
}

static inline unsigned bio_get_region(struct bio *bio)
{
	return bio->bi_io_vec[bio->bi_max_vecs].bv_len;
}
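
/*
 * The round trip (see do_region() and endio() below): do_region()
 * decrements bi_max_vecs to hide the spare bvec and stores the region
 * number in it; endio() reads the region back and restores bi_max_vecs
 * before bio_put(), so the bio is freed with its true vector count.
 */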

/*-----------------------------------------------------------------
 * We need an io object to keep track of the number of bios that
 * have been dispatched for a particular io.
 *---------------------------------------------------------------*/
static void dec_count(struct io *io, unsigned int region, int error)
{
	if (error)
		set_bit(region, &io->error);

	if (atomic_dec_and_test(&io->count)) {
		if (io->sleeper)
			wake_up_process(io->sleeper);

		else {
			int r = io->error;
			io_notify_fn fn = io->callback;
			void *context = io->context;

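			/*
			 * Free the io before running the callback: the
			 * locals above carry everything fn needs, and
			 * returning the io to the mempool first means
			 * fn may itself issue new io via this client.
			 */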
			mempool_free(io, io->client->pool);
			fn(r, context);
		}
	}
}

static int endio(struct bio *bio, unsigned int done, int error)
{
	struct io *io;
	unsigned region;

	/* keep going until we've finished */
	if (bio->bi_size)
		return 1;

	if (error && bio_data_dir(bio) == READ)
		zero_fill_bio(bio);

	/*
	 * The bio destructor in bio_put() may use the io object.
	 */
	io = bio->bi_private;
	region = bio_get_region(bio);

	bio->bi_max_vecs++;
	bio_put(bio);

	dec_count(io, region, error);

	return 0;
}

/*-----------------------------------------------------------------
 * These little objects provide an abstraction for getting a new
 * destination page for io.
 *---------------------------------------------------------------*/
struct dpages {
	void (*get_page)(struct dpages *dp,
			 struct page **p, unsigned long *len, unsigned *offset);
	void (*next_page)(struct dpages *dp);

	unsigned context_u;
	void *context_ptr;
};
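
/*
 * The implied contract (a reading of the implementations below, not a
 * documented API): get_page() may be called repeatedly and always
 * describes the current page; next_page() advances to the following
 * one.  An initial offset in context_u only applies until the first
 * call to next_page(), which resets it to zero.
 */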

/*
 * Functions for getting the pages from a list.
 */
static void list_get_page(struct dpages *dp,
		  struct page **p, unsigned long *len, unsigned *offset)
{
	unsigned o = dp->context_u;
	struct page_list *pl = (struct page_list *) dp->context_ptr;

	*p = pl->page;
	*len = PAGE_SIZE - o;
	*offset = o;
}

static void list_next_page(struct dpages *dp)
{
	struct page_list *pl = (struct page_list *) dp->context_ptr;
	dp->context_ptr = pl->next;
	dp->context_u = 0;
}

static void list_dp_init(struct dpages *dp, struct page_list *pl, unsigned offset)
{
	dp->get_page = list_get_page;
	dp->next_page = list_next_page;
	dp->context_u = offset;
	dp->context_ptr = pl;
}

/*
 * Functions for getting the pages from a bvec.
 */
static void bvec_get_page(struct dpages *dp,
		  struct page **p, unsigned long *len, unsigned *offset)
{
	struct bio_vec *bvec = (struct bio_vec *) dp->context_ptr;
	*p = bvec->bv_page;
	*len = bvec->bv_len;
	*offset = bvec->bv_offset;
}

static void bvec_next_page(struct dpages *dp)
{
	struct bio_vec *bvec = (struct bio_vec *) dp->context_ptr;
	dp->context_ptr = bvec + 1;
}

static void bvec_dp_init(struct dpages *dp, struct bio_vec *bvec)
{
	dp->get_page = bvec_get_page;
	dp->next_page = bvec_next_page;
	dp->context_ptr = bvec;
}
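
/*
 * Note that bvec_next_page() relies on pointer arithmetic: the bvecs
 * are assumed to form a contiguous array, as in a bio's bi_io_vec[].
 */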

/*
 * Functions for getting the pages from a VMA.
 */
static void vm_get_page(struct dpages *dp,
		 struct page **p, unsigned long *len, unsigned *offset)
{
	*p = vmalloc_to_page(dp->context_ptr);
	*offset = dp->context_u;
	*len = PAGE_SIZE - dp->context_u;
}

static void vm_next_page(struct dpages *dp)
{
	dp->context_ptr += PAGE_SIZE - dp->context_u;
	dp->context_u = 0;
}

static void vm_dp_init(struct dpages *dp, void *data)
{
	dp->get_page = vm_get_page;
	dp->next_page = vm_next_page;
	dp->context_u = ((unsigned long) data) & (PAGE_SIZE - 1);
	dp->context_ptr = data;
}
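
/*
 * Worked example, assuming 4k pages: for data == ...0x1200,
 * context_u becomes 0x200, so the first get_page() yields the
 * containing page with offset 0x200 and len 0xe00 (3584 bytes);
 * vm_next_page() then advances context_ptr by 0xe00, landing
 * exactly on the next page boundary with context_u back at 0.
 */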

static void dm_bio_destructor(struct bio *bio)
{
	struct io *io = bio->bi_private;

	bio_free(bio, io->client->bios);
}

/*
 * Functions for getting the pages from kernel memory.
 */
static void km_get_page(struct dpages *dp, struct page **p, unsigned long *len,
			unsigned *offset)
{
	*p = virt_to_page(dp->context_ptr);
	*offset = dp->context_u;
	*len = PAGE_SIZE - dp->context_u;
}

static void km_next_page(struct dpages *dp)
{
	dp->context_ptr += PAGE_SIZE - dp->context_u;
	dp->context_u = 0;
}

static void km_dp_init(struct dpages *dp, void *data)
{
	dp->get_page = km_get_page;
	dp->next_page = km_next_page;
	dp->context_u = ((unsigned long) data) & (PAGE_SIZE - 1);
	dp->context_ptr = data;
}

/*-----------------------------------------------------------------
 * IO routines that accept a list of pages.
 *---------------------------------------------------------------*/
static void do_region(int rw, unsigned int region, struct io_region *where,
		      struct dpages *dp, struct io *io)
{
	struct bio *bio;
	struct page *page;
	unsigned long len;
	unsigned offset;
	unsigned num_bvecs;
	sector_t remaining = where->count;

	while (remaining) {
		/*
		 * Allocate a suitably-sized bio: we add an extra
		 * bvec for bio_get/set_region() and decrement bi_max_vecs
		 * to hide it from bio_add_page().
		 */
		num_bvecs = (remaining / (PAGE_SIZE >> SECTOR_SHIFT)) + 2;
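		/*
		 * e.g. with 4k pages (PAGE_SIZE >> SECTOR_SHIFT == 8)
		 * and remaining == 24 sectors this gives 5 bvecs:
		 * three for the data pages, one of slack for unaligned
		 * or partial pages, plus the hidden region bvec.
		 */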
		bio = bio_alloc_bioset(GFP_NOIO, num_bvecs, io->client->bios);
		bio->bi_sector = where->sector + (where->count - remaining);
		bio->bi_bdev = where->bdev;
		bio->bi_end_io = endio;
		bio->bi_private = io;
		bio->bi_destructor = dm_bio_destructor;
		bio->bi_max_vecs--;
		bio_set_region(bio, region);

		/*
		 * Try to add as many pages as possible.
		 */
		while (remaining) {
			dp->get_page(dp, &page, &len, &offset);
			len = min(len, to_bytes(remaining));
			if (!bio_add_page(bio, page, len, offset))
				break;

			offset = 0;
			remaining -= to_sector(len);
			dp->next_page(dp);
		}

		atomic_inc(&io->count);
		submit_bio(rw, bio);
	}
}

static void dispatch_io(int rw, unsigned int num_regions,
			struct io_region *where, struct dpages *dp,
			struct io *io, int sync)
{
	int i;
	struct dpages old_pages = *dp;

	if (sync)
		rw |= (1 << BIO_RW_SYNC);

	/*
	 * For multiple regions we need to be careful to rewind
	 * the dp object for each call to do_region.
	 */
	for (i = 0; i < num_regions; i++) {
		*dp = old_pages;
		if (where[i].count)
			do_region(rw, i, where + i, dp, io);
	}

	/*
	 * Drop the extra reference that we were holding to avoid
	 * the io being completed too early.
	 */
	dec_count(io, 0, 0);
}

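/*
 * A note on the num_regions > 1 checks below: multi-region requests
 * only make sense for WRITE, where the same data is sent to every
 * region (as dm mirroring does).  Multiple READs would each scribble
 * over the same destination pages, so that case is rejected.
 */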
static int sync_io(struct dm_io_client *client, unsigned int num_regions,
		   struct io_region *where, int rw, struct dpages *dp,
		   unsigned long *error_bits)
{
	struct io io;

	if (num_regions > 1 && rw != WRITE) {
		WARN_ON(1);
		return -EIO;
	}

	io.error = 0;
	atomic_set(&io.count, 1); /* see dispatch_io() */
	io.sleeper = current;
	io.client = client;

	dispatch_io(rw, num_regions, where, dp, &io, 1);

	while (1) {
		set_current_state(TASK_UNINTERRUPTIBLE);

		if (!atomic_read(&io.count) || signal_pending(current))
			break;

		io_schedule();
	}
	set_current_state(TASK_RUNNING);

	if (atomic_read(&io.count))
		return -EINTR;

	if (error_bits)
		*error_bits = io.error;

	return io.error ? -EIO : 0;
}

static int async_io(struct dm_io_client *client, unsigned int num_regions,
		    struct io_region *where, int rw, struct dpages *dp,
		    io_notify_fn fn, void *context)
{
	struct io *io;

	if (num_regions > 1 && rw != WRITE) {
		WARN_ON(1);
		fn(1, context);
		return -EIO;
	}

	io = mempool_alloc(client->pool, GFP_NOIO);
	io->error = 0;
	atomic_set(&io->count, 1); /* see dispatch_io() */
	io->sleeper = NULL;
	io->client = client;
	io->callback = fn;
	io->context = context;

	dispatch_io(rw, num_regions, where, dp, io, 0);
	return 0;
}

static int dp_init(struct dm_io_request *io_req, struct dpages *dp)
{
	/* Set up dpages based on memory type */
	switch (io_req->mem.type) {
	case DM_IO_PAGE_LIST:
		list_dp_init(dp, io_req->mem.ptr.pl, io_req->mem.offset);
		break;

	case DM_IO_BVEC:
		bvec_dp_init(dp, io_req->mem.ptr.bvec);
		break;

	case DM_IO_VMA:
		vm_dp_init(dp, io_req->mem.ptr.vma);
		break;

	case DM_IO_KMEM:
		km_dp_init(dp, io_req->mem.ptr.addr);
		break;

	default:
		return -EINVAL;
	}

	return 0;
}

/*
 * New collapsed (a)synchronous interface
 */
int dm_io(struct dm_io_request *io_req, unsigned num_regions,
	  struct io_region *where, unsigned long *sync_error_bits)
{
	int r;
	struct dpages dp;

	r = dp_init(io_req, &dp);
	if (r)
		return r;

	if (!io_req->notify.fn)
		return sync_io(io_req->client, num_regions, where,
			       io_req->bi_rw, &dp, sync_error_bits);

	return async_io(io_req->client, num_regions, where, io_req->bi_rw,
			&dp, io_req->notify.fn, io_req->notify.context);
}
EXPORT_SYMBOL(dm_io);
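
/*
 * Example (a hypothetical caller sketch, with field names as declared
 * in dm-io.h): a synchronous 8-sector read into kernel memory.
 * "client", "dest" and "bdev" are assumed to exist in the caller.
 * Leaving notify.fn NULL selects the sync_io() path above.
 *
 *	struct io_region where = {
 *		.bdev = bdev,
 *		.sector = 0,
 *		.count = 8,
 *	};
 *	struct dm_io_request req = {
 *		.bi_rw = READ,
 *		.mem.type = DM_IO_KMEM,
 *		.mem.ptr.addr = dest,
 *		.client = client,
 *	};
 *	unsigned long error_bits;
 *	int r = dm_io(&req, 1, &where, &error_bits);
 */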