xref: /openbmc/linux/drivers/md/dm-io.c (revision 1da177e4c3f41524e886b7f1b8a0c1fc7321cac2)
/*
 * Copyright (C) 2003 Sistina Software
 *
 * This file is released under the GPL.
 */

#include "dm-io.h"

#include <linux/bio.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>

static struct bio_set *_bios;

/* FIXME: can we shrink this ? */
struct io {
	unsigned long error;
	atomic_t count;
	struct task_struct *sleeper;
	io_notify_fn callback;
	void *context;
};

/*
 * io contexts are only dynamically allocated for asynchronous
 * io.  Since async io is likely to be the majority of io we'll
 * have the same number of io contexts as buffer heads ! (FIXME:
 * must reduce this).
 */
static unsigned _num_ios;
static mempool_t *_io_pool;

static void *alloc_io(unsigned int __nocast gfp_mask, void *pool_data)
{
	return kmalloc(sizeof(struct io), gfp_mask);
}

static void free_io(void *element, void *pool_data)
{
	kfree(element);
}

static unsigned int pages_to_ios(unsigned int pages)
{
	return 4 * pages;	/* too many ? */
}

static int resize_pool(unsigned int new_ios)
{
	int r = 0;

	if (_io_pool) {
		if (new_ios == 0) {
			/* free off the pool */
			mempool_destroy(_io_pool);
			_io_pool = NULL;
			bioset_free(_bios);

		} else {
			/* resize the pool */
			r = mempool_resize(_io_pool, new_ios, GFP_KERNEL);
		}

	} else {
		/* create new pool */
		_io_pool = mempool_create(new_ios, alloc_io, free_io, NULL);
		if (!_io_pool)
			return -ENOMEM;

		_bios = bioset_create(16, 16, 4);
		if (!_bios) {
			mempool_destroy(_io_pool);
			_io_pool = NULL;
			return -ENOMEM;
		}
	}

	if (!r)
		_num_ios = new_ios;

	return r;
}

int dm_io_get(unsigned int num_pages)
{
	return resize_pool(_num_ios + pages_to_ios(num_pages));
}

void dm_io_put(unsigned int num_pages)
{
	resize_pool(_num_ios - pages_to_ios(num_pages));
}

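/*
 * A minimal usage sketch (not part of this driver): a hypothetical
 * client reserves mempool capacity for the pages it intends to pass to
 * the dm_io_* calls, and releases it again when it is finished.  The
 * client code and num_pages below are illustrative only.
 *
 *	int r = dm_io_get(num_pages);
 *	if (r)
 *		return r;	(-ENOMEM if the pools could not be grown)
 *
 *	... issue dm_io_sync()/dm_io_async() calls on up to num_pages ...
 *
 *	dm_io_put(num_pages);
 */
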
/*-----------------------------------------------------------------
 * We need to keep track of which region a bio is doing io for.
 * In order to save a memory allocation we store this in the last
 * bvec which we know is unused (blech).
 * XXX This is ugly and can OOPS with some configs... find another way.
 *---------------------------------------------------------------*/
static inline void bio_set_region(struct bio *bio, unsigned region)
{
	bio->bi_io_vec[bio->bi_max_vecs - 1].bv_len = region;
}

static inline unsigned bio_get_region(struct bio *bio)
{
	return bio->bi_io_vec[bio->bi_max_vecs - 1].bv_len;
}

/*-----------------------------------------------------------------
 * We need an io object to keep track of the number of bios that
 * have been dispatched for a particular io.
 *---------------------------------------------------------------*/
static void dec_count(struct io *io, unsigned int region, int error)
{
	if (error)
		set_bit(region, &io->error);

	if (atomic_dec_and_test(&io->count)) {
		if (io->sleeper)
			wake_up_process(io->sleeper);

		else {
			int r = io->error;
			io_notify_fn fn = io->callback;
			void *context = io->context;

			mempool_free(io, _io_pool);
			fn(r, context);
		}
	}
}

static int endio(struct bio *bio, unsigned int done, int error)
{
	struct io *io = (struct io *) bio->bi_private;

	/* keep going until we've finished */
	if (bio->bi_size)
		return 1;

	if (error && bio_data_dir(bio) == READ)
		zero_fill_bio(bio);

	dec_count(io, bio_get_region(bio), error);
	bio_put(bio);

	return 0;
}

/*-----------------------------------------------------------------
 * These little objects provide an abstraction for getting a new
 * destination page for io.
 *---------------------------------------------------------------*/
struct dpages {
	void (*get_page)(struct dpages *dp,
			 struct page **p, unsigned long *len, unsigned *offset);
	void (*next_page)(struct dpages *dp);

	unsigned context_u;
	void *context_ptr;
};

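/*
 * A sketch of the iteration contract (illustrative only, not called
 * anywhere): get_page() describes the page fragment the iterator is
 * currently pointing at, next_page() advances to the next fragment.
 * The names bytes_left and queue_fragment below are hypothetical.
 *
 *	struct page *page;
 *	unsigned long len;
 *	unsigned offset;
 *
 *	while (bytes_left) {
 *		dp->get_page(dp, &page, &len, &offset);
 *		len = min(len, bytes_left);
 *		queue_fragment(page, offset, len);
 *		bytes_left -= len;
 *		dp->next_page(dp);
 *	}
 */
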
/*
 * Functions for getting the pages from a list.
 */
static void list_get_page(struct dpages *dp,
		  struct page **p, unsigned long *len, unsigned *offset)
{
	unsigned o = dp->context_u;
	struct page_list *pl = (struct page_list *) dp->context_ptr;

	*p = pl->page;
	*len = PAGE_SIZE - o;
	*offset = o;
}

static void list_next_page(struct dpages *dp)
{
	struct page_list *pl = (struct page_list *) dp->context_ptr;
	dp->context_ptr = pl->next;
	dp->context_u = 0;
}

static void list_dp_init(struct dpages *dp, struct page_list *pl, unsigned offset)
{
	dp->get_page = list_get_page;
	dp->next_page = list_next_page;
	dp->context_u = offset;
	dp->context_ptr = pl;
}

/*
 * Functions for getting the pages from a bvec.
 */
static void bvec_get_page(struct dpages *dp,
		  struct page **p, unsigned long *len, unsigned *offset)
{
	struct bio_vec *bvec = (struct bio_vec *) dp->context_ptr;
	*p = bvec->bv_page;
	*len = bvec->bv_len;
	*offset = bvec->bv_offset;
}

static void bvec_next_page(struct dpages *dp)
{
	struct bio_vec *bvec = (struct bio_vec *) dp->context_ptr;
	dp->context_ptr = bvec + 1;
}

static void bvec_dp_init(struct dpages *dp, struct bio_vec *bvec)
{
	dp->get_page = bvec_get_page;
	dp->next_page = bvec_next_page;
	dp->context_ptr = bvec;
}

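/*
 * Functions for getting the pages from a vmalloc'd buffer.
 */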
static void vm_get_page(struct dpages *dp,
		 struct page **p, unsigned long *len, unsigned *offset)
{
	*p = vmalloc_to_page(dp->context_ptr);
	*offset = dp->context_u;
	*len = PAGE_SIZE - dp->context_u;
}

static void vm_next_page(struct dpages *dp)
{
	dp->context_ptr += PAGE_SIZE - dp->context_u;
	dp->context_u = 0;
}

static void vm_dp_init(struct dpages *dp, void *data)
{
	dp->get_page = vm_get_page;
	dp->next_page = vm_next_page;
	dp->context_u = ((unsigned long) data) & (PAGE_SIZE - 1);
	dp->context_ptr = data;
}

/*-----------------------------------------------------------------
 * IO routines that accept a list of pages.
 *---------------------------------------------------------------*/
static void do_region(int rw, unsigned int region, struct io_region *where,
		      struct dpages *dp, struct io *io)
{
	struct bio *bio;
	struct page *page;
	unsigned long len;
	unsigned offset;
	unsigned num_bvecs;
	sector_t remaining = where->count;

	while (remaining) {
		/*
		 * Allocate a suitably sized bio, we add an extra
		 * bvec for bio_get/set_region().
		 */
		num_bvecs = (remaining / (PAGE_SIZE >> 9)) + 2;
		bio = bio_alloc_bioset(GFP_NOIO, num_bvecs, _bios);
		bio->bi_sector = where->sector + (where->count - remaining);
		bio->bi_bdev = where->bdev;
		bio->bi_end_io = endio;
		bio->bi_private = io;
		bio_set_region(bio, region);

		/*
		 * Try and add as many pages as possible.
		 */
		while (remaining) {
			dp->get_page(dp, &page, &len, &offset);
			len = min(len, to_bytes(remaining));
			if (!bio_add_page(bio, page, len, offset))
				break;

			offset = 0;
			remaining -= to_sector(len);
			dp->next_page(dp);
		}

		atomic_inc(&io->count);
		submit_bio(rw, bio);
	}
}

static void dispatch_io(int rw, unsigned int num_regions,
			struct io_region *where, struct dpages *dp,
			struct io *io, int sync)
{
	int i;
	struct dpages old_pages = *dp;

	if (sync)
		rw |= (1 << BIO_RW_SYNC);

	/*
	 * For multiple regions we need to be careful to rewind
	 * the dp object for each call to do_region.
	 */
	for (i = 0; i < num_regions; i++) {
		*dp = old_pages;
		if (where[i].count)
			do_region(rw, i, where + i, dp, io);
	}

	/*
	 * Drop the extra reference that we were holding to avoid
	 * the io being completed too early.
	 */
	dec_count(io, 0, 0);
}

static int sync_io(unsigned int num_regions, struct io_region *where,
	    int rw, struct dpages *dp, unsigned long *error_bits)
{
	struct io io;

	if (num_regions > 1 && rw != WRITE) {
		WARN_ON(1);
		return -EIO;
	}

	io.error = 0;
	atomic_set(&io.count, 1); /* see dispatch_io() */
	io.sleeper = current;

	dispatch_io(rw, num_regions, where, dp, &io, 1);

	while (1) {
		set_current_state(TASK_UNINTERRUPTIBLE);

		if (!atomic_read(&io.count) || signal_pending(current))
			break;

		io_schedule();
	}
	set_current_state(TASK_RUNNING);

	if (atomic_read(&io.count))
		return -EINTR;

	*error_bits = io.error;
	return io.error ? -EIO : 0;
}

static int async_io(unsigned int num_regions, struct io_region *where, int rw,
	     struct dpages *dp, io_notify_fn fn, void *context)
{
	struct io *io;

	if (num_regions > 1 && rw != WRITE) {
		WARN_ON(1);
		fn(1, context);
		return -EIO;
	}

	io = mempool_alloc(_io_pool, GFP_NOIO);
	io->error = 0;
	atomic_set(&io->count, 1); /* see dispatch_io() */
	io->sleeper = NULL;
	io->callback = fn;
	io->context = context;

	dispatch_io(rw, num_regions, where, dp, io, 0);
	return 0;
}

int dm_io_sync(unsigned int num_regions, struct io_region *where, int rw,
	       struct page_list *pl, unsigned int offset,
	       unsigned long *error_bits)
{
	struct dpages dp;
	list_dp_init(&dp, pl, offset);
	return sync_io(num_regions, where, rw, &dp, error_bits);
}

int dm_io_sync_bvec(unsigned int num_regions, struct io_region *where, int rw,
		    struct bio_vec *bvec, unsigned long *error_bits)
{
	struct dpages dp;
	bvec_dp_init(&dp, bvec);
	return sync_io(num_regions, where, rw, &dp, error_bits);
}

int dm_io_sync_vm(unsigned int num_regions, struct io_region *where, int rw,
		  void *data, unsigned long *error_bits)
{
	struct dpages dp;
	vm_dp_init(&dp, data);
	return sync_io(num_regions, where, rw, &dp, error_bits);
}

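/*
 * A minimal sketch (illustrative only) of a synchronous read into a
 * vmalloc'd buffer.  The device "dev", buffer "buf" and region size
 * are hypothetical; on -EIO, error_bits has one bit set per failed
 * region.
 *
 *	struct io_region where = {
 *		.bdev = dev->bdev,
 *		.sector = 0,
 *		.count = to_sector(PAGE_SIZE),
 *	};
 *	unsigned long error_bits;
 *	int r;
 *
 *	r = dm_io_sync_vm(1, &where, READ, buf, &error_bits);
 *	if (r)
 *		... -EIO (inspect error_bits) or -EINTR ...
 */
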
int dm_io_async(unsigned int num_regions, struct io_region *where, int rw,
		struct page_list *pl, unsigned int offset,
		io_notify_fn fn, void *context)
{
	struct dpages dp;
	list_dp_init(&dp, pl, offset);
	return async_io(num_regions, where, rw, &dp, fn, context);
}

int dm_io_async_bvec(unsigned int num_regions, struct io_region *where, int rw,
		     struct bio_vec *bvec, io_notify_fn fn, void *context)
{
	struct dpages dp;
	bvec_dp_init(&dp, bvec);
	return async_io(num_regions, where, rw, &dp, fn, context);
}

int dm_io_async_vm(unsigned int num_regions, struct io_region *where, int rw,
		   void *data, io_notify_fn fn, void *context)
{
	struct dpages dp;
	vm_dp_init(&dp, data);
	return async_io(num_regions, where, rw, &dp, fn, context);
}
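
/*
 * A minimal sketch (illustrative only) of the asynchronous interface.
 * The io_notify_fn callback runs once every dispatched bio has
 * completed; the names my_complete, my_state, where and buf are
 * hypothetical.
 *
 *	static void my_complete(unsigned long error_bits, void *context)
 *	{
 *		struct my_state *s = context;
 *		... error_bits has one bit set per failed region ...
 *	}
 *
 *	r = dm_io_async_vm(1, &where, WRITE, buf, my_complete, s);
 */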

EXPORT_SYMBOL(dm_io_get);
EXPORT_SYMBOL(dm_io_put);
EXPORT_SYMBOL(dm_io_sync);
EXPORT_SYMBOL(dm_io_async);
EXPORT_SYMBOL(dm_io_sync_bvec);
EXPORT_SYMBOL(dm_io_async_bvec);
EXPORT_SYMBOL(dm_io_sync_vm);
EXPORT_SYMBOL(dm_io_async_vm);