13bd94003SHeinz Mauelshagen // SPDX-License-Identifier: GPL-2.0-only
21da177e4SLinus Torvalds /*
31da177e4SLinus Torvalds * Copyright (C) 2003 Sistina Software
4891ce207SHeinz Mauelshagen * Copyright (C) 2006 Red Hat GmbH
51da177e4SLinus Torvalds *
61da177e4SLinus Torvalds * This file is released under the GPL.
71da177e4SLinus Torvalds */
81da177e4SLinus Torvalds
94cc96131SMike Snitzer #include "dm-core.h"
10952b3557SMikulas Patocka
11586e80e6SMikulas Patocka #include <linux/device-mapper.h>
121da177e4SLinus Torvalds
131da177e4SLinus Torvalds #include <linux/bio.h>
1410f1d5d1SJoe Thornber #include <linux/completion.h>
151da177e4SLinus Torvalds #include <linux/mempool.h>
161da177e4SLinus Torvalds #include <linux/module.h>
171da177e4SLinus Torvalds #include <linux/sched.h>
181da177e4SLinus Torvalds #include <linux/slab.h>
19a765e20eSAlasdair G Kergon #include <linux/dm-io.h>
201da177e4SLinus Torvalds
21f1e53987SMikulas Patocka #define DM_MSG_PREFIX "io"
22f1e53987SMikulas Patocka
23f1e53987SMikulas Patocka #define DM_IO_MAX_REGIONS BITS_PER_LONG
24f1e53987SMikulas Patocka
25891ce207SHeinz Mauelshagen struct dm_io_client {
266f1c819cSKent Overstreet mempool_t pool;
276f1c819cSKent Overstreet struct bio_set bios;
28891ce207SHeinz Mauelshagen };
29891ce207SHeinz Mauelshagen
30f1e53987SMikulas Patocka /*
31f1e53987SMikulas Patocka * Aligning 'struct io' reduces the number of bits required to store
32f1e53987SMikulas Patocka * its address. Refer to store_io_and_region_in_bio() below.
33f1e53987SMikulas Patocka */
341da177e4SLinus Torvalds struct io {
35e01fd7eeSAlasdair G Kergon unsigned long error_bits;
361da177e4SLinus Torvalds atomic_t count;
37891ce207SHeinz Mauelshagen struct dm_io_client *client;
381da177e4SLinus Torvalds io_notify_fn callback;
391da177e4SLinus Torvalds void *context;
40bb91bc7bSMikulas Patocka void *vma_invalidate_address;
41bb91bc7bSMikulas Patocka unsigned long vma_invalidate_size;
42f8922a48SHeinz Mauelshagen } __aligned(DM_IO_MAX_REGIONS);
431da177e4SLinus Torvalds
/* Slab cache for 'struct io'; created in dm_io_init(), destroyed in dm_io_exit(). */
static struct kmem_cache *_dm_io_cache;
45952b3557SMikulas Patocka
461da177e4SLinus Torvalds /*
47c8b03afeSHeinz Mauelshagen * Create a client with mempool and bioset.
48c8b03afeSHeinz Mauelshagen */
dm_io_client_create(void)49bda8efecSMikulas Patocka struct dm_io_client *dm_io_client_create(void)
50c8b03afeSHeinz Mauelshagen {
51c8b03afeSHeinz Mauelshagen struct dm_io_client *client;
5286a3238cSHeinz Mauelshagen unsigned int min_ios = dm_get_reserved_bio_based_ios();
536f1c819cSKent Overstreet int ret;
54c8b03afeSHeinz Mauelshagen
55d3775354SKent Overstreet client = kzalloc(sizeof(*client), GFP_KERNEL);
56c8b03afeSHeinz Mauelshagen if (!client)
57c8b03afeSHeinz Mauelshagen return ERR_PTR(-ENOMEM);
58c8b03afeSHeinz Mauelshagen
596f1c819cSKent Overstreet ret = mempool_init_slab_pool(&client->pool, min_ios, _dm_io_cache);
606f1c819cSKent Overstreet if (ret)
61c8b03afeSHeinz Mauelshagen goto bad;
62c8b03afeSHeinz Mauelshagen
636f1c819cSKent Overstreet ret = bioset_init(&client->bios, min_ios, 0, BIOSET_NEED_BVECS);
646f1c819cSKent Overstreet if (ret)
65c8b03afeSHeinz Mauelshagen goto bad;
66c8b03afeSHeinz Mauelshagen
67c8b03afeSHeinz Mauelshagen return client;
68c8b03afeSHeinz Mauelshagen
69c8b03afeSHeinz Mauelshagen bad:
706f1c819cSKent Overstreet mempool_exit(&client->pool);
71c8b03afeSHeinz Mauelshagen kfree(client);
726f1c819cSKent Overstreet return ERR_PTR(ret);
73c8b03afeSHeinz Mauelshagen }
74c8b03afeSHeinz Mauelshagen EXPORT_SYMBOL(dm_io_client_create);
75c8b03afeSHeinz Mauelshagen
dm_io_client_destroy(struct dm_io_client * client)76c8b03afeSHeinz Mauelshagen void dm_io_client_destroy(struct dm_io_client *client)
77c8b03afeSHeinz Mauelshagen {
786f1c819cSKent Overstreet mempool_exit(&client->pool);
796f1c819cSKent Overstreet bioset_exit(&client->bios);
80c8b03afeSHeinz Mauelshagen kfree(client);
81c8b03afeSHeinz Mauelshagen }
82c8b03afeSHeinz Mauelshagen EXPORT_SYMBOL(dm_io_client_destroy);
83c8b03afeSHeinz Mauelshagen
84a4a82ce3SHeinz Mauelshagen /*
85a4a82ce3SHeinz Mauelshagen *-------------------------------------------------------------------
861da177e4SLinus Torvalds * We need to keep track of which region a bio is doing io for.
87f1e53987SMikulas Patocka * To avoid a memory allocation to store just 5 or 6 bits, we
88f1e53987SMikulas Patocka * ensure the 'struct io' pointer is aligned so enough low bits are
89f1e53987SMikulas Patocka * always zero and then combine it with the region number directly in
90f1e53987SMikulas Patocka * bi_private.
91a4a82ce3SHeinz Mauelshagen *-------------------------------------------------------------------
92a4a82ce3SHeinz Mauelshagen */
store_io_and_region_in_bio(struct bio * bio,struct io * io,unsigned int region)93f1e53987SMikulas Patocka static void store_io_and_region_in_bio(struct bio *bio, struct io *io,
9486a3238cSHeinz Mauelshagen unsigned int region)
951da177e4SLinus Torvalds {
96f1e53987SMikulas Patocka if (unlikely(!IS_ALIGNED((unsigned long)io, DM_IO_MAX_REGIONS))) {
97f1e53987SMikulas Patocka DMCRIT("Unaligned struct io pointer %p", io);
98f1e53987SMikulas Patocka BUG();
991da177e4SLinus Torvalds }
1001da177e4SLinus Torvalds
101f1e53987SMikulas Patocka bio->bi_private = (void *)((unsigned long)io | region);
102f1e53987SMikulas Patocka }
103f1e53987SMikulas Patocka
retrieve_io_and_region_from_bio(struct bio * bio,struct io ** io,unsigned int * region)104f1e53987SMikulas Patocka static void retrieve_io_and_region_from_bio(struct bio *bio, struct io **io,
10586a3238cSHeinz Mauelshagen unsigned int *region)
1061da177e4SLinus Torvalds {
107f1e53987SMikulas Patocka unsigned long val = (unsigned long)bio->bi_private;
108f1e53987SMikulas Patocka
109f1e53987SMikulas Patocka *io = (void *)(val & -(unsigned long)DM_IO_MAX_REGIONS);
110f1e53987SMikulas Patocka *region = val & (DM_IO_MAX_REGIONS - 1);
1111da177e4SLinus Torvalds }
1121da177e4SLinus Torvalds
113a4a82ce3SHeinz Mauelshagen /*
114a4a82ce3SHeinz Mauelshagen *--------------------------------------------------------------
1151da177e4SLinus Torvalds * We need an io object to keep track of the number of bios that
1161da177e4SLinus Torvalds * have been dispatched for a particular io.
117a4a82ce3SHeinz Mauelshagen *--------------------------------------------------------------
118a4a82ce3SHeinz Mauelshagen */
complete_io(struct io * io)11997e7cdf1SJoe Thornber static void complete_io(struct io *io)
12097e7cdf1SJoe Thornber {
12197e7cdf1SJoe Thornber unsigned long error_bits = io->error_bits;
12297e7cdf1SJoe Thornber io_notify_fn fn = io->callback;
12397e7cdf1SJoe Thornber void *context = io->context;
12497e7cdf1SJoe Thornber
12597e7cdf1SJoe Thornber if (io->vma_invalidate_size)
12697e7cdf1SJoe Thornber invalidate_kernel_vmap_range(io->vma_invalidate_address,
12797e7cdf1SJoe Thornber io->vma_invalidate_size);
12897e7cdf1SJoe Thornber
1296f1c819cSKent Overstreet mempool_free(io, &io->client->pool);
13097e7cdf1SJoe Thornber fn(error_bits, context);
13197e7cdf1SJoe Thornber }
13297e7cdf1SJoe Thornber
dec_count(struct io * io,unsigned int region,blk_status_t error)1334e4cbee9SChristoph Hellwig static void dec_count(struct io *io, unsigned int region, blk_status_t error)
1341da177e4SLinus Torvalds {
135d87f4c14STejun Heo if (error)
136e01fd7eeSAlasdair G Kergon set_bit(region, &io->error_bits);
1371da177e4SLinus Torvalds
13897e7cdf1SJoe Thornber if (atomic_dec_and_test(&io->count))
13997e7cdf1SJoe Thornber complete_io(io);
1401da177e4SLinus Torvalds }
1411da177e4SLinus Torvalds
endio(struct bio * bio)1424246a0b6SChristoph Hellwig static void endio(struct bio *bio)
1431da177e4SLinus Torvalds {
144c897feb3SHeinz Mauelshagen struct io *io;
14586a3238cSHeinz Mauelshagen unsigned int region;
1464e4cbee9SChristoph Hellwig blk_status_t error;
1471da177e4SLinus Torvalds
1484e4cbee9SChristoph Hellwig if (bio->bi_status && bio_data_dir(bio) == READ)
1491da177e4SLinus Torvalds zero_fill_bio(bio);
1501da177e4SLinus Torvalds
151c897feb3SHeinz Mauelshagen /*
152c897feb3SHeinz Mauelshagen * The bio destructor in bio_put() may use the io object.
153c897feb3SHeinz Mauelshagen */
154f1e53987SMikulas Patocka retrieve_io_and_region_from_bio(bio, &io, ®ion);
155c897feb3SHeinz Mauelshagen
1564e4cbee9SChristoph Hellwig error = bio->bi_status;
1571da177e4SLinus Torvalds bio_put(bio);
1581da177e4SLinus Torvalds
1599b81c842SSasha Levin dec_count(io, region, error);
1601da177e4SLinus Torvalds }
1611da177e4SLinus Torvalds
162a4a82ce3SHeinz Mauelshagen /*
163a4a82ce3SHeinz Mauelshagen *--------------------------------------------------------------
1641da177e4SLinus Torvalds * These little objects provide an abstraction for getting a new
1651da177e4SLinus Torvalds * destination page for io.
166a4a82ce3SHeinz Mauelshagen *--------------------------------------------------------------
167a4a82ce3SHeinz Mauelshagen */
1681da177e4SLinus Torvalds struct dpages {
1691da177e4SLinus Torvalds void (*get_page)(struct dpages *dp,
17086a3238cSHeinz Mauelshagen struct page **p, unsigned long *len, unsigned int *offset);
1711da177e4SLinus Torvalds void (*next_page)(struct dpages *dp);
1721da177e4SLinus Torvalds
173cacc7b05SMing Lei union {
17486a3238cSHeinz Mauelshagen unsigned int context_u;
175cacc7b05SMing Lei struct bvec_iter context_bi;
176cacc7b05SMing Lei };
1771da177e4SLinus Torvalds void *context_ptr;
178bb91bc7bSMikulas Patocka
179bb91bc7bSMikulas Patocka void *vma_invalidate_address;
180bb91bc7bSMikulas Patocka unsigned long vma_invalidate_size;
1811da177e4SLinus Torvalds };
1821da177e4SLinus Torvalds
1831da177e4SLinus Torvalds /*
1841da177e4SLinus Torvalds * Functions for getting the pages from a list.
1851da177e4SLinus Torvalds */
list_get_page(struct dpages * dp,struct page ** p,unsigned long * len,unsigned int * offset)1861da177e4SLinus Torvalds static void list_get_page(struct dpages *dp,
18786a3238cSHeinz Mauelshagen struct page **p, unsigned long *len, unsigned int *offset)
1881da177e4SLinus Torvalds {
18986a3238cSHeinz Mauelshagen unsigned int o = dp->context_u;
19026cb62a2SYu Zhe struct page_list *pl = dp->context_ptr;
1911da177e4SLinus Torvalds
1921da177e4SLinus Torvalds *p = pl->page;
1931da177e4SLinus Torvalds *len = PAGE_SIZE - o;
1941da177e4SLinus Torvalds *offset = o;
1951da177e4SLinus Torvalds }
1961da177e4SLinus Torvalds
list_next_page(struct dpages * dp)1971da177e4SLinus Torvalds static void list_next_page(struct dpages *dp)
1981da177e4SLinus Torvalds {
19926cb62a2SYu Zhe struct page_list *pl = dp->context_ptr;
2000ef0b471SHeinz Mauelshagen
2011da177e4SLinus Torvalds dp->context_ptr = pl->next;
2021da177e4SLinus Torvalds dp->context_u = 0;
2031da177e4SLinus Torvalds }
2041da177e4SLinus Torvalds
list_dp_init(struct dpages * dp,struct page_list * pl,unsigned int offset)20586a3238cSHeinz Mauelshagen static void list_dp_init(struct dpages *dp, struct page_list *pl, unsigned int offset)
2061da177e4SLinus Torvalds {
2071da177e4SLinus Torvalds dp->get_page = list_get_page;
2081da177e4SLinus Torvalds dp->next_page = list_next_page;
2091da177e4SLinus Torvalds dp->context_u = offset;
2101da177e4SLinus Torvalds dp->context_ptr = pl;
2111da177e4SLinus Torvalds }
2121da177e4SLinus Torvalds
2131da177e4SLinus Torvalds /*
2141da177e4SLinus Torvalds * Functions for getting the pages from a bvec.
2151da177e4SLinus Torvalds */
bio_get_page(struct dpages * dp,struct page ** p,unsigned long * len,unsigned int * offset)216d73f9907SMikulas Patocka static void bio_get_page(struct dpages *dp, struct page **p,
21786a3238cSHeinz Mauelshagen unsigned long *len, unsigned int *offset)
2181da177e4SLinus Torvalds {
219cacc7b05SMing Lei struct bio_vec bvec = bvec_iter_bvec((struct bio_vec *)dp->context_ptr,
220cacc7b05SMing Lei dp->context_bi);
221cacc7b05SMing Lei
222cacc7b05SMing Lei *p = bvec.bv_page;
223cacc7b05SMing Lei *len = bvec.bv_len;
224cacc7b05SMing Lei *offset = bvec.bv_offset;
225cacc7b05SMing Lei
226cacc7b05SMing Lei /* avoid figuring it out again in bio_next_page() */
227cacc7b05SMing Lei dp->context_bi.bi_sector = (sector_t)bvec.bv_len;
2281da177e4SLinus Torvalds }
2291da177e4SLinus Torvalds
bio_next_page(struct dpages * dp)230003b5c57SKent Overstreet static void bio_next_page(struct dpages *dp)
2311da177e4SLinus Torvalds {
232cacc7b05SMing Lei unsigned int len = (unsigned int)dp->context_bi.bi_sector;
233cacc7b05SMing Lei
234cacc7b05SMing Lei bvec_iter_advance((struct bio_vec *)dp->context_ptr,
235cacc7b05SMing Lei &dp->context_bi, len);
2361da177e4SLinus Torvalds }
2371da177e4SLinus Torvalds
bio_dp_init(struct dpages * dp,struct bio * bio)238003b5c57SKent Overstreet static void bio_dp_init(struct dpages *dp, struct bio *bio)
2391da177e4SLinus Torvalds {
240003b5c57SKent Overstreet dp->get_page = bio_get_page;
241003b5c57SKent Overstreet dp->next_page = bio_next_page;
242cacc7b05SMing Lei
243cacc7b05SMing Lei /*
244cacc7b05SMing Lei * We just use bvec iterator to retrieve pages, so it is ok to
245cacc7b05SMing Lei * access the bvec table directly here
246cacc7b05SMing Lei */
247cacc7b05SMing Lei dp->context_ptr = bio->bi_io_vec;
248cacc7b05SMing Lei dp->context_bi = bio->bi_iter;
2491da177e4SLinus Torvalds }
2501da177e4SLinus Torvalds
251c8b03afeSHeinz Mauelshagen /*
252c8b03afeSHeinz Mauelshagen * Functions for getting the pages from a VMA.
253c8b03afeSHeinz Mauelshagen */
vm_get_page(struct dpages * dp,struct page ** p,unsigned long * len,unsigned int * offset)2541da177e4SLinus Torvalds static void vm_get_page(struct dpages *dp,
25586a3238cSHeinz Mauelshagen struct page **p, unsigned long *len, unsigned int *offset)
2561da177e4SLinus Torvalds {
2571da177e4SLinus Torvalds *p = vmalloc_to_page(dp->context_ptr);
2581da177e4SLinus Torvalds *offset = dp->context_u;
2591da177e4SLinus Torvalds *len = PAGE_SIZE - dp->context_u;
2601da177e4SLinus Torvalds }
2611da177e4SLinus Torvalds
vm_next_page(struct dpages * dp)2621da177e4SLinus Torvalds static void vm_next_page(struct dpages *dp)
2631da177e4SLinus Torvalds {
2641da177e4SLinus Torvalds dp->context_ptr += PAGE_SIZE - dp->context_u;
2651da177e4SLinus Torvalds dp->context_u = 0;
2661da177e4SLinus Torvalds }
2671da177e4SLinus Torvalds
vm_dp_init(struct dpages * dp,void * data)2681da177e4SLinus Torvalds static void vm_dp_init(struct dpages *dp, void *data)
2691da177e4SLinus Torvalds {
2701da177e4SLinus Torvalds dp->get_page = vm_get_page;
2711da177e4SLinus Torvalds dp->next_page = vm_next_page;
27293bbf583SAl Viro dp->context_u = offset_in_page(data);
2731da177e4SLinus Torvalds dp->context_ptr = data;
2741da177e4SLinus Torvalds }
2751da177e4SLinus Torvalds
276c8b03afeSHeinz Mauelshagen /*
277c8b03afeSHeinz Mauelshagen * Functions for getting the pages from kernel memory.
278c8b03afeSHeinz Mauelshagen */
km_get_page(struct dpages * dp,struct page ** p,unsigned long * len,unsigned int * offset)279c8b03afeSHeinz Mauelshagen static void km_get_page(struct dpages *dp, struct page **p, unsigned long *len,
28086a3238cSHeinz Mauelshagen unsigned int *offset)
281c8b03afeSHeinz Mauelshagen {
282c8b03afeSHeinz Mauelshagen *p = virt_to_page(dp->context_ptr);
283c8b03afeSHeinz Mauelshagen *offset = dp->context_u;
284c8b03afeSHeinz Mauelshagen *len = PAGE_SIZE - dp->context_u;
285c8b03afeSHeinz Mauelshagen }
286c8b03afeSHeinz Mauelshagen
km_next_page(struct dpages * dp)287c8b03afeSHeinz Mauelshagen static void km_next_page(struct dpages *dp)
288c8b03afeSHeinz Mauelshagen {
289c8b03afeSHeinz Mauelshagen dp->context_ptr += PAGE_SIZE - dp->context_u;
290c8b03afeSHeinz Mauelshagen dp->context_u = 0;
291c8b03afeSHeinz Mauelshagen }
292c8b03afeSHeinz Mauelshagen
km_dp_init(struct dpages * dp,void * data)293c8b03afeSHeinz Mauelshagen static void km_dp_init(struct dpages *dp, void *data)
294c8b03afeSHeinz Mauelshagen {
295c8b03afeSHeinz Mauelshagen dp->get_page = km_get_page;
296c8b03afeSHeinz Mauelshagen dp->next_page = km_next_page;
29793bbf583SAl Viro dp->context_u = offset_in_page(data);
298c8b03afeSHeinz Mauelshagen dp->context_ptr = data;
299c8b03afeSHeinz Mauelshagen }
300c8b03afeSHeinz Mauelshagen
301a4a82ce3SHeinz Mauelshagen /*
302a4a82ce3SHeinz Mauelshagen *---------------------------------------------------------------
3031da177e4SLinus Torvalds * IO routines that accept a list of pages.
304a4a82ce3SHeinz Mauelshagen *---------------------------------------------------------------
305a4a82ce3SHeinz Mauelshagen */
do_region(const blk_opf_t opf,unsigned int region,struct dm_io_region * where,struct dpages * dp,struct io * io,unsigned short ioprio)30686a3238cSHeinz Mauelshagen static void do_region(const blk_opf_t opf, unsigned int region,
307e6047149SMike Christie struct dm_io_region *where, struct dpages *dp,
308*5cfcea64SHongyu Jin struct io *io, unsigned short ioprio)
3091da177e4SLinus Torvalds {
3101da177e4SLinus Torvalds struct bio *bio;
3111da177e4SLinus Torvalds struct page *page;
3121da177e4SLinus Torvalds unsigned long len;
31386a3238cSHeinz Mauelshagen unsigned int offset;
31486a3238cSHeinz Mauelshagen unsigned int num_bvecs;
3151da177e4SLinus Torvalds sector_t remaining = where->count;
3160c535e0dSMilan Broz struct request_queue *q = bdev_get_queue(where->bdev);
31770d6c400SMike Snitzer sector_t num_sectors;
3183f649ab7SKees Cook unsigned int special_cmd_max_sectors;
319a3282b43SBart Van Assche const enum req_op op = opf & REQ_OP_MASK;
3201da177e4SLinus Torvalds
321e5db2980SDarrick J. Wong /*
322e5db2980SDarrick J. Wong * Reject unsupported discard and write same requests.
323e5db2980SDarrick J. Wong */
324e6047149SMike Christie if (op == REQ_OP_DISCARD)
325cf0fbf89SChristoph Hellwig special_cmd_max_sectors = bdev_max_discard_sectors(where->bdev);
326ac62d620SChristoph Hellwig else if (op == REQ_OP_WRITE_ZEROES)
327ac62d620SChristoph Hellwig special_cmd_max_sectors = q->limits.max_write_zeroes_sectors;
328a773187eSChristoph Hellwig if ((op == REQ_OP_DISCARD || op == REQ_OP_WRITE_ZEROES) &&
329a773187eSChristoph Hellwig special_cmd_max_sectors == 0) {
330feb7695fSMike Snitzer atomic_inc(&io->count);
3314e4cbee9SChristoph Hellwig dec_count(io, region, BLK_STS_NOTSUPP);
33237527b86SDarrick J. Wong return;
33337527b86SDarrick J. Wong }
33437527b86SDarrick J. Wong
33512fc0f49SMikulas Patocka /*
336e6047149SMike Christie * where->count may be zero if op holds a flush and we need to
337d87f4c14STejun Heo * send a zero-sized flush.
33812fc0f49SMikulas Patocka */
33912fc0f49SMikulas Patocka do {
3401da177e4SLinus Torvalds /*
341f1e53987SMikulas Patocka * Allocate a suitably sized-bio.
3421da177e4SLinus Torvalds */
3430f5d690fSChristoph Hellwig switch (op) {
3440f5d690fSChristoph Hellwig case REQ_OP_DISCARD:
345ac62d620SChristoph Hellwig case REQ_OP_WRITE_ZEROES:
3460f5d690fSChristoph Hellwig num_bvecs = 0;
3470f5d690fSChristoph Hellwig break;
3480f5d690fSChristoph Hellwig default:
3495f7136dbSMatthew Wilcox (Oracle) num_bvecs = bio_max_segs(dm_sector_div_up(remaining,
3505f7136dbSMatthew Wilcox (Oracle) (PAGE_SIZE >> SECTOR_SHIFT)));
3510f5d690fSChristoph Hellwig }
3520c535e0dSMilan Broz
353a3282b43SBart Van Assche bio = bio_alloc_bioset(where->bdev, num_bvecs, opf, GFP_NOIO,
354a3282b43SBart Van Assche &io->client->bios);
3554f024f37SKent Overstreet bio->bi_iter.bi_sector = where->sector + (where->count - remaining);
3561da177e4SLinus Torvalds bio->bi_end_io = endio;
357*5cfcea64SHongyu Jin bio->bi_ioprio = ioprio;
358f1e53987SMikulas Patocka store_io_and_region_in_bio(bio, io, region);
3591da177e4SLinus Torvalds
360ac62d620SChristoph Hellwig if (op == REQ_OP_DISCARD || op == REQ_OP_WRITE_ZEROES) {
361e5db2980SDarrick J. Wong num_sectors = min_t(sector_t, special_cmd_max_sectors, remaining);
3624f024f37SKent Overstreet bio->bi_iter.bi_size = num_sectors << SECTOR_SHIFT;
36370d6c400SMike Snitzer remaining -= num_sectors;
36403b18887SHeinz Mauelshagen } else {
36503b18887SHeinz Mauelshagen while (remaining) {
3661da177e4SLinus Torvalds /*
3671da177e4SLinus Torvalds * Try and add as many pages as possible.
3681da177e4SLinus Torvalds */
3691da177e4SLinus Torvalds dp->get_page(dp, &page, &len, &offset);
3701da177e4SLinus Torvalds len = min(len, to_bytes(remaining));
3711da177e4SLinus Torvalds if (!bio_add_page(bio, page, len, offset))
3721da177e4SLinus Torvalds break;
3731da177e4SLinus Torvalds
3741da177e4SLinus Torvalds offset = 0;
3751da177e4SLinus Torvalds remaining -= to_sector(len);
3761da177e4SLinus Torvalds dp->next_page(dp);
3771da177e4SLinus Torvalds }
37803b18887SHeinz Mauelshagen }
3791da177e4SLinus Torvalds
3801da177e4SLinus Torvalds atomic_inc(&io->count);
3814e49ea4aSMike Christie submit_bio(bio);
38212fc0f49SMikulas Patocka } while (remaining);
3831da177e4SLinus Torvalds }
3841da177e4SLinus Torvalds
/*
 * Fan a request out across up to DM_IO_MAX_REGIONS regions, rewinding
 * the page cursor for each one.  'io' starts with count == 1; the extra
 * reference keeps it alive until every region has been issued.
 */
static void dispatch_io(blk_opf_t opf, unsigned int num_regions,
			struct dm_io_region *where, struct dpages *dp,
			struct io *io, int sync, unsigned short ioprio)
{
	int i;
	struct dpages old_pages = *dp;

	BUG_ON(num_regions > DM_IO_MAX_REGIONS);

	if (sync)
		opf |= REQ_SYNC;

	/*
	 * For multiple regions we need to be careful to rewind
	 * the dp object for each call to do_region.
	 */
	for (i = 0; i < num_regions; i++) {
		*dp = old_pages;
		if (where[i].count || (opf & REQ_PREFLUSH))
			do_region(opf, i, where + i, dp, io, ioprio);
	}

	/*
	 * Drop the extra reference that we were holding to avoid
	 * the io being completed too early.
	 */
	dec_count(io, 0, 0);
}
4131da177e4SLinus Torvalds
41497e7cdf1SJoe Thornber struct sync_io {
41597e7cdf1SJoe Thornber unsigned long error_bits;
41697e7cdf1SJoe Thornber struct completion wait;
41797e7cdf1SJoe Thornber };
41897e7cdf1SJoe Thornber
sync_io_complete(unsigned long error,void * context)41997e7cdf1SJoe Thornber static void sync_io_complete(unsigned long error, void *context)
42097e7cdf1SJoe Thornber {
42197e7cdf1SJoe Thornber struct sync_io *sio = context;
42297e7cdf1SJoe Thornber
42397e7cdf1SJoe Thornber sio->error_bits = error;
42497e7cdf1SJoe Thornber complete(&sio->wait);
42597e7cdf1SJoe Thornber }
42697e7cdf1SJoe Thornber
/*
 * Issue the request and block until all regions complete.
 * Returns 0 on success or -EIO; per-region failures are reported
 * through *error_bits when the caller supplies it.
 */
static int sync_io(struct dm_io_client *client, unsigned int num_regions,
		   struct dm_io_region *where, blk_opf_t opf, struct dpages *dp,
		   unsigned long *error_bits, unsigned short ioprio)
{
	struct io *io;
	struct sync_io sio;

	/* Reads cannot fan a single source out to several regions. */
	if (num_regions > 1 && !op_is_write(opf)) {
		WARN_ON(1);
		return -EIO;
	}

	init_completion(&sio.wait);

	io = mempool_alloc(&client->pool, GFP_NOIO);
	io->error_bits = 0;
	atomic_set(&io->count, 1); /* see dispatch_io() */
	io->client = client;
	io->callback = sync_io_complete;
	io->context = &sio;

	io->vma_invalidate_address = dp->vma_invalidate_address;
	io->vma_invalidate_size = dp->vma_invalidate_size;

	dispatch_io(opf, num_regions, where, dp, io, 1, ioprio);

	wait_for_completion_io(&sio.wait);

	if (error_bits)
		*error_bits = sio.error_bits;

	return sio.error_bits ? -EIO : 0;
}
4601da177e4SLinus Torvalds
/*
 * Issue the request and return immediately; fn(error_bits, context)
 * runs once every region has completed.
 */
static int async_io(struct dm_io_client *client, unsigned int num_regions,
		    struct dm_io_region *where, blk_opf_t opf,
		    struct dpages *dp, io_notify_fn fn, void *context,
		    unsigned short ioprio)
{
	struct io *io;

	/* Reads cannot fan a single source out to several regions. */
	if (num_regions > 1 && !op_is_write(opf)) {
		WARN_ON(1);
		fn(1, context);
		return -EIO;
	}

	io = mempool_alloc(&client->pool, GFP_NOIO);
	io->error_bits = 0;
	atomic_set(&io->count, 1); /* see dispatch_io() */
	io->client = client;
	io->callback = fn;
	io->context = context;

	io->vma_invalidate_address = dp->vma_invalidate_address;
	io->vma_invalidate_size = dp->vma_invalidate_size;

	dispatch_io(opf, num_regions, where, dp, io, 0, ioprio);
	return 0;
}
4871da177e4SLinus Torvalds
dp_init(struct dm_io_request * io_req,struct dpages * dp,unsigned long size)488bb91bc7bSMikulas Patocka static int dp_init(struct dm_io_request *io_req, struct dpages *dp,
489bb91bc7bSMikulas Patocka unsigned long size)
490c8b03afeSHeinz Mauelshagen {
491c8b03afeSHeinz Mauelshagen /* Set up dpages based on memory type */
492bb91bc7bSMikulas Patocka
493bb91bc7bSMikulas Patocka dp->vma_invalidate_address = NULL;
494bb91bc7bSMikulas Patocka dp->vma_invalidate_size = 0;
495bb91bc7bSMikulas Patocka
496c8b03afeSHeinz Mauelshagen switch (io_req->mem.type) {
497c8b03afeSHeinz Mauelshagen case DM_IO_PAGE_LIST:
498c8b03afeSHeinz Mauelshagen list_dp_init(dp, io_req->mem.ptr.pl, io_req->mem.offset);
499c8b03afeSHeinz Mauelshagen break;
500c8b03afeSHeinz Mauelshagen
501003b5c57SKent Overstreet case DM_IO_BIO:
502003b5c57SKent Overstreet bio_dp_init(dp, io_req->mem.ptr.bio);
503c8b03afeSHeinz Mauelshagen break;
504c8b03afeSHeinz Mauelshagen
505c8b03afeSHeinz Mauelshagen case DM_IO_VMA:
506bb91bc7bSMikulas Patocka flush_kernel_vmap_range(io_req->mem.ptr.vma, size);
507581075e4SBart Van Assche if ((io_req->bi_opf & REQ_OP_MASK) == REQ_OP_READ) {
508bb91bc7bSMikulas Patocka dp->vma_invalidate_address = io_req->mem.ptr.vma;
509bb91bc7bSMikulas Patocka dp->vma_invalidate_size = size;
510bb91bc7bSMikulas Patocka }
511c8b03afeSHeinz Mauelshagen vm_dp_init(dp, io_req->mem.ptr.vma);
512c8b03afeSHeinz Mauelshagen break;
513c8b03afeSHeinz Mauelshagen
514c8b03afeSHeinz Mauelshagen case DM_IO_KMEM:
515c8b03afeSHeinz Mauelshagen km_dp_init(dp, io_req->mem.ptr.addr);
516c8b03afeSHeinz Mauelshagen break;
517c8b03afeSHeinz Mauelshagen
518c8b03afeSHeinz Mauelshagen default:
519c8b03afeSHeinz Mauelshagen return -EINVAL;
520c8b03afeSHeinz Mauelshagen }
521c8b03afeSHeinz Mauelshagen
522c8b03afeSHeinz Mauelshagen return 0;
523c8b03afeSHeinz Mauelshagen }
524c8b03afeSHeinz Mauelshagen
dm_io(struct dm_io_request * io_req,unsigned int num_regions,struct dm_io_region * where,unsigned long * sync_error_bits,unsigned short ioprio)52586a3238cSHeinz Mauelshagen int dm_io(struct dm_io_request *io_req, unsigned int num_regions,
526*5cfcea64SHongyu Jin struct dm_io_region *where, unsigned long *sync_error_bits,
527*5cfcea64SHongyu Jin unsigned short ioprio)
528c8b03afeSHeinz Mauelshagen {
529c8b03afeSHeinz Mauelshagen int r;
530c8b03afeSHeinz Mauelshagen struct dpages dp;
531c8b03afeSHeinz Mauelshagen
532bb91bc7bSMikulas Patocka r = dp_init(io_req, &dp, (unsigned long)where->count << SECTOR_SHIFT);
533c8b03afeSHeinz Mauelshagen if (r)
534c8b03afeSHeinz Mauelshagen return r;
535c8b03afeSHeinz Mauelshagen
536c8b03afeSHeinz Mauelshagen if (!io_req->notify.fn)
537c8b03afeSHeinz Mauelshagen return sync_io(io_req->client, num_regions, where,
538*5cfcea64SHongyu Jin io_req->bi_opf, &dp, sync_error_bits, ioprio);
539c8b03afeSHeinz Mauelshagen
540581075e4SBart Van Assche return async_io(io_req->client, num_regions, where,
541a3282b43SBart Van Assche io_req->bi_opf, &dp, io_req->notify.fn,
542*5cfcea64SHongyu Jin io_req->notify.context, ioprio);
543c8b03afeSHeinz Mauelshagen }
544c8b03afeSHeinz Mauelshagen EXPORT_SYMBOL(dm_io);
545952b3557SMikulas Patocka
dm_io_init(void)546952b3557SMikulas Patocka int __init dm_io_init(void)
547952b3557SMikulas Patocka {
548952b3557SMikulas Patocka _dm_io_cache = KMEM_CACHE(io, 0);
549952b3557SMikulas Patocka if (!_dm_io_cache)
550952b3557SMikulas Patocka return -ENOMEM;
551952b3557SMikulas Patocka
552952b3557SMikulas Patocka return 0;
553952b3557SMikulas Patocka }
554952b3557SMikulas Patocka
dm_io_exit(void)555952b3557SMikulas Patocka void dm_io_exit(void)
556952b3557SMikulas Patocka {
557952b3557SMikulas Patocka kmem_cache_destroy(_dm_io_cache);
558952b3557SMikulas Patocka _dm_io_cache = NULL;
559952b3557SMikulas Patocka }
560