Lines matching "+full:mem +full:- +full:io" in the device-mapper I/O helper (drivers/md/dm-io.c)

// SPDX-License-Identifier: GPL-2.0-only

#include "dm-core.h"
#include <linux/device-mapper.h>
#include <linux/dm-io.h>

#define DM_MSG_PREFIX "io"

 * Aligning 'struct io' reduces the number of bits required to store
struct io {

/* in dm_io_client_create() */
                return ERR_PTR(-ENOMEM);
        ret = mempool_init_slab_pool(&client->pool, min_ios, _dm_io_cache);
        ret = bioset_init(&client->bios, min_ios, 0, BIOSET_NEED_BVECS);
        mempool_exit(&client->pool);

/* in dm_io_client_destroy() */
        mempool_exit(&client->pool);
        bioset_exit(&client->bios);

 *-------------------------------------------------------------------
 * We need to keep track of which region a bio is doing io for.
 * ensure the 'struct io' pointer is aligned so enough low bits are
 *-------------------------------------------------------------------

static void store_io_and_region_in_bio(struct bio *bio, struct io *io,
        if (unlikely(!IS_ALIGNED((unsigned long)io, DM_IO_MAX_REGIONS))) {
                DMCRIT("Unaligned struct io pointer %p", io);
        bio->bi_private = (void *)((unsigned long)io | region);

static void retrieve_io_and_region_from_bio(struct bio *bio, struct io **io,
        unsigned long val = (unsigned long)bio->bi_private;

        *io = (void *)(val & -(unsigned long)DM_IO_MAX_REGIONS);
        *region = val & (DM_IO_MAX_REGIONS - 1);
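
The two helpers above rely on 'struct io' being aligned to DM_IO_MAX_REGIONS, so the region number fits in the pointer's otherwise-zero low bits. The standalone sketch below (not dm-io.c code) shows the same pack/unpack arithmetic; MAX_REGIONS is an assumed stand-in for DM_IO_MAX_REGIONS and only needs to be a power of two matching the object's alignment. For a power of two, val & -MAX_REGIONS and val & ~(MAX_REGIONS - 1) clear the same low bits.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_REGIONS 64UL        /* assumed power-of-two alignment */

int main(void)
{
        /* an object aligned to MAX_REGIONS, as 'struct io' is in dm-io.c */
        void *io = aligned_alloc(MAX_REGIONS, MAX_REGIONS);
        uintptr_t region = 5;                   /* must be < MAX_REGIONS */

        /* pack: the aligned pointer's low bits are zero, so OR in the region */
        void *bi_private = (void *)((uintptr_t)io | region);

        /* unpack: mask the low bits to recover pointer and region separately */
        void *io2 = (void *)((uintptr_t)bi_private & ~(MAX_REGIONS - 1));
        uintptr_t region2 = (uintptr_t)bi_private & (MAX_REGIONS - 1);

        assert(io2 == io && region2 == region);
        printf("io=%p region=%lu\n", io2, (unsigned long)region2);
        free(io);
        return 0;
}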
 *--------------------------------------------------------------
 * We need an io object to keep track of the number of bios that
 * have been dispatched for a particular io.
 *--------------------------------------------------------------

static void complete_io(struct io *io)
        unsigned long error_bits = io->error_bits;
        io_notify_fn fn = io->callback;
        void *context = io->context;

        if (io->vma_invalidate_size)
                invalidate_kernel_vmap_range(io->vma_invalidate_address,
                                             io->vma_invalidate_size);

        mempool_free(io, &io->client->pool);

static void dec_count(struct io *io, unsigned int region, blk_status_t error)
                set_bit(region, &io->error_bits);
        if (atomic_dec_and_test(&io->count))
                complete_io(io);

/* in endio() */
        struct io *io;
        if (bio->bi_status && bio_data_dir(bio) == READ)
         * The bio destructor in bio_put() may use the io object.
        retrieve_io_and_region_from_bio(bio, &io, &region);
        error = bio->bi_status;
        dec_count(io, region, error);
 *--------------------------------------------------------------
 * destination page for io.
 *--------------------------------------------------------------

/* in list_get_page() */
        unsigned int o = dp->context_u;
        struct page_list *pl = dp->context_ptr;

        *p = pl->page;
        *len = PAGE_SIZE - o;

/* in list_next_page() */
        struct page_list *pl = dp->context_ptr;

        dp->context_ptr = pl->next;
        dp->context_u = 0;

/* in list_dp_init() */
        dp->get_page = list_get_page;
        dp->next_page = list_next_page;
        dp->context_u = offset;
        dp->context_ptr = pl;

/* in bio_get_page() */
        struct bio_vec bvec = bvec_iter_bvec((struct bio_vec *)dp->context_ptr,
                                             dp->context_bi);
        dp->context_bi.bi_sector = (sector_t)bvec.bv_len;

/* in bio_next_page() */
        unsigned int len = (unsigned int)dp->context_bi.bi_sector;

        bvec_iter_advance((struct bio_vec *)dp->context_ptr,
                          &dp->context_bi, len);

/* in bio_dp_init() */
        dp->get_page = bio_get_page;
        dp->next_page = bio_next_page;
        dp->context_ptr = bio->bi_io_vec;
        dp->context_bi = bio->bi_iter;

/* in vm_get_page() */
        *p = vmalloc_to_page(dp->context_ptr);
        *offset = dp->context_u;
        *len = PAGE_SIZE - dp->context_u;

/* in vm_next_page() */
        dp->context_ptr += PAGE_SIZE - dp->context_u;
        dp->context_u = 0;

/* in vm_dp_init() */
        dp->get_page = vm_get_page;
        dp->next_page = vm_next_page;
        dp->context_u = offset_in_page(data);
        dp->context_ptr = data;

/* in km_get_page() */
        *p = virt_to_page(dp->context_ptr);
        *offset = dp->context_u;
        *len = PAGE_SIZE - dp->context_u;

/* in km_next_page() */
        dp->context_ptr += PAGE_SIZE - dp->context_u;
        dp->context_u = 0;

/* in km_dp_init() */
        dp->get_page = km_get_page;
        dp->next_page = km_next_page;
        dp->context_u = offset_in_page(data);
        dp->context_ptr = data;
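
All four providers above (page list, bio, vmalloc, kmalloc) fill in the same pair of callbacks, so do_region() below can stay ignorant of where the data lives. As an illustration only, here is a hypothetical fifth provider that is not part of dm-io.c; it assumes the struct dpages fields and the get_page/next_page prototypes used by the variants above, and serves a single pre-allocated page.

/* HYPOTHETICAL, for illustration: a dpages provider for one struct page. */
static void one_page_get_page(struct dpages *dp, struct page **p,
                              unsigned long *len, unsigned int *offset)
{
        *p = dp->context_ptr;                   /* the single page */
        *offset = dp->context_u;                /* starting offset within it */
        *len = PAGE_SIZE - dp->context_u;       /* bytes usable from there */
}

static void one_page_next_page(struct dpages *dp)
{
        /*
         * Nothing more to hand out; do_region() only keeps asking while the
         * region still has sectors left, so where->count must fit in this page.
         */
        dp->context_u = 0;
}

static void one_page_dp_init(struct dpages *dp, struct page *page,
                             unsigned int offset)
{
        dp->get_page = one_page_get_page;
        dp->next_page = one_page_next_page;
        dp->context_u = offset;
        dp->context_ptr = page;
}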
 *---------------------------------------------------------------
 * IO routines that accept a list of pages.
 *---------------------------------------------------------------

/* in do_region() */
                      struct io *io, unsigned short ioprio)
        sector_t remaining = where->count;
        struct request_queue *q = bdev_get_queue(where->bdev);

                special_cmd_max_sectors = bdev_max_discard_sectors(where->bdev);
                special_cmd_max_sectors = q->limits.max_write_zeroes_sectors;
                atomic_inc(&io->count);
                dec_count(io, region, BLK_STS_NOTSUPP);

         * where->count may be zero if op holds a flush and we need to
         * send a zero-sized flush.

                 * Allocate a suitably sized bio.

                bio = bio_alloc_bioset(where->bdev, num_bvecs, opf, GFP_NOIO,
                                       &io->client->bios);
                bio->bi_iter.bi_sector = where->sector + (where->count - remaining);
                bio->bi_end_io = endio;
                bio->bi_ioprio = ioprio;
                store_io_and_region_in_bio(bio, io, region);

                        bio->bi_iter.bi_size = num_sectors << SECTOR_SHIFT;
                        remaining -= num_sectors;

                        dp->get_page(dp, &page, &len, &offset);
                        remaining -= to_sector(len);
                        dp->next_page(dp);

                atomic_inc(&io->count);
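
The comment above notes that where->count may be zero when the request carries a flush, in which case do_region() sends just the zero-sized flush bio. Below is a sketch of how a caller might use that; it is an assumption about usage rather than code from dm-io.c, and the helper name and IOPRIO_DEFAULT priority are illustrative. Leaving notify.fn NULL makes dm_io() take the synchronous path shown in dm_io() further down.

static int flush_one(struct dm_io_client *client, struct block_device *bdev)
{
        struct dm_io_region region = {
                .bdev = bdev,
                .sector = 0,
                .count = 0,             /* zero-sized: flush only, no data */
        };
        struct dm_io_request io_req = {
                .bi_opf = REQ_OP_WRITE | REQ_PREFLUSH,
                .mem.type = DM_IO_KMEM,
                .mem.ptr.addr = NULL,   /* no payload for a bare flush */
                .notify.fn = NULL,      /* NULL => synchronous */
                .client = client,
        };

        return dm_io(&io_req, 1, &region, NULL, IOPRIO_DEFAULT);
}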
/* in dispatch_io() */
                        struct io *io, int sync, unsigned short ioprio)
                        do_region(opf, i, where + i, dp, io, ioprio);

         * the io being completed too early.
        dec_count(io, 0, 0);
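
The comment above is about dispatch_io() holding an extra reference so the io cannot complete while bios for later regions are still being created. A tiny userspace model of that counting pattern follows; it is an illustration only, not kernel code.

#include <assert.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int count = 1;    /* initial holder reference */
static bool completed;

static void dec_count(void)
{
        if (atomic_fetch_sub(&count, 1) == 1)
                completed = true;       /* stands in for complete_io() */
}

int main(void)
{
        for (int region = 0; region < 4; region++) {
                atomic_fetch_add(&count, 1);    /* one reference per bio */
                dec_count();                    /* bio "completes" instantly */
                assert(!completed);             /* holder ref keeps io alive */
        }
        dec_count();            /* dispatch_io()'s final dec_count(io, 0, 0) */
        assert(completed);
        printf("io completed only after dispatch dropped its reference\n");
        return 0;
}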
/* in sync_io_complete() */
        sio->error_bits = error;
        complete(&sio->wait);

/* in sync_io() */
        struct io *io;

                return -EIO;

        io = mempool_alloc(&client->pool, GFP_NOIO);
        io->error_bits = 0;
        atomic_set(&io->count, 1); /* see dispatch_io() */
        io->client = client;
        io->callback = sync_io_complete;
        io->context = &sio;

        io->vma_invalidate_address = dp->vma_invalidate_address;
        io->vma_invalidate_size = dp->vma_invalidate_size;

        dispatch_io(opf, num_regions, where, dp, io, 1, ioprio);

        return sio.error_bits ? -EIO : 0;

/* in async_io() */
        struct io *io;

                return -EIO;

        io = mempool_alloc(&client->pool, GFP_NOIO);
        io->error_bits = 0;
        atomic_set(&io->count, 1); /* see dispatch_io() */
        io->client = client;
        io->callback = fn;
        io->context = context;

        io->vma_invalidate_address = dp->vma_invalidate_address;
        io->vma_invalidate_size = dp->vma_invalidate_size;

        dispatch_io(opf, num_regions, where, dp, io, 0, ioprio);
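
async_io() is reached when the caller fills in notify.fn, as dm_io() shows below. A sketch of what such a caller might look like; this is an assumption about usage rather than code from dm-io.c, write_done(), start_async_write() and the IOPRIO_DEFAULT priority are illustrative, and the buffer must stay allocated until the callback fires.

static void write_done(unsigned long error_bits, void *context)
{
        if (error_bits)
                pr_err("dm-io example: write failed (0x%lx)\n", error_bits);
        complete(context);      /* wake up whoever owns the buffer */
}

static int start_async_write(struct dm_io_client *client,
                             struct block_device *bdev, sector_t sector,
                             void *buf, sector_t nr_sectors,
                             struct completion *done)
{
        struct dm_io_region region = {
                .bdev = bdev,
                .sector = sector,
                .count = nr_sectors,
        };
        struct dm_io_request io_req = {
                .bi_opf = REQ_OP_WRITE,
                .mem.type = DM_IO_KMEM,         /* kmalloc'd buffer */
                .mem.ptr.addr = buf,
                .notify.fn = write_done,        /* non-NULL => asynchronous */
                .notify.context = done,
                .client = client,
        };

        return dm_io(&io_req, 1, &region, NULL, IOPRIO_DEFAULT);
}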
/* in dp_init() */
        dp->vma_invalidate_address = NULL;
        dp->vma_invalidate_size = 0;

        switch (io_req->mem.type) {
                list_dp_init(dp, io_req->mem.ptr.pl, io_req->mem.offset);

                bio_dp_init(dp, io_req->mem.ptr.bio);

                flush_kernel_vmap_range(io_req->mem.ptr.vma, size);
                if ((io_req->bi_opf & REQ_OP_MASK) == REQ_OP_READ) {
                        dp->vma_invalidate_address = io_req->mem.ptr.vma;
                        dp->vma_invalidate_size = size;
                vm_dp_init(dp, io_req->mem.ptr.vma);

                km_dp_init(dp, io_req->mem.ptr.addr);

                return -EINVAL;

/* in dm_io() */
        r = dp_init(io_req, &dp, (unsigned long)where->count << SECTOR_SHIFT);

        if (!io_req->notify.fn)
                return sync_io(io_req->client, num_regions, where,
                               io_req->bi_opf, &dp, sync_error_bits, ioprio);

        return async_io(io_req->client, num_regions, where,
                        io_req->bi_opf, &dp, io_req->notify.fn,
                        io_req->notify.context, ioprio);
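
Putting the pieces together: a synchronous read into a vmalloc'd buffer. Leaving notify.fn NULL selects the sync_io() path, and DM_IO_VMA makes dp_init() flush the kernel vmap range before the I/O while complete_io() invalidates it afterwards on reads. This is a sketch of typical usage under the five-argument dm_io() signature shown here, not code from dm-io.c; the helper name is illustrative, and the client would normally be created once with dm_io_client_create() and released with dm_io_client_destroy().

static int read_into_vmalloc(struct dm_io_client *client,
                             struct block_device *bdev, sector_t sector,
                             void *vbuf, sector_t nr_sectors)
{
        unsigned long error_bits = 0;   /* one bit set per failed region */
        struct dm_io_region region = {
                .bdev = bdev,
                .sector = sector,
                .count = nr_sectors,
        };
        struct dm_io_request io_req = {
                .bi_opf = REQ_OP_READ,
                .mem.type = DM_IO_VMA,          /* vmalloc'd memory */
                .mem.ptr.vma = vbuf,
                .notify.fn = NULL,              /* NULL => wait for completion */
                .client = client,
        };

        return dm_io(&io_req, 1, &region, &error_bits, IOPRIO_DEFAULT);
}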
/* in dm_io_init() */
        _dm_io_cache = KMEM_CACHE(io, 0);
                return -ENOMEM;