/*
 * Copyright (C) 2020 Red Hat GmbH
 *
 * This file is released under the GPL.
 *
 * Device-mapper target to emulate smaller logical block
 * size on backing devices exposing (natively) larger ones.
 *
 * E.g. 512 byte sector emulation on 4K native disks.
 */

#include "dm.h"
#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/dm-bufio.h>

#define DM_MSG_PREFIX "ebs"

static void ebs_dtr(struct dm_target *ti);

/* Emulated block size context. */
struct ebs_c {
	struct dm_dev *dev;		/* Underlying device to emulate block size on. */
	struct dm_bufio_client *bufio;	/* Use dm-bufio for read and read-modify-write processing. */
	struct workqueue_struct *wq;	/* Workqueue for ^ processing of bios. */
	struct work_struct ws;		/* Work item used for ^. */
	struct bio_list bios_in;	/* Worker bios input list. */
	spinlock_t lock;		/* Guard bios input list above. */
	sector_t start;			/* <start> table line argument, see ebs_ctr below. */
	unsigned int e_bs;		/* Emulated block size in sectors exposed to upper layer. */
	unsigned int u_bs;		/* Underlying block size in sectors retrieved from/set on lower layer device. */
	unsigned char block_shift;	/* bitshift sectors -> blocks used in dm-bufio API. */
	bool u_bs_set:1;		/* Flag to indicate underlying block size is set on table line. */
};

static inline sector_t __sector_to_block(struct ebs_c *ec, sector_t sector)
{
	return sector >> ec->block_shift;
}

static inline sector_t __block_mod(sector_t sector, unsigned int bs)
{
	return sector & (bs - 1);
}
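
/*
 * Note: the mask above relies on the block size being a power of two,
 * which __ebs_check_bs() below enforces for both e_bs and u_bs at
 * construction time. E.g. with u_bs = 8 sectors (4 KiB), __block_mod(13, 8) == 5.
 */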

/* Return number of blocks for a bio, accounting for misalignment of start and end sectors. */
static inline unsigned int __nr_blocks(struct ebs_c *ec, struct bio *bio)
{
	sector_t end_sector = __block_mod(bio->bi_iter.bi_sector, ec->u_bs) + bio_sectors(bio);

	return __sector_to_block(ec, end_sector) + (__block_mod(end_sector, ec->u_bs) ? 1 : 0);
}
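
/*
 * Worked example: with u_bs = 8 sectors, a bio of 10 sectors whose start is
 * misaligned by 3 sectors gives end_sector = 3 + 10 = 13, i.e. 13 >> 3 = 1
 * plus one trailing partial block -> 2 underlying blocks (the bio crosses a
 * single block boundary).
 */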

static inline bool __ebs_check_bs(unsigned int bs)
{
	return bs && is_power_of_2(bs);
}

/*
 * READ/WRITE:
 *
 * Copy data between bufio blocks and the bio's (partial/overlapping) bvec pages.
 */
static int __ebs_rw_bvec(struct ebs_c *ec, enum req_op op, struct bio_vec *bv,
			 struct bvec_iter *iter)
{
	int r = 0;
	unsigned char *ba, *pa;
	unsigned int cur_len;
	unsigned int bv_len = bv->bv_len;
	unsigned int buf_off = to_bytes(__block_mod(iter->bi_sector, ec->u_bs));
	sector_t block = __sector_to_block(ec, iter->bi_sector);
	struct dm_buffer *b;

	if (unlikely(!bv->bv_page || !bv_len))
		return -EIO;

	pa = bvec_virt(bv);

	/* Handle overlapping page <-> blocks */
	while (bv_len) {
		cur_len = min(dm_bufio_get_block_size(ec->bufio) - buf_off, bv_len);

		/* Avoid reading for writes in case bio vector's page overwrites block completely. */
		if (op == REQ_OP_READ || buf_off || bv_len < dm_bufio_get_block_size(ec->bufio))
			ba = dm_bufio_read(ec->bufio, block, &b);
		else
			ba = dm_bufio_new(ec->bufio, block, &b);

		if (IS_ERR(ba)) {
			/*
			 * Carry on with next buffer, if any, to issue all possible
			 * data but return error.
			 */
			r = PTR_ERR(ba);
		} else {
			/* Copy data to/from bio to buffer if read/new was successful above. */
			ba += buf_off;
			if (op == REQ_OP_READ) {
				memcpy(pa, ba, cur_len);
				flush_dcache_page(bv->bv_page);
			} else {
				flush_dcache_page(bv->bv_page);
				memcpy(ba, pa, cur_len);
				dm_bufio_mark_partial_buffer_dirty(b, buf_off, buf_off + cur_len);
			}

			dm_bufio_release(b);
		}

		pa += cur_len;
		bv_len -= cur_len;
		buf_off = 0;
		block++;
	}

	return r;
}
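
/*
 * In effect, a partial or misaligned write is handled as read-modify-write:
 * the underlying block is read via dm_bufio_read(), patched with the bvec
 * data and marked partially dirty, while writes covering a whole block skip
 * the read by using dm_bufio_new().
 */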

/* READ/WRITE: iterate the bio's bvecs, copying between (partial) pages and bufio blocks. */
static int __ebs_rw_bio(struct ebs_c *ec, enum req_op op, struct bio *bio)
{
	int r = 0, rr;
	struct bio_vec bv;
	struct bvec_iter iter;

	bio_for_each_bvec(bv, bio, iter) {
		rr = __ebs_rw_bvec(ec, op, &bv, &iter);
		if (rr)
			r = rr;
	}

	return r;
}

/*
 * Discard bio's blocks, i.e. pass discards down.
 *
 * Avoid discarding partial blocks at beginning and end;
 * return 0 in case no blocks can be discarded as a result.
 */
static int __ebs_discard_bio(struct ebs_c *ec, struct bio *bio)
{
	sector_t block, blocks, sector = bio->bi_iter.bi_sector;

	block = __sector_to_block(ec, sector);
	blocks = __nr_blocks(ec, bio);

	/*
	 * Partial first underlying block (__nr_blocks() may have
	 * resulted in one block).
	 */
	if (__block_mod(sector, ec->u_bs)) {
		block++;
		blocks--;
	}

	/* Partial last underlying block if any. */
	if (blocks && __block_mod(bio_end_sector(bio), ec->u_bs))
		blocks--;

	return blocks ? dm_bufio_issue_discard(ec->bufio, block, blocks) : 0;
}
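
/*
 * Worked example: with u_bs = 8 sectors, a discard of 24 sectors starting at
 * relative sector 4 touches blocks 0-3; the partial first block (sectors 4-7)
 * and partial last block (sectors 24-27) are skipped, so only blocks 1 and 2
 * (sectors 8-23) are passed to dm_bufio_issue_discard().
 */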

/* Release a bio's blocks from the bufio cache. */
static void __ebs_forget_bio(struct ebs_c *ec, struct bio *bio)
{
	sector_t blocks, sector = bio->bi_iter.bi_sector;

	blocks = __nr_blocks(ec, bio);

	dm_bufio_forget_buffers(ec->bufio, __sector_to_block(ec, sector), blocks);
}

/* Worker function to process incoming bios. */
static void __ebs_process_bios(struct work_struct *ws)
{
	int r;
	bool write = false;
	sector_t block1, block2;
	struct ebs_c *ec = container_of(ws, struct ebs_c, ws);
	struct bio *bio;
	struct bio_list bios;

	bio_list_init(&bios);

	spin_lock_irq(&ec->lock);
	bios = ec->bios_in;
	bio_list_init(&ec->bios_in);
	spin_unlock_irq(&ec->lock);

	/* Prefetch all read and any mis-aligned write buffers */
	bio_list_for_each(bio, &bios) {
		block1 = __sector_to_block(ec, bio->bi_iter.bi_sector);
		if (bio_op(bio) == REQ_OP_READ)
			dm_bufio_prefetch(ec->bufio, block1, __nr_blocks(ec, bio));
		else if (bio_op(bio) == REQ_OP_WRITE && !(bio->bi_opf & REQ_PREFLUSH)) {
			block2 = __sector_to_block(ec, bio_end_sector(bio));
			if (__block_mod(bio->bi_iter.bi_sector, ec->u_bs))
				dm_bufio_prefetch(ec->bufio, block1, 1);
			if (__block_mod(bio_end_sector(bio), ec->u_bs) && block2 != block1)
				dm_bufio_prefetch(ec->bufio, block2, 1);
		}
	}

	bio_list_for_each(bio, &bios) {
		r = -EIO;
		if (bio_op(bio) == REQ_OP_READ)
			r = __ebs_rw_bio(ec, REQ_OP_READ, bio);
		else if (bio_op(bio) == REQ_OP_WRITE) {
			write = true;
			r = __ebs_rw_bio(ec, REQ_OP_WRITE, bio);
		} else if (bio_op(bio) == REQ_OP_DISCARD) {
			__ebs_forget_bio(ec, bio);
			r = __ebs_discard_bio(ec, bio);
		}

		if (r < 0)
			bio->bi_status = errno_to_blk_status(r);
	}

	/*
	 * We write dirty buffers after processing I/O on them
	 * but before we endio thus addressing REQ_FUA/REQ_SYNC.
	 */
	r = write ? dm_bufio_write_dirty_buffers(ec->bufio) : 0;

	while ((bio = bio_list_pop(&bios))) {
		/* Any other request is endioed. */
		if (unlikely(r && bio_op(bio) == REQ_OP_WRITE))
			bio_io_error(bio);
		else
			bio_endio(bio);
	}
}

/*
 * Construct an emulated block size mapping: <dev_path> <offset> <ebs> [<ubs>]
 *
 * <dev_path>: path of the underlying device
 * <offset>: offset in 512-byte sectors into <dev_path>
 * <ebs>: emulated block size in units of 512 bytes exposed to the upper layer
 * [<ubs>]: underlying block size in units of 512 bytes imposed on the lower layer;
 *	    optional; if not supplied, retrieve the logical block size from the underlying device
 */
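/*
 * Example (hypothetical device and size): the table line
 * "0 2097152 ebs /dev/sdX 0 1 8" exposes a 512-byte logical block size
 * (<ebs> = 1 sector) on top of a 4 KiB underlying block size (<ubs> = 8
 * sectors), starting at offset 0 of /dev/sdX.
 */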
static int ebs_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	int r;
	unsigned short tmp1;
	unsigned long long tmp;
	char dummy;
	struct ebs_c *ec;

	if (argc < 3 || argc > 4) {
		ti->error = "Invalid argument count";
		return -EINVAL;
	}

	ec = ti->private = kzalloc(sizeof(*ec), GFP_KERNEL);
	if (!ec) {
		ti->error = "Cannot allocate ebs context";
		return -ENOMEM;
	}

	r = -EINVAL;
	if (sscanf(argv[1], "%llu%c", &tmp, &dummy) != 1 ||
	    tmp != (sector_t)tmp ||
	    (sector_t)tmp >= ti->len) {
		ti->error = "Invalid device offset sector";
		goto bad;
	}
	ec->start = tmp;

	if (sscanf(argv[2], "%hu%c", &tmp1, &dummy) != 1 ||
	    !__ebs_check_bs(tmp1) ||
	    to_bytes(tmp1) > PAGE_SIZE) {
		ti->error = "Invalid emulated block size";
		goto bad;
	}
	ec->e_bs = tmp1;

	if (argc > 3) {
		if (sscanf(argv[3], "%hu%c", &tmp1, &dummy) != 1 || !__ebs_check_bs(tmp1)) {
			ti->error = "Invalid underlying block size";
			goto bad;
		}
		ec->u_bs = tmp1;
		ec->u_bs_set = true;
	} else
		ec->u_bs_set = false;

	r = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &ec->dev);
	if (r) {
		ti->error = "Device lookup failed";
		ec->dev = NULL;
		goto bad;
	}

	r = -EINVAL;
	if (!ec->u_bs_set) {
		ec->u_bs = to_sector(bdev_logical_block_size(ec->dev->bdev));
		if (!__ebs_check_bs(ec->u_bs)) {
			ti->error = "Invalid retrieved underlying block size";
			goto bad;
		}
	}

	if (!ec->u_bs_set && ec->e_bs == ec->u_bs)
		DMINFO("Emulation superfluous: emulated equal to underlying block size");

	if (__block_mod(ec->start, ec->u_bs)) {
		ti->error = "Device offset must be multiple of underlying block size";
		goto bad;
	}

	ec->bufio = dm_bufio_client_create(ec->dev->bdev, to_bytes(ec->u_bs), 1, 0, NULL, NULL);
	if (IS_ERR(ec->bufio)) {
		ti->error = "Cannot create dm bufio client";
		r = PTR_ERR(ec->bufio);
		ec->bufio = NULL;
		goto bad;
	}

	ec->wq = alloc_ordered_workqueue("dm-" DM_MSG_PREFIX, WQ_MEM_RECLAIM);
	if (!ec->wq) {
		ti->error = "Cannot create dm-" DM_MSG_PREFIX " workqueue";
		r = -ENOMEM;
		goto bad;
	}

	ec->block_shift = __ffs(ec->u_bs);
	INIT_WORK(&ec->ws, &__ebs_process_bios);
	bio_list_init(&ec->bios_in);
	spin_lock_init(&ec->lock);

	ti->num_flush_bios = 1;
	ti->num_discard_bios = 1;
	ti->num_secure_erase_bios = 0;
	ti->num_write_zeroes_bios = 0;
	return 0;
bad:
	ebs_dtr(ti);
	return r;
}

static void ebs_dtr(struct dm_target *ti)
{
	struct ebs_c *ec = ti->private;

	if (ec->wq)
		destroy_workqueue(ec->wq);
	if (ec->bufio)
		dm_bufio_client_destroy(ec->bufio);
	if (ec->dev)
		dm_put_device(ti, ec->dev);
	kfree(ec);
}

static int ebs_map(struct dm_target *ti, struct bio *bio)
{
	struct ebs_c *ec = ti->private;

	bio_set_dev(bio, ec->dev->bdev);
	bio->bi_iter.bi_sector = ec->start + dm_target_offset(ti, bio->bi_iter.bi_sector);

	if (unlikely(bio_op(bio) == REQ_OP_FLUSH))
		return DM_MAPIO_REMAPPED;
	/*
	 * Only queue for bufio processing in case of partial or overlapping buffers
	 * -or-
	 * emulation with ebs == ubs aiming for tests of dm-bufio overhead.
	 */
	if (likely(__block_mod(bio->bi_iter.bi_sector, ec->u_bs) ||
		   __block_mod(bio_end_sector(bio), ec->u_bs) ||
		   ec->e_bs == ec->u_bs)) {
		spin_lock_irq(&ec->lock);
		bio_list_add(&ec->bios_in, bio);
		spin_unlock_irq(&ec->lock);

		queue_work(ec->wq, &ec->ws);

		return DM_MAPIO_SUBMITTED;
	}

	/* Forget any buffer content relative to this direct backing device I/O. */
	__ebs_forget_bio(ec, bio);

	return DM_MAPIO_REMAPPED;
}
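
/*
 * E.g. with e_bs = 1 and u_bs = 8, a 4 KiB write aligned to a 4 KiB boundary
 * is remapped straight to the backing device by ebs_map() above, whereas a
 * lone 512-byte write is queued for read-modify-write in the worker.
 */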

static void ebs_status(struct dm_target *ti, status_type_t type,
		       unsigned status_flags, char *result, unsigned maxlen)
{
	struct ebs_c *ec = ti->private;

	switch (type) {
	case STATUSTYPE_INFO:
		*result = '\0';
		break;
	case STATUSTYPE_TABLE:
		snprintf(result, maxlen, ec->u_bs_set ? "%s %llu %u %u" : "%s %llu %u",
			 ec->dev->name, (unsigned long long) ec->start, ec->e_bs, ec->u_bs);
		break;
	case STATUSTYPE_IMA:
		*result = '\0';
		break;
	}
}

static int ebs_prepare_ioctl(struct dm_target *ti, struct block_device **bdev)
{
	struct ebs_c *ec = ti->private;
	struct dm_dev *dev = ec->dev;

	/*
	 * Only pass ioctls through if the device sizes match exactly.
	 */
	*bdev = dev->bdev;
	return !!(ec->start || ti->len != bdev_nr_sectors(dev->bdev));
}

static void ebs_io_hints(struct dm_target *ti, struct queue_limits *limits)
{
	struct ebs_c *ec = ti->private;

	limits->logical_block_size = to_bytes(ec->e_bs);
	limits->physical_block_size = to_bytes(ec->u_bs);
	limits->alignment_offset = limits->physical_block_size;
	blk_limits_io_min(limits, limits->logical_block_size);
}
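
/*
 * E.g. for the 512-byte-on-4K case (e_bs = 1, u_bs = 8) the hints above
 * advertise a 512-byte logical and a 4096-byte physical block size to the
 * upper layers.
 */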

static int ebs_iterate_devices(struct dm_target *ti,
				  iterate_devices_callout_fn fn, void *data)
{
	struct ebs_c *ec = ti->private;

	return fn(ti, ec->dev, ec->start, ti->len, data);
}

static struct target_type ebs_target = {
	.name		 = "ebs",
	.version	 = {1, 0, 1},
	.features	 = DM_TARGET_PASSES_INTEGRITY,
	.module		 = THIS_MODULE,
	.ctr		 = ebs_ctr,
	.dtr		 = ebs_dtr,
	.map		 = ebs_map,
	.status		 = ebs_status,
	.io_hints	 = ebs_io_hints,
	.prepare_ioctl	 = ebs_prepare_ioctl,
	.iterate_devices = ebs_iterate_devices,
};

static int __init dm_ebs_init(void)
{
	int r = dm_register_target(&ebs_target);

	if (r < 0)
		DMERR("register failed %d", r);

	return r;
}

static void dm_ebs_exit(void)
{
	dm_unregister_target(&ebs_target);
}

module_init(dm_ebs_init);
module_exit(dm_ebs_exit);

MODULE_AUTHOR("Heinz Mauelshagen <dm-devel@redhat.com>");
MODULE_DESCRIPTION(DM_NAME " emulated block size target");
MODULE_LICENSE("GPL");