// SPDX-License-Identifier: GPL-2.0-or-later
/*******************************************************************************
 * Filename:  target_core_iblock.c
 *
 * This file contains the Storage Engine <-> Linux BlockIO transport
 * specific functions.
 *
 * (c) Copyright 2003-2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 ******************************************************************************/

#include <linux/string.h>
#include <linux/parser.h>
#include <linux/timer.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/blk-integrity.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/bio.h>
#include <linux/file.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <scsi/scsi_proto.h>
#include <asm/unaligned.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>

#include "target_core_iblock.h"

#define IBLOCK_MAX_BIO_PER_TASK	 32	/* max # of bios to submit at a time */
#define IBLOCK_BIO_POOL_SIZE	128

static inline struct iblock_dev *IBLOCK_DEV(struct se_device *dev)
{
	return container_of(dev, struct iblock_dev, dev);
}


static int iblock_attach_hba(struct se_hba *hba, u32 host_id)
{
	pr_debug("CORE_HBA[%d] - TCM iBlock HBA Driver %s on"
		" Generic Target Core Stack %s\n", hba->hba_id,
		IBLOCK_VERSION, TARGET_CORE_VERSION);
	return 0;
}

static void iblock_detach_hba(struct se_hba *hba)
{
}

static struct se_device *iblock_alloc_device(struct se_hba *hba, const char *name)
{
	struct iblock_dev *ib_dev = NULL;

	ib_dev = kzalloc(sizeof(struct iblock_dev), GFP_KERNEL);
	if (!ib_dev) {
		pr_err("Unable to allocate struct iblock_dev\n");
		return NULL;
	}

	ib_dev->ibd_plug = kcalloc(nr_cpu_ids, sizeof(*ib_dev->ibd_plug),
				   GFP_KERNEL);
	if (!ib_dev->ibd_plug)
		goto free_dev;

	pr_debug("IBLOCK: Allocated ib_dev for %s\n", name);

	return &ib_dev->dev;

free_dev:
	kfree(ib_dev);
	return NULL;
}

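/*
 * Claim the backing struct block_device named by the udev_path= parameter,
 * set up the per-device bioset, and derive the se_device attributes
 * (block size, max sectors, queue depth, discard/write-zeroes support,
 * rotational flag) from the backing queue.  T10-PI is exported when the
 * backing device carries a supported blk_integrity profile.
 */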
static int iblock_configure_device(struct se_device *dev)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
	struct request_queue *q;
	struct block_device *bd = NULL;
	struct blk_integrity *bi;
	fmode_t mode;
	unsigned int max_write_zeroes_sectors;
	int ret;

	if (!(ib_dev->ibd_flags & IBDF_HAS_UDEV_PATH)) {
		pr_err("Missing udev_path= parameters for IBLOCK\n");
		return -EINVAL;
	}

	ret = bioset_init(&ib_dev->ibd_bio_set, IBLOCK_BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS);
	if (ret) {
		pr_err("IBLOCK: Unable to create bioset\n");
		goto out;
	}

	pr_debug("IBLOCK: Claiming struct block_device: %s\n",
			ib_dev->ibd_udev_path);

	mode = FMODE_READ|FMODE_EXCL;
	if (!ib_dev->ibd_readonly)
		mode |= FMODE_WRITE;
	else
		dev->dev_flags |= DF_READ_ONLY;

	bd = blkdev_get_by_path(ib_dev->ibd_udev_path, mode, ib_dev);
	if (IS_ERR(bd)) {
		ret = PTR_ERR(bd);
		goto out_free_bioset;
	}
	ib_dev->ibd_bd = bd;

	q = bdev_get_queue(bd);

	dev->dev_attrib.hw_block_size = bdev_logical_block_size(bd);
	dev->dev_attrib.hw_max_sectors = queue_max_hw_sectors(q);
	dev->dev_attrib.hw_queue_depth = q->nr_requests;

	if (target_configure_unmap_from_queue(&dev->dev_attrib, bd))
		pr_debug("IBLOCK: BLOCK Discard support available,"
			 " disabled by default\n");

	/*
	 * Enable write same emulation for IBLOCK and use 0xFFFF as
	 * the smaller WRITE_SAME(10) only has a two-byte block count.
	 */
	max_write_zeroes_sectors = bdev_write_zeroes_sectors(bd);
	if (max_write_zeroes_sectors)
		dev->dev_attrib.max_write_same_len = max_write_zeroes_sectors;
	else
		dev->dev_attrib.max_write_same_len = 0xFFFF;

	if (bdev_nonrot(bd))
		dev->dev_attrib.is_nonrot = 1;

	bi = bdev_get_integrity(bd);
	if (bi) {
		struct bio_set *bs = &ib_dev->ibd_bio_set;

		if (!strcmp(bi->profile->name, "T10-DIF-TYPE3-IP") ||
		    !strcmp(bi->profile->name, "T10-DIF-TYPE1-IP")) {
			pr_err("IBLOCK export of blk_integrity: %s not"
			       " supported\n", bi->profile->name);
			ret = -ENOSYS;
			goto out_blkdev_put;
		}

		if (!strcmp(bi->profile->name, "T10-DIF-TYPE3-CRC")) {
			dev->dev_attrib.pi_prot_type = TARGET_DIF_TYPE3_PROT;
		} else if (!strcmp(bi->profile->name, "T10-DIF-TYPE1-CRC")) {
			dev->dev_attrib.pi_prot_type = TARGET_DIF_TYPE1_PROT;
		}

		if (dev->dev_attrib.pi_prot_type) {
			if (bioset_integrity_create(bs, IBLOCK_BIO_POOL_SIZE) < 0) {
				pr_err("Unable to allocate bioset for PI\n");
				ret = -ENOMEM;
				goto out_blkdev_put;
			}
			pr_debug("IBLOCK setup BIP bs->bio_integrity_pool: %p\n",
				 &bs->bio_integrity_pool);
		}
		dev->dev_attrib.hw_pi_prot_type = dev->dev_attrib.pi_prot_type;
	}

	return 0;

out_blkdev_put:
	blkdev_put(ib_dev->ibd_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL);
out_free_bioset:
	bioset_exit(&ib_dev->ibd_bio_set);
out:
	return ret;
}

static void iblock_dev_call_rcu(struct rcu_head *p)
{
	struct se_device *dev = container_of(p, struct se_device, rcu_head);
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);

	kfree(ib_dev->ibd_plug);
	kfree(ib_dev);
}

static void iblock_free_device(struct se_device *dev)
{
	call_rcu(&dev->rcu_head, iblock_dev_call_rcu);
}

static void iblock_destroy_device(struct se_device *dev)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);

	if (ib_dev->ibd_bd != NULL)
		blkdev_put(ib_dev->ibd_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL);
	bioset_exit(&ib_dev->ibd_bio_set);
}

static struct se_dev_plug *iblock_plug_device(struct se_device *se_dev)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(se_dev);
	struct iblock_dev_plug *ib_dev_plug;

	/*
	 * Each se_device has a per cpu work this can be run from. We
	 * shouldn't have multiple threads on the same cpu calling this
	 * at the same time.
	 */
	ib_dev_plug = &ib_dev->ibd_plug[raw_smp_processor_id()];
	if (test_and_set_bit(IBD_PLUGF_PLUGGED, &ib_dev_plug->flags))
		return NULL;

	blk_start_plug(&ib_dev_plug->blk_plug);
	return &ib_dev_plug->se_plug;
}

static void iblock_unplug_device(struct se_dev_plug *se_plug)
{
	struct iblock_dev_plug *ib_dev_plug = container_of(se_plug,
					struct iblock_dev_plug, se_plug);

	blk_finish_plug(&ib_dev_plug->blk_plug);
	clear_bit(IBD_PLUGF_PLUGGED, &ib_dev_plug->flags);
}

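/*
 * Return the highest addressable LBA of the backing device, expressed in
 * units of the exported se_device block size.  The capacity is read in
 * backing-device logical blocks and rescaled when the two block sizes
 * differ.
 */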
static unsigned long long iblock_emulate_read_cap_with_block_size(
	struct se_device *dev,
	struct block_device *bd,
	struct request_queue *q)
{
	u32 block_size = bdev_logical_block_size(bd);
	unsigned long long blocks_long =
		div_u64(bdev_nr_bytes(bd), block_size) - 1;

	if (block_size == dev->dev_attrib.block_size)
		return blocks_long;

	switch (block_size) {
	case 4096:
		switch (dev->dev_attrib.block_size) {
		case 2048:
			blocks_long <<= 1;
			break;
		case 1024:
			blocks_long <<= 2;
			break;
		case 512:
			blocks_long <<= 3;
			break;
		default:
			break;
		}
		break;
	case 2048:
		switch (dev->dev_attrib.block_size) {
		case 4096:
			blocks_long >>= 1;
			break;
		case 1024:
			blocks_long <<= 1;
			break;
		case 512:
			blocks_long <<= 2;
			break;
		default:
			break;
		}
		break;
	case 1024:
		switch (dev->dev_attrib.block_size) {
		case 4096:
			blocks_long >>= 2;
			break;
		case 2048:
			blocks_long >>= 1;
			break;
		case 512:
			blocks_long <<= 1;
			break;
		default:
			break;
		}
		break;
	case 512:
		switch (dev->dev_attrib.block_size) {
		case 4096:
			blocks_long >>= 3;
			break;
		case 2048:
			blocks_long >>= 2;
			break;
		case 1024:
			blocks_long >>= 1;
			break;
		default:
			break;
		}
		break;
	default:
		break;
	}

	return blocks_long;
}

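/*
 * Called once per completed bio; the command is finished only when the
 * last reference in ibr->pending is dropped.  Any bio error recorded in
 * ib_bio_err_cnt turns the SCSI status into CHECK CONDITION.
 */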
static void iblock_complete_cmd(struct se_cmd *cmd)
{
	struct iblock_req *ibr = cmd->priv;
	u8 status;

	if (!refcount_dec_and_test(&ibr->pending))
		return;

	if (atomic_read(&ibr->ib_bio_err_cnt))
		status = SAM_STAT_CHECK_CONDITION;
	else
		status = SAM_STAT_GOOD;

	target_complete_cmd(cmd, status);
	kfree(ibr);
}

static void iblock_bio_done(struct bio *bio)
{
	struct se_cmd *cmd = bio->bi_private;
	struct iblock_req *ibr = cmd->priv;

	if (bio->bi_status) {
		pr_err("bio error: %p,  err: %d\n", bio, bio->bi_status);
		/*
		 * Bump the ib_bio_err_cnt and release bio.
		 */
		atomic_inc(&ibr->ib_bio_err_cnt);
		smp_mb__after_atomic();
	}

	bio_put(bio);

	iblock_complete_cmd(cmd);
}

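/*
 * Allocate a bio from the per-device bioset, aimed at the backing block
 * device and starting at @lba, with room for up to @sg_num segments
 * (capped by bio_max_segs()).  Completion is routed to iblock_bio_done().
 */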
static struct bio *iblock_get_bio(struct se_cmd *cmd, sector_t lba, u32 sg_num,
				  unsigned int opf)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(cmd->se_dev);
	struct bio *bio;

	/*
	 * Only allocate as many vector entries as the bio code allows us to,
	 * we'll loop later on until we have handled the whole request.
	 */
	bio = bio_alloc_bioset(ib_dev->ibd_bd, bio_max_segs(sg_num), opf,
			       GFP_NOIO, &ib_dev->ibd_bio_set);
	if (!bio) {
		pr_err("Unable to allocate memory for bio\n");
		return NULL;
	}

	bio->bi_private = cmd;
	bio->bi_end_io = &iblock_bio_done;
	bio->bi_iter.bi_sector = lba;

	return bio;
}

static void iblock_submit_bios(struct bio_list *list)
{
	struct blk_plug plug;
	struct bio *bio;
	/*
	 * The block layer handles nested plugs, so just plug/unplug to handle
	 * fabric drivers that didn't support batching and multi bio cmds.
	 */
	blk_start_plug(&plug);
	while ((bio = bio_list_pop(list)))
		submit_bio(bio);
	blk_finish_plug(&plug);
}

static void iblock_end_io_flush(struct bio *bio)
{
	struct se_cmd *cmd = bio->bi_private;

	if (bio->bi_status)
		pr_err("IBLOCK: cache flush failed: %d\n", bio->bi_status);

	if (cmd) {
		if (bio->bi_status)
			target_complete_cmd(cmd, SAM_STAT_CHECK_CONDITION);
		else
			target_complete_cmd(cmd, SAM_STAT_GOOD);
	}

	bio_put(bio);
}

/*
 * Implement SYNCHRONIZE CACHE.  Note that we can't handle lba ranges and must
 * always flush the whole cache.
 */
static sense_reason_t
iblock_execute_sync_cache(struct se_cmd *cmd)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(cmd->se_dev);
	int immed = (cmd->t_task_cdb[1] & 0x2);
	struct bio *bio;

	/*
	 * If the Immediate bit is set, queue up the GOOD response
	 * for this SYNCHRONIZE_CACHE op.
	 */
	if (immed)
		target_complete_cmd(cmd, SAM_STAT_GOOD);

	bio = bio_alloc(ib_dev->ibd_bd, 0, REQ_OP_WRITE | REQ_PREFLUSH,
			GFP_KERNEL);
	bio->bi_end_io = iblock_end_io_flush;
	if (!immed)
		bio->bi_private = cmd;
	submit_bio(bio);
	return 0;
}

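/*
 * UNMAP is passed straight through to the backing device as a discard,
 * after translating the exported LBA/length into 512-byte linux sectors.
 */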
static sense_reason_t
iblock_execute_unmap(struct se_cmd *cmd, sector_t lba, sector_t nolb)
{
	struct block_device *bdev = IBLOCK_DEV(cmd->se_dev)->ibd_bd;
	struct se_device *dev = cmd->se_dev;
	int ret;

	ret = blkdev_issue_discard(bdev,
				   target_to_linux_sector(dev, lba),
				   target_to_linux_sector(dev, nolb),
				   GFP_KERNEL, 0);
	if (ret < 0) {
		pr_err("blkdev_issue_discard() failed: %d\n", ret);
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
	}

	return 0;
}

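/*
 * WRITE_SAME fast path: if the single-block payload is entirely zeroes,
 * offload the whole range to blkdev_issue_zeroout() instead of building
 * bios.  Returns a non-zero sense code when the payload is not zero-filled
 * (or the zeroout fails) so iblock_execute_write_same() can fall back to
 * the bio-based path.
 */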
static sense_reason_t
iblock_execute_zero_out(struct block_device *bdev, struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	struct scatterlist *sg = &cmd->t_data_sg[0];
	unsigned char *buf, *not_zero;
	int ret;

	buf = kmap(sg_page(sg)) + sg->offset;
	if (!buf)
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
	/*
	 * Fall back to block_execute_write_same() slow-path if
	 * incoming WRITE_SAME payload does not contain zeros.
	 */
	not_zero = memchr_inv(buf, 0x00, cmd->data_length);
	kunmap(sg_page(sg));

	if (not_zero)
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

	ret = blkdev_issue_zeroout(bdev,
				target_to_linux_sector(dev, cmd->t_task_lba),
				target_to_linux_sector(dev,
					sbc_get_write_same_sectors(cmd)),
				GFP_KERNEL, BLKDEV_ZERO_NOUNMAP);
	if (ret)
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

	target_complete_cmd(cmd, SAM_STAT_GOOD);
	return 0;
}

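/*
 * Emulate WRITE SAME by repeatedly mapping the single-block payload into
 * bios across the requested range.  When the backing device supports
 * write-zeroes and the payload is all zeroes, iblock_execute_zero_out()
 * handles the request instead.  Protection information is not supported
 * on this path.
 */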
static sense_reason_t
iblock_execute_write_same(struct se_cmd *cmd)
{
	struct block_device *bdev = IBLOCK_DEV(cmd->se_dev)->ibd_bd;
	struct iblock_req *ibr;
	struct scatterlist *sg;
	struct bio *bio;
	struct bio_list list;
	struct se_device *dev = cmd->se_dev;
	sector_t block_lba = target_to_linux_sector(dev, cmd->t_task_lba);
	sector_t sectors = target_to_linux_sector(dev,
					sbc_get_write_same_sectors(cmd));

	if (cmd->prot_op) {
		pr_err("WRITE_SAME: Protection information with IBLOCK"
		       " backends not supported\n");
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
	}
	sg = &cmd->t_data_sg[0];

	if (cmd->t_data_nents > 1 ||
	    sg->length != cmd->se_dev->dev_attrib.block_size) {
		pr_err("WRITE_SAME: Illegal SGL t_data_nents: %u length: %u"
			" block_size: %u\n", cmd->t_data_nents, sg->length,
			cmd->se_dev->dev_attrib.block_size);
		return TCM_INVALID_CDB_FIELD;
	}

	if (bdev_write_zeroes_sectors(bdev)) {
		if (!iblock_execute_zero_out(bdev, cmd))
			return 0;
	}

	ibr = kzalloc(sizeof(struct iblock_req), GFP_KERNEL);
	if (!ibr)
		goto fail;
	cmd->priv = ibr;

	bio = iblock_get_bio(cmd, block_lba, 1, REQ_OP_WRITE);
	if (!bio)
		goto fail_free_ibr;

	bio_list_init(&list);
	bio_list_add(&list, bio);

	refcount_set(&ibr->pending, 1);

	while (sectors) {
		while (bio_add_page(bio, sg_page(sg), sg->length, sg->offset)
				!= sg->length) {

			bio = iblock_get_bio(cmd, block_lba, 1, REQ_OP_WRITE);
			if (!bio)
				goto fail_put_bios;

			refcount_inc(&ibr->pending);
			bio_list_add(&list, bio);
		}

		/* Always in 512 byte units for Linux/Block */
		block_lba += sg->length >> SECTOR_SHIFT;
		sectors -= sg->length >> SECTOR_SHIFT;
	}

	iblock_submit_bios(&list);
	return 0;

fail_put_bios:
	while ((bio = bio_list_pop(&list)))
		bio_put(bio);
fail_free_ibr:
	kfree(ibr);
fail:
	return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}

enum {
	Opt_udev_path, Opt_readonly, Opt_force, Opt_err
};

static match_table_t tokens = {
	{Opt_udev_path, "udev_path=%s"},
	{Opt_readonly, "readonly=%d"},
	{Opt_force, "force=%d"},
	{Opt_err, NULL}
};

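/*
 * Parse backstore parameters written to the device's configfs "control"
 * attribute.  An illustrative configuration from userspace (device and
 * path names are examples only) would look like:
 *
 *	echo "udev_path=/dev/sdb,readonly=1" > \
 *		/sys/kernel/config/target/core/iblock_0/my_dev/control
 *
 * Only udev_path= is required; force= is accepted but currently ignored.
 */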
static ssize_t iblock_set_configfs_dev_params(struct se_device *dev,
		const char *page, ssize_t count)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
	char *orig, *ptr, *arg_p, *opts;
	substring_t args[MAX_OPT_ARGS];
	int ret = 0, token;
	unsigned long tmp_readonly;

	opts = kstrdup(page, GFP_KERNEL);
	if (!opts)
		return -ENOMEM;

	orig = opts;

	while ((ptr = strsep(&opts, ",\n")) != NULL) {
		if (!*ptr)
			continue;

		token = match_token(ptr, tokens, args);
		switch (token) {
		case Opt_udev_path:
			if (ib_dev->ibd_bd) {
				pr_err("Unable to set udev_path= while"
					" ib_dev->ibd_bd exists\n");
				ret = -EEXIST;
				goto out;
			}
			if (match_strlcpy(ib_dev->ibd_udev_path, &args[0],
				SE_UDEV_PATH_LEN) == 0) {
				ret = -EINVAL;
				break;
			}
			pr_debug("IBLOCK: Referencing UDEV path: %s\n",
					ib_dev->ibd_udev_path);
			ib_dev->ibd_flags |= IBDF_HAS_UDEV_PATH;
			break;
		case Opt_readonly:
			arg_p = match_strdup(&args[0]);
			if (!arg_p) {
				ret = -ENOMEM;
				break;
			}
			ret = kstrtoul(arg_p, 0, &tmp_readonly);
			kfree(arg_p);
			if (ret < 0) {
				pr_err("kstrtoul() failed for"
						" readonly=\n");
				goto out;
			}
			ib_dev->ibd_readonly = tmp_readonly;
			pr_debug("IBLOCK: readonly: %d\n", ib_dev->ibd_readonly);
			break;
		case Opt_force:
			break;
		default:
			break;
		}
	}

out:
	kfree(orig);
	return (!ret) ? count : ret;
}

static ssize_t iblock_show_configfs_dev_params(struct se_device *dev, char *b)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
	struct block_device *bd = ib_dev->ibd_bd;
	ssize_t bl = 0;

	if (bd)
		bl += sprintf(b + bl, "iBlock device: %pg", bd);
	if (ib_dev->ibd_flags & IBDF_HAS_UDEV_PATH)
		bl += sprintf(b + bl, "  UDEV PATH: %s",
				ib_dev->ibd_udev_path);
	bl += sprintf(b + bl, "  readonly: %d\n", ib_dev->ibd_readonly);

	bl += sprintf(b + bl, "        ");
	if (bd) {
		bl += sprintf(b + bl, "Major: %d Minor: %d  %s\n",
			MAJOR(bd->bd_dev), MINOR(bd->bd_dev),
			"CLAIMED: IBLOCK");
	} else {
		bl += sprintf(b + bl, "Major: 0 Minor: 0\n");
	}

	return bl;
}

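/*
 * Attach a bio_integrity_payload to @bio and add the command's protection
 * information pages (cmd->t_prot_sg, walked via @miter) to it, sized to
 * cover exactly the data carried by this bio.
 */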
static int
iblock_alloc_bip(struct se_cmd *cmd, struct bio *bio,
		 struct sg_mapping_iter *miter)
{
	struct se_device *dev = cmd->se_dev;
	struct blk_integrity *bi;
	struct bio_integrity_payload *bip;
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
	int rc;
	size_t resid, len;

	bi = bdev_get_integrity(ib_dev->ibd_bd);
	if (!bi) {
		pr_err("Unable to locate bio_integrity\n");
		return -ENODEV;
	}

	bip = bio_integrity_alloc(bio, GFP_NOIO, bio_max_segs(cmd->t_prot_nents));
	if (IS_ERR(bip)) {
		pr_err("Unable to allocate bio_integrity_payload\n");
		return PTR_ERR(bip);
	}

	bip->bip_iter.bi_size = bio_integrity_bytes(bi, bio_sectors(bio));
	/* virtual start sector must be in integrity interval units */
	bip_set_seed(bip, bio->bi_iter.bi_sector >>
				  (bi->interval_exp - SECTOR_SHIFT));

	pr_debug("IBLOCK BIP Size: %u Sector: %llu\n", bip->bip_iter.bi_size,
		 (unsigned long long)bip->bip_iter.bi_sector);

	resid = bip->bip_iter.bi_size;
	while (resid > 0 && sg_miter_next(miter)) {

		len = min_t(size_t, miter->length, resid);
		rc = bio_integrity_add_page(bio, miter->page, len,
					    offset_in_page(miter->addr));
		if (rc != len) {
			pr_err("bio_integrity_add_page() failed; %d\n", rc);
			sg_miter_stop(miter);
			return -ENOMEM;
		}

		pr_debug("Added bio integrity page: %p length: %zu offset: %lu\n",
			  miter->page, len, offset_in_page(miter->addr));

		resid -= len;
		if (len < miter->length)
			miter->consumed -= miter->length - len;
	}
	sg_miter_stop(miter);

	return 0;
}

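/*
 * Main READ/WRITE path: map the command's data scatterlist into one or
 * more bios and submit them in batches of up to IBLOCK_MAX_BIO_PER_TASK.
 * ibr->pending starts at 2 so the command cannot complete before every
 * bio has been allocated; the extra reference is dropped by the final
 * iblock_complete_cmd() call.  For writes, REQ_FUA is set when the
 * backing device supports it and either the initiator requested FUA or
 * the device has no volatile write cache.
 */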
static sense_reason_t
iblock_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
		  enum dma_data_direction data_direction)
{
	struct se_device *dev = cmd->se_dev;
	sector_t block_lba = target_to_linux_sector(dev, cmd->t_task_lba);
	struct iblock_req *ibr;
	struct bio *bio;
	struct bio_list list;
	struct scatterlist *sg;
	u32 sg_num = sgl_nents;
	unsigned int opf;
	unsigned bio_cnt;
	int i, rc;
	struct sg_mapping_iter prot_miter;
	unsigned int miter_dir;

	if (data_direction == DMA_TO_DEVICE) {
		struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
		/*
		 * Force writethrough using REQ_FUA if a volatile write cache
		 * is not enabled, or if initiator set the Force Unit Access bit.
		 */
		opf = REQ_OP_WRITE;
		miter_dir = SG_MITER_TO_SG;
		if (bdev_fua(ib_dev->ibd_bd)) {
			if (cmd->se_cmd_flags & SCF_FUA)
				opf |= REQ_FUA;
			else if (!bdev_write_cache(ib_dev->ibd_bd))
				opf |= REQ_FUA;
		}
	} else {
		opf = REQ_OP_READ;
		miter_dir = SG_MITER_FROM_SG;
	}

	ibr = kzalloc(sizeof(struct iblock_req), GFP_KERNEL);
	if (!ibr)
		goto fail;
	cmd->priv = ibr;

	if (!sgl_nents) {
		refcount_set(&ibr->pending, 1);
		iblock_complete_cmd(cmd);
		return 0;
	}

	bio = iblock_get_bio(cmd, block_lba, sgl_nents, opf);
	if (!bio)
		goto fail_free_ibr;

	bio_list_init(&list);
	bio_list_add(&list, bio);

	refcount_set(&ibr->pending, 2);
	bio_cnt = 1;

	if (cmd->prot_type && dev->dev_attrib.pi_prot_type)
		sg_miter_start(&prot_miter, cmd->t_prot_sg, cmd->t_prot_nents,
			       miter_dir);

	for_each_sg(sgl, sg, sgl_nents, i) {
		/*
		 * XXX: if the length the device accepts is shorter than the
		 *	length of the S/G list entry this will cause an
		 *	endless loop.  Better hope no driver uses huge pages.
		 */
		while (bio_add_page(bio, sg_page(sg), sg->length, sg->offset)
				!= sg->length) {
			if (cmd->prot_type && dev->dev_attrib.pi_prot_type) {
				rc = iblock_alloc_bip(cmd, bio, &prot_miter);
				if (rc)
					goto fail_put_bios;
			}

			if (bio_cnt >= IBLOCK_MAX_BIO_PER_TASK) {
				iblock_submit_bios(&list);
				bio_cnt = 0;
			}

			bio = iblock_get_bio(cmd, block_lba, sg_num, opf);
			if (!bio)
				goto fail_put_bios;

			refcount_inc(&ibr->pending);
			bio_list_add(&list, bio);
			bio_cnt++;
		}

		/* Always in 512 byte units for Linux/Block */
		block_lba += sg->length >> SECTOR_SHIFT;
		sg_num--;
	}

	if (cmd->prot_type && dev->dev_attrib.pi_prot_type) {
		rc = iblock_alloc_bip(cmd, bio, &prot_miter);
		if (rc)
			goto fail_put_bios;
	}

	iblock_submit_bios(&list);
	iblock_complete_cmd(cmd);
	return 0;

fail_put_bios:
	while ((bio = bio_list_pop(&list)))
		bio_put(bio);
fail_free_ibr:
	kfree(ibr);
fail:
	return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}

static sector_t iblock_get_blocks(struct se_device *dev)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
	struct block_device *bd = ib_dev->ibd_bd;
	struct request_queue *q = bdev_get_queue(bd);

	return iblock_emulate_read_cap_with_block_size(dev, bd, q);
}

static sector_t iblock_get_alignment_offset_lbas(struct se_device *dev)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
	struct block_device *bd = ib_dev->ibd_bd;
	int ret;

	ret = bdev_alignment_offset(bd);
	if (ret == -1)
		return 0;

	/* convert offset-bytes to offset-lbas */
	return ret / bdev_logical_block_size(bd);
}

static unsigned int iblock_get_lbppbe(struct se_device *dev)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
	struct block_device *bd = ib_dev->ibd_bd;
	unsigned int logs_per_phys =
		bdev_physical_block_size(bd) / bdev_logical_block_size(bd);

	return ilog2(logs_per_phys);
}

static unsigned int iblock_get_io_min(struct se_device *dev)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
	struct block_device *bd = ib_dev->ibd_bd;

	return bdev_io_min(bd);
}

static unsigned int iblock_get_io_opt(struct se_device *dev)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
	struct block_device *bd = ib_dev->ibd_bd;

	return bdev_io_opt(bd);
}

static struct sbc_ops iblock_sbc_ops = {
	.execute_rw		= iblock_execute_rw,
	.execute_sync_cache	= iblock_execute_sync_cache,
	.execute_write_same	= iblock_execute_write_same,
	.execute_unmap		= iblock_execute_unmap,
};

static sense_reason_t
iblock_parse_cdb(struct se_cmd *cmd)
{
	return sbc_parse_cdb(cmd, &iblock_sbc_ops);
}

static bool iblock_get_write_cache(struct se_device *dev)
{
	return bdev_write_cache(IBLOCK_DEV(dev)->ibd_bd);
}

static const struct target_backend_ops iblock_ops = {
	.name			= "iblock",
	.inquiry_prod		= "IBLOCK",
	.inquiry_rev		= IBLOCK_VERSION,
	.owner			= THIS_MODULE,
	.attach_hba		= iblock_attach_hba,
	.detach_hba		= iblock_detach_hba,
	.alloc_device		= iblock_alloc_device,
	.configure_device	= iblock_configure_device,
	.destroy_device		= iblock_destroy_device,
	.free_device		= iblock_free_device,
	.plug_device		= iblock_plug_device,
	.unplug_device		= iblock_unplug_device,
	.parse_cdb		= iblock_parse_cdb,
	.set_configfs_dev_params = iblock_set_configfs_dev_params,
	.show_configfs_dev_params = iblock_show_configfs_dev_params,
	.get_device_type	= sbc_get_device_type,
	.get_blocks		= iblock_get_blocks,
	.get_alignment_offset_lbas = iblock_get_alignment_offset_lbas,
	.get_lbppbe		= iblock_get_lbppbe,
	.get_io_min		= iblock_get_io_min,
	.get_io_opt		= iblock_get_io_opt,
	.get_write_cache	= iblock_get_write_cache,
	.tb_dev_attrib_attrs	= sbc_attrib_attrs,
};

static int __init iblock_module_init(void)
{
	return transport_backend_register(&iblock_ops);
}

static void __exit iblock_module_exit(void)
{
	target_backend_unregister(&iblock_ops);
}

MODULE_DESCRIPTION("TCM IBLOCK subsystem plugin");
MODULE_AUTHOR("nab@Linux-iSCSI.org");
MODULE_LICENSE("GPL");

module_init(iblock_module_init);
module_exit(iblock_module_exit);