// SPDX-License-Identifier: GPL-2.0-or-later
/*******************************************************************************
 * Filename:  target_core_iblock.c
 *
 * This file contains the Storage Engine <-> Linux BlockIO transport
 * specific functions.
 *
 * (c) Copyright 2003-2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 ******************************************************************************/

#include <linux/string.h>
#include <linux/parser.h>
#include <linux/timer.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/blk-integrity.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/bio.h>
#include <linux/genhd.h>
#include <linux/file.h>
#include <linux/module.h>
#include <scsi/scsi_proto.h>
#include <asm/unaligned.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>

#include "target_core_iblock.h"

#define IBLOCK_MAX_BIO_PER_TASK	 32	/* max # of bios to submit at a time */
#define IBLOCK_BIO_POOL_SIZE	128

static inline struct iblock_dev *IBLOCK_DEV(struct se_device *dev)
{
	return container_of(dev, struct iblock_dev, dev);
}


static int iblock_attach_hba(struct se_hba *hba, u32 host_id)
{
	pr_debug("CORE_HBA[%d] - TCM iBlock HBA Driver %s on"
		" Generic Target Core Stack %s\n", hba->hba_id,
		IBLOCK_VERSION, TARGET_CORE_VERSION);
	return 0;
}

static void iblock_detach_hba(struct se_hba *hba)
{
}

static struct se_device *iblock_alloc_device(struct se_hba *hba, const char *name)
{
	struct iblock_dev *ib_dev = NULL;

	ib_dev = kzalloc(sizeof(struct iblock_dev), GFP_KERNEL);
	if (!ib_dev) {
		pr_err("Unable to allocate struct iblock_dev\n");
		return NULL;
	}

	ib_dev->ibd_plug = kcalloc(nr_cpu_ids, sizeof(*ib_dev->ibd_plug),
				   GFP_KERNEL);
	if (!ib_dev->ibd_plug)
		goto free_dev;

	pr_debug("IBLOCK: Allocated ib_dev for %s\n", name);

	return &ib_dev->dev;

free_dev:
	kfree(ib_dev);
	return NULL;
}

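/*
 * Claim the backing block device named by the configfs udev_path=
 * parameter, initialize the per-device bioset, copy the queue limits
 * (logical block size, max HW sectors, queue depth) into se_dev_attrib,
 * probe discard and write-zeroes support, and mirror any T10 PI
 * integrity profile exposed by the block device.
 */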
static int iblock_configure_device(struct se_device *dev)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
	struct request_queue *q;
	struct block_device *bd = NULL;
	struct blk_integrity *bi;
	fmode_t mode;
	unsigned int max_write_zeroes_sectors;
	int ret;

	if (!(ib_dev->ibd_flags & IBDF_HAS_UDEV_PATH)) {
		pr_err("Missing udev_path= parameters for IBLOCK\n");
		return -EINVAL;
	}

	ret = bioset_init(&ib_dev->ibd_bio_set, IBLOCK_BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS);
	if (ret) {
		pr_err("IBLOCK: Unable to create bioset\n");
		goto out;
	}

	pr_debug("IBLOCK: Claiming struct block_device: %s\n",
			ib_dev->ibd_udev_path);

	mode = FMODE_READ|FMODE_EXCL;
	if (!ib_dev->ibd_readonly)
		mode |= FMODE_WRITE;
	else
		dev->dev_flags |= DF_READ_ONLY;

	bd = blkdev_get_by_path(ib_dev->ibd_udev_path, mode, ib_dev);
	if (IS_ERR(bd)) {
		ret = PTR_ERR(bd);
		goto out_free_bioset;
	}
	ib_dev->ibd_bd = bd;

	q = bdev_get_queue(bd);

	dev->dev_attrib.hw_block_size = bdev_logical_block_size(bd);
	dev->dev_attrib.hw_max_sectors = queue_max_hw_sectors(q);
	dev->dev_attrib.hw_queue_depth = q->nr_requests;

	if (target_configure_unmap_from_queue(&dev->dev_attrib, q))
		pr_debug("IBLOCK: BLOCK Discard support available,"
			 " disabled by default\n");

	/*
	 * Enable write same emulation for IBLOCK and use 0xFFFF since
	 * the smaller WRITE_SAME(10) only has a two-byte block count.
	 */
	max_write_zeroes_sectors = bdev_write_zeroes_sectors(bd);
	if (max_write_zeroes_sectors)
		dev->dev_attrib.max_write_same_len = max_write_zeroes_sectors;
	else
		dev->dev_attrib.max_write_same_len = 0xFFFF;

	if (blk_queue_nonrot(q))
		dev->dev_attrib.is_nonrot = 1;

	bi = bdev_get_integrity(bd);
	if (bi) {
		struct bio_set *bs = &ib_dev->ibd_bio_set;

		if (!strcmp(bi->profile->name, "T10-DIF-TYPE3-IP") ||
		    !strcmp(bi->profile->name, "T10-DIF-TYPE1-IP")) {
			pr_err("IBLOCK export of blk_integrity: %s not"
			       " supported\n", bi->profile->name);
			ret = -ENOSYS;
			goto out_blkdev_put;
		}

		if (!strcmp(bi->profile->name, "T10-DIF-TYPE3-CRC")) {
			dev->dev_attrib.pi_prot_type = TARGET_DIF_TYPE3_PROT;
		} else if (!strcmp(bi->profile->name, "T10-DIF-TYPE1-CRC")) {
			dev->dev_attrib.pi_prot_type = TARGET_DIF_TYPE1_PROT;
		}

		if (dev->dev_attrib.pi_prot_type) {
			if (bioset_integrity_create(bs, IBLOCK_BIO_POOL_SIZE) < 0) {
				pr_err("Unable to allocate bioset for PI\n");
				ret = -ENOMEM;
				goto out_blkdev_put;
			}
			pr_debug("IBLOCK setup BIP bs->bio_integrity_pool: %p\n",
				 &bs->bio_integrity_pool);
		}
		dev->dev_attrib.hw_pi_prot_type = dev->dev_attrib.pi_prot_type;
	}

	return 0;

out_blkdev_put:
	blkdev_put(ib_dev->ibd_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL);
out_free_bioset:
	bioset_exit(&ib_dev->ibd_bio_set);
out:
	return ret;
}

static void iblock_dev_call_rcu(struct rcu_head *p)
{
	struct se_device *dev = container_of(p, struct se_device, rcu_head);
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);

	kfree(ib_dev->ibd_plug);
	kfree(ib_dev);
}

static void iblock_free_device(struct se_device *dev)
{
	call_rcu(&dev->rcu_head, iblock_dev_call_rcu);
}

static void iblock_destroy_device(struct se_device *dev)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);

	if (ib_dev->ibd_bd != NULL)
		blkdev_put(ib_dev->ibd_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL);
	bioset_exit(&ib_dev->ibd_bio_set);
}

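/*
 * Per-CPU plugging: each possible CPU owns one iblock_dev_plug.  The
 * submitting context claims it with test_and_set_bit(IBD_PLUGF_PLUGGED),
 * so at most one caller per CPU holds a blk_plug open at a time; the
 * matching unplug below finishes the plug and clears the flag.
 */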
static struct se_dev_plug *iblock_plug_device(struct se_device *se_dev)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(se_dev);
	struct iblock_dev_plug *ib_dev_plug;

	/*
	 * Each se_device has a per-cpu work this can be run from. We
	 * shouldn't have multiple threads on the same cpu calling this
	 * at the same time.
	 */
	ib_dev_plug = &ib_dev->ibd_plug[raw_smp_processor_id()];
	if (test_and_set_bit(IBD_PLUGF_PLUGGED, &ib_dev_plug->flags))
		return NULL;

	blk_start_plug(&ib_dev_plug->blk_plug);
	return &ib_dev_plug->se_plug;
}

static void iblock_unplug_device(struct se_dev_plug *se_plug)
{
	struct iblock_dev_plug *ib_dev_plug = container_of(se_plug,
					struct iblock_dev_plug, se_plug);

	blk_finish_plug(&ib_dev_plug->blk_plug);
	clear_bit(IBD_PLUGF_PLUGGED, &ib_dev_plug->flags);
}

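/*
 * The backing device size is reported in its own logical block size; when
 * the exported dev_attrib.block_size differs, the last-LBA count is
 * rescaled by shifting.  For example, a backend with 4096-byte blocks
 * exported at 512 bytes reports roughly eight times as many logical
 * blocks (blocks_long <<= 3 below).
 */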
static unsigned long long iblock_emulate_read_cap_with_block_size(
	struct se_device *dev,
	struct block_device *bd,
	struct request_queue *q)
{
	unsigned long long blocks_long = (div_u64(i_size_read(bd->bd_inode),
					bdev_logical_block_size(bd)) - 1);
	u32 block_size = bdev_logical_block_size(bd);

	if (block_size == dev->dev_attrib.block_size)
		return blocks_long;

	switch (block_size) {
	case 4096:
		switch (dev->dev_attrib.block_size) {
		case 2048:
			blocks_long <<= 1;
			break;
		case 1024:
			blocks_long <<= 2;
			break;
		case 512:
			blocks_long <<= 3;
			break;
		default:
			break;
		}
		break;
	case 2048:
		switch (dev->dev_attrib.block_size) {
		case 4096:
			blocks_long >>= 1;
			break;
		case 1024:
			blocks_long <<= 1;
			break;
		case 512:
			blocks_long <<= 2;
			break;
		default:
			break;
		}
		break;
	case 1024:
		switch (dev->dev_attrib.block_size) {
		case 4096:
			blocks_long >>= 2;
			break;
		case 2048:
			blocks_long >>= 1;
			break;
		case 512:
			blocks_long <<= 1;
			break;
		default:
			break;
		}
		break;
	case 512:
		switch (dev->dev_attrib.block_size) {
		case 4096:
			blocks_long >>= 3;
			break;
		case 2048:
			blocks_long >>= 2;
			break;
		case 1024:
			blocks_long >>= 1;
			break;
		default:
			break;
		}
		break;
	default:
		break;
	}

	return blocks_long;
}

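/*
 * The submitting path and every issued bio each hold a reference on
 * ibr->pending; the final drop (from here or iblock_bio_done) completes
 * the se_cmd with CHECK CONDITION if any bio failed, otherwise GOOD, and
 * frees the iblock_req.
 */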
static void iblock_complete_cmd(struct se_cmd *cmd)
{
	struct iblock_req *ibr = cmd->priv;
	u8 status;

	if (!refcount_dec_and_test(&ibr->pending))
		return;

	if (atomic_read(&ibr->ib_bio_err_cnt))
		status = SAM_STAT_CHECK_CONDITION;
	else
		status = SAM_STAT_GOOD;

	target_complete_cmd(cmd, status);
	kfree(ibr);
}

static void iblock_bio_done(struct bio *bio)
{
	struct se_cmd *cmd = bio->bi_private;
	struct iblock_req *ibr = cmd->priv;

	if (bio->bi_status) {
		pr_err("bio error: %p,  err: %d\n", bio, bio->bi_status);
		/*
		 * Bump the ib_bio_err_cnt and release bio.
		 */
		atomic_inc(&ibr->ib_bio_err_cnt);
		smp_mb__after_atomic();
	}

	bio_put(bio);

	iblock_complete_cmd(cmd);
}

static struct bio *iblock_get_bio(struct se_cmd *cmd, sector_t lba, u32 sg_num,
				  unsigned int opf)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(cmd->se_dev);
	struct bio *bio;

	/*
	 * Only allocate as many vector entries as the bio code allows us to;
	 * we'll loop later on until we have handled the whole request.
	 */
	bio = bio_alloc_bioset(GFP_NOIO, bio_max_segs(sg_num),
				&ib_dev->ibd_bio_set);
	if (!bio) {
		pr_err("Unable to allocate memory for bio\n");
		return NULL;
	}

	bio_set_dev(bio, ib_dev->ibd_bd);
	bio->bi_private = cmd;
	bio->bi_end_io = &iblock_bio_done;
	bio->bi_iter.bi_sector = lba;
	bio->bi_opf = opf;

	return bio;
}

static void iblock_submit_bios(struct bio_list *list)
{
	struct blk_plug plug;
	struct bio *bio;
	/*
	 * The block layer handles nested plugs, so just plug/unplug to handle
	 * fabric drivers that don't support batching and multi bio cmds.
	 */
	blk_start_plug(&plug);
	while ((bio = bio_list_pop(list)))
		submit_bio(bio);
	blk_finish_plug(&plug);
}

static void iblock_end_io_flush(struct bio *bio)
{
	struct se_cmd *cmd = bio->bi_private;

	if (bio->bi_status)
		pr_err("IBLOCK: cache flush failed: %d\n", bio->bi_status);

	if (cmd) {
		if (bio->bi_status)
			target_complete_cmd(cmd, SAM_STAT_CHECK_CONDITION);
		else
			target_complete_cmd(cmd, SAM_STAT_GOOD);
	}

	bio_put(bio);
}

/*
 * Implement SYNCHRONIZE CACHE.  Note that we can't handle lba ranges and must
 * always flush the whole cache.
 */
static sense_reason_t
iblock_execute_sync_cache(struct se_cmd *cmd)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(cmd->se_dev);
	int immed = (cmd->t_task_cdb[1] & 0x2);
	struct bio *bio;

	/*
	 * If the Immediate bit is set, queue up the GOOD response
	 * for this SYNCHRONIZE_CACHE op.
	 */
	if (immed)
		target_complete_cmd(cmd, SAM_STAT_GOOD);

	bio = bio_alloc(GFP_KERNEL, 0);
	bio->bi_end_io = iblock_end_io_flush;
	bio_set_dev(bio, ib_dev->ibd_bd);
	bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
	if (!immed)
		bio->bi_private = cmd;
	submit_bio(bio);
	return 0;
}

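/*
 * UNMAP maps directly onto blkdev_issue_discard(); the LBA and block count
 * are converted to 512-byte Linux sectors via target_to_linux_sector()
 * before the discard is issued.
 */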
static sense_reason_t
iblock_execute_unmap(struct se_cmd *cmd, sector_t lba, sector_t nolb)
{
	struct block_device *bdev = IBLOCK_DEV(cmd->se_dev)->ibd_bd;
	struct se_device *dev = cmd->se_dev;
	int ret;

	ret = blkdev_issue_discard(bdev,
				   target_to_linux_sector(dev, lba),
				   target_to_linux_sector(dev, nolb),
				   GFP_KERNEL, 0);
	if (ret < 0) {
		pr_err("blkdev_issue_discard() failed: %d\n", ret);
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
	}

	return 0;
}

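/*
 * Fast path for WRITE_SAME when the payload is all zeroes: the single-block
 * payload is scanned with memchr_inv(), and if no non-zero byte is found
 * the whole range is handed to blkdev_issue_zeroout(BLKDEV_ZERO_NOUNMAP)
 * and completed as GOOD.  Otherwise the caller falls back to the bio-based
 * WRITE_SAME path below.
 */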
static sense_reason_t
iblock_execute_zero_out(struct block_device *bdev, struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	struct scatterlist *sg = &cmd->t_data_sg[0];
	unsigned char *buf, *not_zero;
	int ret;

	buf = kmap(sg_page(sg)) + sg->offset;
	if (!buf)
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
	/*
	 * Fall back to block_execute_write_same() slow-path if
	 * incoming WRITE_SAME payload does not contain zeros.
	 */
	not_zero = memchr_inv(buf, 0x00, cmd->data_length);
	kunmap(sg_page(sg));

	if (not_zero)
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

	ret = blkdev_issue_zeroout(bdev,
				target_to_linux_sector(dev, cmd->t_task_lba),
				target_to_linux_sector(dev,
					sbc_get_write_same_sectors(cmd)),
				GFP_KERNEL, BLKDEV_ZERO_NOUNMAP);
	if (ret)
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

	target_complete_cmd(cmd, SAM_STAT_GOOD);
	return 0;
}

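/*
 * Generic WRITE_SAME: protection information is rejected, the payload must
 * be a single SG entry of exactly one logical block, and the zero-out fast
 * path above is tried first when the device supports write-zeroes.
 * Otherwise the single block is added to bios repeatedly until the
 * requested number of sectors is covered.
 */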
static sense_reason_t
iblock_execute_write_same(struct se_cmd *cmd)
{
	struct block_device *bdev = IBLOCK_DEV(cmd->se_dev)->ibd_bd;
	struct iblock_req *ibr;
	struct scatterlist *sg;
	struct bio *bio;
	struct bio_list list;
	struct se_device *dev = cmd->se_dev;
	sector_t block_lba = target_to_linux_sector(dev, cmd->t_task_lba);
	sector_t sectors = target_to_linux_sector(dev,
					sbc_get_write_same_sectors(cmd));

	if (cmd->prot_op) {
		pr_err("WRITE_SAME: Protection information with IBLOCK"
		       " backends not supported\n");
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
	}
	sg = &cmd->t_data_sg[0];

	if (cmd->t_data_nents > 1 ||
	    sg->length != cmd->se_dev->dev_attrib.block_size) {
		pr_err("WRITE_SAME: Illegal SGL t_data_nents: %u length: %u"
			" block_size: %u\n", cmd->t_data_nents, sg->length,
			cmd->se_dev->dev_attrib.block_size);
		return TCM_INVALID_CDB_FIELD;
	}

	if (bdev_write_zeroes_sectors(bdev)) {
		if (!iblock_execute_zero_out(bdev, cmd))
			return 0;
	}

	ibr = kzalloc(sizeof(struct iblock_req), GFP_KERNEL);
	if (!ibr)
		goto fail;
	cmd->priv = ibr;

	bio = iblock_get_bio(cmd, block_lba, 1, REQ_OP_WRITE);
	if (!bio)
		goto fail_free_ibr;

	bio_list_init(&list);
	bio_list_add(&list, bio);

	refcount_set(&ibr->pending, 1);

	while (sectors) {
		while (bio_add_page(bio, sg_page(sg), sg->length, sg->offset)
				!= sg->length) {

			bio = iblock_get_bio(cmd, block_lba, 1, REQ_OP_WRITE);
			if (!bio)
				goto fail_put_bios;

			refcount_inc(&ibr->pending);
			bio_list_add(&list, bio);
		}

		/* Always in 512 byte units for Linux/Block */
		block_lba += sg->length >> SECTOR_SHIFT;
		sectors -= sg->length >> SECTOR_SHIFT;
	}

	iblock_submit_bios(&list);
	return 0;

fail_put_bios:
	while ((bio = bio_list_pop(&list)))
		bio_put(bio);
fail_free_ibr:
	kfree(ibr);
fail:
	return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}

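/*
 * These tokens are written to the backstore's configfs control attribute,
 * comma- or newline-separated to match the strsep() parsing below.  An
 * illustrative invocation (the configfs path is an assumption, not taken
 * from this file) would be roughly:
 *
 *   echo "udev_path=/dev/sdb,readonly=0" > \
 *	/sys/kernel/config/target/core/iblock_0/<dev_name>/control
 */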
enum {
	Opt_udev_path, Opt_readonly, Opt_force, Opt_err
};

static match_table_t tokens = {
	{Opt_udev_path, "udev_path=%s"},
	{Opt_readonly, "readonly=%d"},
	{Opt_force, "force=%d"},
	{Opt_err, NULL}
};

static ssize_t iblock_set_configfs_dev_params(struct se_device *dev,
		const char *page, ssize_t count)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
	char *orig, *ptr, *arg_p, *opts;
	substring_t args[MAX_OPT_ARGS];
	int ret = 0, token;
	unsigned long tmp_readonly;

	opts = kstrdup(page, GFP_KERNEL);
	if (!opts)
		return -ENOMEM;

	orig = opts;

	while ((ptr = strsep(&opts, ",\n")) != NULL) {
		if (!*ptr)
			continue;

		token = match_token(ptr, tokens, args);
		switch (token) {
		case Opt_udev_path:
			if (ib_dev->ibd_bd) {
				pr_err("Unable to set udev_path= while"
					" ib_dev->ibd_bd exists\n");
				ret = -EEXIST;
				goto out;
			}
			if (match_strlcpy(ib_dev->ibd_udev_path, &args[0],
				SE_UDEV_PATH_LEN) == 0) {
				ret = -EINVAL;
				break;
			}
			pr_debug("IBLOCK: Referencing UDEV path: %s\n",
					ib_dev->ibd_udev_path);
			ib_dev->ibd_flags |= IBDF_HAS_UDEV_PATH;
			break;
		case Opt_readonly:
			arg_p = match_strdup(&args[0]);
			if (!arg_p) {
				ret = -ENOMEM;
				break;
			}
			ret = kstrtoul(arg_p, 0, &tmp_readonly);
			kfree(arg_p);
			if (ret < 0) {
				pr_err("kstrtoul() failed for"
						" readonly=\n");
				goto out;
			}
			ib_dev->ibd_readonly = tmp_readonly;
			pr_debug("IBLOCK: readonly: %d\n", ib_dev->ibd_readonly);
			break;
		case Opt_force:
			break;
		default:
			break;
		}
	}

out:
	kfree(orig);
	return (!ret) ? count : ret;
}

static ssize_t iblock_show_configfs_dev_params(struct se_device *dev, char *b)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
	struct block_device *bd = ib_dev->ibd_bd;
	char buf[BDEVNAME_SIZE];
	ssize_t bl = 0;

	if (bd)
		bl += sprintf(b + bl, "iBlock device: %s",
				bdevname(bd, buf));
	if (ib_dev->ibd_flags & IBDF_HAS_UDEV_PATH)
		bl += sprintf(b + bl, "  UDEV PATH: %s",
				ib_dev->ibd_udev_path);
	bl += sprintf(b + bl, "  readonly: %d\n", ib_dev->ibd_readonly);

	bl += sprintf(b + bl, "        ");
	if (bd) {
		bl += sprintf(b + bl, "Major: %d Minor: %d  %s\n",
			MAJOR(bd->bd_dev), MINOR(bd->bd_dev),
			"CLAIMED: IBLOCK");
	} else {
		bl += sprintf(b + bl, "Major: 0 Minor: 0\n");
	}

	return bl;
}

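/*
 * Attach T10 PI metadata to a data bio: allocate a bio_integrity_payload
 * sized for the bio's data sectors, seed it with the start sector in
 * integrity-interval units, and add protection pages from the command's
 * t_prot_sg list via the caller-supplied sg_mapping_iter, consuming
 * entries only partially so the iterator can continue on the next bio.
 */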
static int
iblock_alloc_bip(struct se_cmd *cmd, struct bio *bio,
		 struct sg_mapping_iter *miter)
{
	struct se_device *dev = cmd->se_dev;
	struct blk_integrity *bi;
	struct bio_integrity_payload *bip;
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
	int rc;
	size_t resid, len;

	bi = bdev_get_integrity(ib_dev->ibd_bd);
	if (!bi) {
		pr_err("Unable to locate bio_integrity\n");
		return -ENODEV;
	}

	bip = bio_integrity_alloc(bio, GFP_NOIO, bio_max_segs(cmd->t_prot_nents));
	if (IS_ERR(bip)) {
		pr_err("Unable to allocate bio_integrity_payload\n");
		return PTR_ERR(bip);
	}

	bip->bip_iter.bi_size = bio_integrity_bytes(bi, bio_sectors(bio));
	/* virtual start sector must be in integrity interval units */
	bip_set_seed(bip, bio->bi_iter.bi_sector >>
				  (bi->interval_exp - SECTOR_SHIFT));

	pr_debug("IBLOCK BIP Size: %u Sector: %llu\n", bip->bip_iter.bi_size,
		 (unsigned long long)bip->bip_iter.bi_sector);

	resid = bip->bip_iter.bi_size;
	while (resid > 0 && sg_miter_next(miter)) {

		len = min_t(size_t, miter->length, resid);
		rc = bio_integrity_add_page(bio, miter->page, len,
					    offset_in_page(miter->addr));
		if (rc != len) {
			pr_err("bio_integrity_add_page() failed; %d\n", rc);
			sg_miter_stop(miter);
			return -ENOMEM;
		}

		pr_debug("Added bio integrity page: %p length: %zu offset: %lu\n",
			  miter->page, len, offset_in_page(miter->addr));

		resid -= len;
		if (len < miter->length)
			miter->consumed -= miter->length - len;
	}
	sg_miter_stop(miter);

	return 0;
}

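/*
 * Main READ/WRITE path: writes use REQ_OP_WRITE and, when the queue
 * supports FUA, get REQ_FUA if the initiator set the FUA bit or the queue
 * has no volatile write cache; reads use REQ_OP_READ.  The data SG list is
 * packed into bios (submitted in batches of at most
 * IBLOCK_MAX_BIO_PER_TASK), PI metadata is attached per bio when
 * protection is enabled, and completion is tracked through ibr->pending.
 */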
static sense_reason_t
iblock_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
		  enum dma_data_direction data_direction)
{
	struct se_device *dev = cmd->se_dev;
	sector_t block_lba = target_to_linux_sector(dev, cmd->t_task_lba);
	struct iblock_req *ibr;
	struct bio *bio;
	struct bio_list list;
	struct scatterlist *sg;
	u32 sg_num = sgl_nents;
	unsigned int opf;
	unsigned bio_cnt;
	int i, rc;
	struct sg_mapping_iter prot_miter;
	unsigned int miter_dir;

	if (data_direction == DMA_TO_DEVICE) {
		struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
		struct request_queue *q = bdev_get_queue(ib_dev->ibd_bd);
		/*
		 * Force writethrough using REQ_FUA if a volatile write cache
		 * is not enabled, or if initiator set the Force Unit Access bit.
		 */
		opf = REQ_OP_WRITE;
		miter_dir = SG_MITER_TO_SG;
		if (test_bit(QUEUE_FLAG_FUA, &q->queue_flags)) {
			if (cmd->se_cmd_flags & SCF_FUA)
				opf |= REQ_FUA;
			else if (!test_bit(QUEUE_FLAG_WC, &q->queue_flags))
				opf |= REQ_FUA;
		}
	} else {
		opf = REQ_OP_READ;
		miter_dir = SG_MITER_FROM_SG;
	}

	ibr = kzalloc(sizeof(struct iblock_req), GFP_KERNEL);
	if (!ibr)
		goto fail;
	cmd->priv = ibr;

	if (!sgl_nents) {
		refcount_set(&ibr->pending, 1);
		iblock_complete_cmd(cmd);
		return 0;
	}

	bio = iblock_get_bio(cmd, block_lba, sgl_nents, opf);
	if (!bio)
		goto fail_free_ibr;

	bio_list_init(&list);
	bio_list_add(&list, bio);

	refcount_set(&ibr->pending, 2);
	bio_cnt = 1;

	if (cmd->prot_type && dev->dev_attrib.pi_prot_type)
		sg_miter_start(&prot_miter, cmd->t_prot_sg, cmd->t_prot_nents,
			       miter_dir);

	for_each_sg(sgl, sg, sgl_nents, i) {
		/*
		 * XXX: if the length the device accepts is shorter than the
		 *	length of the S/G list entry this will cause an
		 *	endless loop.  Better hope no driver uses huge pages.
		 */
		while (bio_add_page(bio, sg_page(sg), sg->length, sg->offset)
				!= sg->length) {
			if (cmd->prot_type && dev->dev_attrib.pi_prot_type) {
				rc = iblock_alloc_bip(cmd, bio, &prot_miter);
				if (rc)
					goto fail_put_bios;
			}

			if (bio_cnt >= IBLOCK_MAX_BIO_PER_TASK) {
				iblock_submit_bios(&list);
				bio_cnt = 0;
			}

			bio = iblock_get_bio(cmd, block_lba, sg_num, opf);
			if (!bio)
				goto fail_put_bios;

			refcount_inc(&ibr->pending);
			bio_list_add(&list, bio);
			bio_cnt++;
		}

		/* Always in 512 byte units for Linux/Block */
		block_lba += sg->length >> SECTOR_SHIFT;
		sg_num--;
	}

	if (cmd->prot_type && dev->dev_attrib.pi_prot_type) {
		rc = iblock_alloc_bip(cmd, bio, &prot_miter);
		if (rc)
			goto fail_put_bios;
	}

	iblock_submit_bios(&list);
	iblock_complete_cmd(cmd);
	return 0;

fail_put_bios:
	while ((bio = bio_list_pop(&list)))
		bio_put(bio);
fail_free_ibr:
	kfree(ibr);
fail:
	return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}

static sector_t iblock_get_blocks(struct se_device *dev)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
	struct block_device *bd = ib_dev->ibd_bd;
	struct request_queue *q = bdev_get_queue(bd);

	return iblock_emulate_read_cap_with_block_size(dev, bd, q);
}

static sector_t iblock_get_alignment_offset_lbas(struct se_device *dev)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
	struct block_device *bd = ib_dev->ibd_bd;
	int ret;

	ret = bdev_alignment_offset(bd);
	if (ret == -1)
		return 0;

	/* convert offset-bytes to offset-lbas */
	return ret / bdev_logical_block_size(bd);
}

static unsigned int iblock_get_lbppbe(struct se_device *dev)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
	struct block_device *bd = ib_dev->ibd_bd;
	unsigned int logs_per_phys =
		bdev_physical_block_size(bd) / bdev_logical_block_size(bd);

	return ilog2(logs_per_phys);
}

static unsigned int iblock_get_io_min(struct se_device *dev)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
	struct block_device *bd = ib_dev->ibd_bd;

	return bdev_io_min(bd);
}

static unsigned int iblock_get_io_opt(struct se_device *dev)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
	struct block_device *bd = ib_dev->ibd_bd;

	return bdev_io_opt(bd);
}

static struct sbc_ops iblock_sbc_ops = {
	.execute_rw		= iblock_execute_rw,
	.execute_sync_cache	= iblock_execute_sync_cache,
	.execute_write_same	= iblock_execute_write_same,
	.execute_unmap		= iblock_execute_unmap,
};

static sense_reason_t
iblock_parse_cdb(struct se_cmd *cmd)
{
	return sbc_parse_cdb(cmd, &iblock_sbc_ops);
}

static bool iblock_get_write_cache(struct se_device *dev)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
	struct block_device *bd = ib_dev->ibd_bd;
	struct request_queue *q = bdev_get_queue(bd);

	return test_bit(QUEUE_FLAG_WC, &q->queue_flags);
}

static const struct target_backend_ops iblock_ops = {
	.name			= "iblock",
	.inquiry_prod		= "IBLOCK",
	.inquiry_rev		= IBLOCK_VERSION,
	.owner			= THIS_MODULE,
	.attach_hba		= iblock_attach_hba,
	.detach_hba		= iblock_detach_hba,
	.alloc_device		= iblock_alloc_device,
	.configure_device	= iblock_configure_device,
	.destroy_device		= iblock_destroy_device,
	.free_device		= iblock_free_device,
	.plug_device		= iblock_plug_device,
	.unplug_device		= iblock_unplug_device,
	.parse_cdb		= iblock_parse_cdb,
	.set_configfs_dev_params = iblock_set_configfs_dev_params,
	.show_configfs_dev_params = iblock_show_configfs_dev_params,
	.get_device_type	= sbc_get_device_type,
	.get_blocks		= iblock_get_blocks,
	.get_alignment_offset_lbas = iblock_get_alignment_offset_lbas,
	.get_lbppbe		= iblock_get_lbppbe,
	.get_io_min		= iblock_get_io_min,
	.get_io_opt		= iblock_get_io_opt,
	.get_write_cache	= iblock_get_write_cache,
	.tb_dev_attrib_attrs	= sbc_attrib_attrs,
};

static int __init iblock_module_init(void)
{
	return transport_backend_register(&iblock_ops);
}

static void __exit iblock_module_exit(void)
{
	target_backend_unregister(&iblock_ops);
}

MODULE_DESCRIPTION("TCM IBLOCK subsystem plugin");
MODULE_AUTHOR("nab@Linux-iSCSI.org");
MODULE_LICENSE("GPL");

module_init(iblock_module_init);
module_exit(iblock_module_exit);