// SPDX-License-Identifier: GPL-2.0-or-later
/*******************************************************************************
 * Filename:  target_core_iblock.c
 *
 * This file contains the Storage Engine <-> Linux BlockIO transport
 * specific functions.
 *
 * (c) Copyright 2003-2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 ******************************************************************************/

#include <linux/string.h>
#include <linux/parser.h>
#include <linux/timer.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/bio.h>
#include <linux/genhd.h>
#include <linux/file.h>
#include <linux/module.h>
#include <scsi/scsi_proto.h>
#include <asm/unaligned.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>

#include "target_core_iblock.h"

#define IBLOCK_MAX_BIO_PER_TASK	 32	/* max # of bios to submit at a time */
#define IBLOCK_BIO_POOL_SIZE	128

static inline struct iblock_dev *IBLOCK_DEV(struct se_device *dev)
{
	return container_of(dev, struct iblock_dev, dev);
}


static int iblock_attach_hba(struct se_hba *hba, u32 host_id)
{
	pr_debug("CORE_HBA[%d] - TCM iBlock HBA Driver %s on"
		" Generic Target Core Stack %s\n", hba->hba_id,
		IBLOCK_VERSION, TARGET_CORE_VERSION);
	return 0;
}

static void iblock_detach_hba(struct se_hba *hba)
{
}

static struct se_device *iblock_alloc_device(struct se_hba *hba, const char *name)
{
	struct iblock_dev *ib_dev = NULL;

	ib_dev = kzalloc(sizeof(struct iblock_dev), GFP_KERNEL);
	if (!ib_dev) {
		pr_err("Unable to allocate struct iblock_dev\n");
		return NULL;
	}

	ib_dev->ibd_plug = kcalloc(nr_cpu_ids, sizeof(*ib_dev->ibd_plug),
				   GFP_KERNEL);
	if (!ib_dev->ibd_plug)
		goto free_dev;

	pr_debug("IBLOCK: Allocated ib_dev for %s\n", name);

	return &ib_dev->dev;

free_dev:
	kfree(ib_dev);
	return NULL;
}

static int iblock_configure_device(struct se_device *dev)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
	struct request_queue *q;
	struct block_device *bd = NULL;
	struct blk_integrity *bi;
	fmode_t mode;
	unsigned int max_write_zeroes_sectors;
	int ret;

	if (!(ib_dev->ibd_flags & IBDF_HAS_UDEV_PATH)) {
		pr_err("Missing udev_path= parameters for IBLOCK\n");
		return -EINVAL;
	}

	ret = bioset_init(&ib_dev->ibd_bio_set, IBLOCK_BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS);
	if (ret) {
		pr_err("IBLOCK: Unable to create bioset\n");
		goto out;
	}

	pr_debug("IBLOCK: Claiming struct block_device: %s\n",
			ib_dev->ibd_udev_path);

	mode = FMODE_READ|FMODE_EXCL;
	if (!ib_dev->ibd_readonly)
		mode |= FMODE_WRITE;
	else
		dev->dev_flags |= DF_READ_ONLY;

	bd = blkdev_get_by_path(ib_dev->ibd_udev_path, mode, ib_dev);
	if (IS_ERR(bd)) {
		ret = PTR_ERR(bd);
		goto out_free_bioset;
	}
	ib_dev->ibd_bd = bd;

	q = bdev_get_queue(bd);

	dev->dev_attrib.hw_block_size = bdev_logical_block_size(bd);
	dev->dev_attrib.hw_max_sectors = queue_max_hw_sectors(q);
	dev->dev_attrib.hw_queue_depth = q->nr_requests;

	if (target_configure_unmap_from_queue(&dev->dev_attrib, q))
		pr_debug("IBLOCK: BLOCK Discard support available,"
			 " disabled by default\n");

	/*
	 * Enable write same emulation for IBLOCK and use 0xFFFF because
	 * the smaller WRITE_SAME(10) only has a two-byte block count.
	 */
	max_write_zeroes_sectors = bdev_write_zeroes_sectors(bd);
	if (max_write_zeroes_sectors)
		dev->dev_attrib.max_write_same_len = max_write_zeroes_sectors;
	else
		dev->dev_attrib.max_write_same_len = 0xFFFF;

	if (blk_queue_nonrot(q))
		dev->dev_attrib.is_nonrot = 1;

	bi = bdev_get_integrity(bd);
	if (bi) {
		struct bio_set *bs = &ib_dev->ibd_bio_set;

		if (!strcmp(bi->profile->name, "T10-DIF-TYPE3-IP") ||
		    !strcmp(bi->profile->name, "T10-DIF-TYPE1-IP")) {
			pr_err("IBLOCK export of blk_integrity: %s not"
			       " supported\n", bi->profile->name);
			ret = -ENOSYS;
			goto out_blkdev_put;
		}

		if (!strcmp(bi->profile->name, "T10-DIF-TYPE3-CRC")) {
			dev->dev_attrib.pi_prot_type = TARGET_DIF_TYPE3_PROT;
		} else if (!strcmp(bi->profile->name, "T10-DIF-TYPE1-CRC")) {
			dev->dev_attrib.pi_prot_type = TARGET_DIF_TYPE1_PROT;
		}

		if (dev->dev_attrib.pi_prot_type) {
			if (bioset_integrity_create(bs, IBLOCK_BIO_POOL_SIZE) < 0) {
				pr_err("Unable to allocate bioset for PI\n");
				ret = -ENOMEM;
				goto out_blkdev_put;
			}
			pr_debug("IBLOCK setup BIP bs->bio_integrity_pool: %p\n",
				 &bs->bio_integrity_pool);
		}
		dev->dev_attrib.hw_pi_prot_type = dev->dev_attrib.pi_prot_type;
	}

	return 0;

out_blkdev_put:
	blkdev_put(ib_dev->ibd_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL);
out_free_bioset:
	bioset_exit(&ib_dev->ibd_bio_set);
out:
	return ret;
}

static void iblock_dev_call_rcu(struct rcu_head *p)
{
	struct se_device *dev = container_of(p, struct se_device, rcu_head);
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);

	kfree(ib_dev->ibd_plug);
	kfree(ib_dev);
}

static void iblock_free_device(struct se_device *dev)
{
	call_rcu(&dev->rcu_head, iblock_dev_call_rcu);
}

static void iblock_destroy_device(struct se_device *dev)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);

	if (ib_dev->ibd_bd != NULL)
		blkdev_put(ib_dev->ibd_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL);
	bioset_exit(&ib_dev->ibd_bio_set);
}

static struct se_dev_plug *iblock_plug_device(struct se_device *se_dev)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(se_dev);
	struct iblock_dev_plug *ib_dev_plug;

	/*
	 * Each se_device has a per-cpu work that this can be run from. We
	 * shouldn't have multiple threads on the same cpu calling this
	 * at the same time.
	 */
	ib_dev_plug = &ib_dev->ibd_plug[raw_smp_processor_id()];
	if (test_and_set_bit(IBD_PLUGF_PLUGGED, &ib_dev_plug->flags))
		return NULL;

	blk_start_plug(&ib_dev_plug->blk_plug);
	return &ib_dev_plug->se_plug;
}

static void iblock_unplug_device(struct se_dev_plug *se_plug)
{
	struct iblock_dev_plug *ib_dev_plug = container_of(se_plug,
					struct iblock_dev_plug, se_plug);

	blk_finish_plug(&ib_dev_plug->blk_plug);
	clear_bit(IBD_PLUGF_PLUGGED, &ib_dev_plug->flags);
}

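/*
 * Report the last addressable LBA (READ CAPACITY semantics) of the backing
 * device, rescaling from its logical block size to the configured
 * dev_attrib.block_size when the two differ.
 */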
static unsigned long long iblock_emulate_read_cap_with_block_size(
	struct se_device *dev,
	struct block_device *bd,
	struct request_queue *q)
{
	unsigned long long blocks_long = (div_u64(i_size_read(bd->bd_inode),
					bdev_logical_block_size(bd)) - 1);
	u32 block_size = bdev_logical_block_size(bd);

	if (block_size == dev->dev_attrib.block_size)
		return blocks_long;

	switch (block_size) {
	case 4096:
		switch (dev->dev_attrib.block_size) {
		case 2048:
			blocks_long <<= 1;
			break;
		case 1024:
			blocks_long <<= 2;
			break;
		case 512:
			blocks_long <<= 3;
			break;
		default:
			break;
		}
		break;
	case 2048:
		switch (dev->dev_attrib.block_size) {
		case 4096:
			blocks_long >>= 1;
			break;
		case 1024:
			blocks_long <<= 1;
			break;
		case 512:
			blocks_long <<= 2;
			break;
		default:
			break;
		}
		break;
	case 1024:
		switch (dev->dev_attrib.block_size) {
		case 4096:
			blocks_long >>= 2;
			break;
		case 2048:
			blocks_long >>= 1;
			break;
		case 512:
			blocks_long <<= 1;
			break;
		default:
			break;
		}
		break;
	case 512:
		switch (dev->dev_attrib.block_size) {
		case 4096:
			blocks_long >>= 3;
			break;
		case 2048:
			blocks_long >>= 2;
			break;
		case 1024:
			blocks_long >>= 1;
			break;
		default:
			break;
		}
		break;
	default:
		break;
	}

	return blocks_long;
}

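/*
 * Drop one reference on ibr->pending; once the last reference is gone,
 * complete the command with GOOD or CHECK CONDITION depending on whether
 * any bio reported an error, and free the iblock_req.
 */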
static void iblock_complete_cmd(struct se_cmd *cmd)
{
	struct iblock_req *ibr = cmd->priv;
	u8 status;

	if (!refcount_dec_and_test(&ibr->pending))
		return;

	if (atomic_read(&ibr->ib_bio_err_cnt))
		status = SAM_STAT_CHECK_CONDITION;
	else
		status = SAM_STAT_GOOD;

	target_complete_cmd(cmd, status);
	kfree(ibr);
}

static void iblock_bio_done(struct bio *bio)
{
	struct se_cmd *cmd = bio->bi_private;
	struct iblock_req *ibr = cmd->priv;

	if (bio->bi_status) {
		pr_err("bio error: %p,  err: %d\n", bio, bio->bi_status);
		/*
		 * Bump the ib_bio_err_cnt and release bio.
		 */
		atomic_inc(&ibr->ib_bio_err_cnt);
		smp_mb__after_atomic();
	}

	bio_put(bio);

	iblock_complete_cmd(cmd);
}

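/*
 * Allocate a bio from the per-device bioset, target it at the backing
 * block device and wire its completion to iblock_bio_done().
 */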
static struct bio *iblock_get_bio(struct se_cmd *cmd, sector_t lba, u32 sg_num,
				  unsigned int opf)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(cmd->se_dev);
	struct bio *bio;

	/*
	 * Only allocate as many vector entries as the bio code allows us to,
	 * we'll loop later on until we have handled the whole request.
	 */
	bio = bio_alloc_bioset(GFP_NOIO, bio_max_segs(sg_num),
				&ib_dev->ibd_bio_set);
	if (!bio) {
		pr_err("Unable to allocate memory for bio\n");
		return NULL;
	}

	bio_set_dev(bio, ib_dev->ibd_bd);
	bio->bi_private = cmd;
	bio->bi_end_io = &iblock_bio_done;
	bio->bi_iter.bi_sector = lba;
	bio->bi_opf = opf;

	return bio;
}

static void iblock_submit_bios(struct bio_list *list)
{
	struct blk_plug plug;
	struct bio *bio;
	/*
	 * The block layer handles nested plugs, so just plug/unplug to handle
	 * fabric drivers that didn't support batching and multi bio cmds.
	 */
	blk_start_plug(&plug);
	while ((bio = bio_list_pop(list)))
		submit_bio(bio);
	blk_finish_plug(&plug);
}

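/* Completion handler for the flush bio issued by iblock_execute_sync_cache() */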
static void iblock_end_io_flush(struct bio *bio)
{
	struct se_cmd *cmd = bio->bi_private;

	if (bio->bi_status)
		pr_err("IBLOCK: cache flush failed: %d\n", bio->bi_status);

	if (cmd) {
		if (bio->bi_status)
			target_complete_cmd(cmd, SAM_STAT_CHECK_CONDITION);
		else
			target_complete_cmd(cmd, SAM_STAT_GOOD);
	}

	bio_put(bio);
}

/*
 * Implement SYNCHRONIZE CACHE.  Note that we can't handle lba ranges and must
 * always flush the whole cache.
 */
static sense_reason_t
iblock_execute_sync_cache(struct se_cmd *cmd)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(cmd->se_dev);
	int immed = (cmd->t_task_cdb[1] & 0x2);
	struct bio *bio;

	/*
	 * If the Immediate bit is set, queue up the GOOD response
	 * for this SYNCHRONIZE_CACHE op.
	 */
	if (immed)
		target_complete_cmd(cmd, SAM_STAT_GOOD);

	bio = bio_alloc(GFP_KERNEL, 0);
	bio->bi_end_io = iblock_end_io_flush;
	bio_set_dev(bio, ib_dev->ibd_bd);
	bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
	if (!immed)
		bio->bi_private = cmd;
	submit_bio(bio);
	return 0;
}

static sense_reason_t
iblock_execute_unmap(struct se_cmd *cmd, sector_t lba, sector_t nolb)
{
	struct block_device *bdev = IBLOCK_DEV(cmd->se_dev)->ibd_bd;
	struct se_device *dev = cmd->se_dev;
	int ret;

	ret = blkdev_issue_discard(bdev,
				   target_to_linux_sector(dev, lba),
				   target_to_linux_sector(dev, nolb),
				   GFP_KERNEL, 0);
	if (ret < 0) {
		pr_err("blkdev_issue_discard() failed: %d\n", ret);
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
	}

	return 0;
}

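/*
 * WRITE_SAME fast path used when the backing device supports WRITE ZEROES:
 * if the payload block is all zeroes, hand the whole range to
 * blkdev_issue_zeroout() instead of submitting the payload once per block.
 */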
static sense_reason_t
iblock_execute_zero_out(struct block_device *bdev, struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	struct scatterlist *sg = &cmd->t_data_sg[0];
	unsigned char *buf, *not_zero;
	int ret;

	buf = kmap(sg_page(sg)) + sg->offset;
	if (!buf)
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
	/*
	 * Fall back to block_execute_write_same() slow-path if
	 * incoming WRITE_SAME payload does not contain zeros.
	 */
	not_zero = memchr_inv(buf, 0x00, cmd->data_length);
	kunmap(sg_page(sg));

	if (not_zero)
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

	ret = blkdev_issue_zeroout(bdev,
				target_to_linux_sector(dev, cmd->t_task_lba),
				target_to_linux_sector(dev,
					sbc_get_write_same_sectors(cmd)),
				GFP_KERNEL, BLKDEV_ZERO_NOUNMAP);
	if (ret)
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

	target_complete_cmd(cmd, SAM_STAT_GOOD);
	return 0;
}

static sense_reason_t
iblock_execute_write_same(struct se_cmd *cmd)
{
	struct block_device *bdev = IBLOCK_DEV(cmd->se_dev)->ibd_bd;
	struct iblock_req *ibr;
	struct scatterlist *sg;
	struct bio *bio;
	struct bio_list list;
	struct se_device *dev = cmd->se_dev;
	sector_t block_lba = target_to_linux_sector(dev, cmd->t_task_lba);
	sector_t sectors = target_to_linux_sector(dev,
					sbc_get_write_same_sectors(cmd));

	if (cmd->prot_op) {
		pr_err("WRITE_SAME: Protection information with IBLOCK"
		       " backends not supported\n");
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
	}
	sg = &cmd->t_data_sg[0];

	if (cmd->t_data_nents > 1 ||
	    sg->length != cmd->se_dev->dev_attrib.block_size) {
		pr_err("WRITE_SAME: Illegal SGL t_data_nents: %u length: %u"
			" block_size: %u\n", cmd->t_data_nents, sg->length,
			cmd->se_dev->dev_attrib.block_size);
		return TCM_INVALID_CDB_FIELD;
	}

	if (bdev_write_zeroes_sectors(bdev)) {
		if (!iblock_execute_zero_out(bdev, cmd))
			return 0;
	}

	ibr = kzalloc(sizeof(struct iblock_req), GFP_KERNEL);
	if (!ibr)
		goto fail;
	cmd->priv = ibr;

	bio = iblock_get_bio(cmd, block_lba, 1, REQ_OP_WRITE);
	if (!bio)
		goto fail_free_ibr;

	bio_list_init(&list);
	bio_list_add(&list, bio);

	refcount_set(&ibr->pending, 1);

	while (sectors) {
		while (bio_add_page(bio, sg_page(sg), sg->length, sg->offset)
				!= sg->length) {

			bio = iblock_get_bio(cmd, block_lba, 1, REQ_OP_WRITE);
			if (!bio)
				goto fail_put_bios;

			refcount_inc(&ibr->pending);
			bio_list_add(&list, bio);
		}

		/* Always in 512 byte units for Linux/Block */
		block_lba += sg->length >> SECTOR_SHIFT;
		sectors -= sg->length >> SECTOR_SHIFT;
	}

	iblock_submit_bios(&list);
	return 0;

fail_put_bios:
	while ((bio = bio_list_pop(&list)))
		bio_put(bio);
fail_free_ibr:
	kfree(ibr);
fail:
	return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}

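/* configfs device parameters accepted by the iblock backend */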
enum {
	Opt_udev_path, Opt_readonly, Opt_force, Opt_err
};

static match_table_t tokens = {
	{Opt_udev_path, "udev_path=%s"},
	{Opt_readonly, "readonly=%d"},
	{Opt_force, "force=%d"},
	{Opt_err, NULL}
};

static ssize_t iblock_set_configfs_dev_params(struct se_device *dev,
		const char *page, ssize_t count)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
	char *orig, *ptr, *arg_p, *opts;
	substring_t args[MAX_OPT_ARGS];
	int ret = 0, token;
	unsigned long tmp_readonly;

	opts = kstrdup(page, GFP_KERNEL);
	if (!opts)
		return -ENOMEM;

	orig = opts;

	while ((ptr = strsep(&opts, ",\n")) != NULL) {
		if (!*ptr)
			continue;

		token = match_token(ptr, tokens, args);
		switch (token) {
		case Opt_udev_path:
			if (ib_dev->ibd_bd) {
				pr_err("Unable to set udev_path= while"
					" ib_dev->ibd_bd exists\n");
				ret = -EEXIST;
				goto out;
			}
			if (match_strlcpy(ib_dev->ibd_udev_path, &args[0],
				SE_UDEV_PATH_LEN) == 0) {
				ret = -EINVAL;
				break;
			}
			pr_debug("IBLOCK: Referencing UDEV path: %s\n",
					ib_dev->ibd_udev_path);
			ib_dev->ibd_flags |= IBDF_HAS_UDEV_PATH;
			break;
		case Opt_readonly:
			arg_p = match_strdup(&args[0]);
			if (!arg_p) {
				ret = -ENOMEM;
				break;
			}
			ret = kstrtoul(arg_p, 0, &tmp_readonly);
			kfree(arg_p);
			if (ret < 0) {
				pr_err("kstrtoul() failed for"
						" readonly=\n");
				goto out;
			}
			ib_dev->ibd_readonly = tmp_readonly;
			pr_debug("IBLOCK: readonly: %d\n", ib_dev->ibd_readonly);
			break;
		case Opt_force:
			break;
		default:
			break;
		}
	}

out:
	kfree(orig);
	return (!ret) ? count : ret;
}

static ssize_t iblock_show_configfs_dev_params(struct se_device *dev, char *b)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
	struct block_device *bd = ib_dev->ibd_bd;
	ssize_t bl = 0;

	if (bd)
		bl += sprintf(b + bl, "iBlock device: %pg", bd);
	if (ib_dev->ibd_flags & IBDF_HAS_UDEV_PATH)
		bl += sprintf(b + bl, "  UDEV PATH: %s",
				ib_dev->ibd_udev_path);
	bl += sprintf(b + bl, "  readonly: %d\n", ib_dev->ibd_readonly);

	bl += sprintf(b + bl, "        ");
	if (bd) {
		bl += sprintf(b + bl, "Major: %d Minor: %d  %s\n",
			MAJOR(bd->bd_dev), MINOR(bd->bd_dev),
			"CLAIMED: IBLOCK");
	} else {
		bl += sprintf(b + bl, "Major: 0 Minor: 0\n");
	}

	return bl;
}

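/*
 * Attach a bio_integrity_payload to @bio and populate it with protection
 * information pages from the command's t_prot_sg list, walked via @miter.
 */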
static int
iblock_alloc_bip(struct se_cmd *cmd, struct bio *bio,
		 struct sg_mapping_iter *miter)
{
	struct se_device *dev = cmd->se_dev;
	struct blk_integrity *bi;
	struct bio_integrity_payload *bip;
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
	int rc;
	size_t resid, len;

	bi = bdev_get_integrity(ib_dev->ibd_bd);
	if (!bi) {
		pr_err("Unable to locate bio_integrity\n");
		return -ENODEV;
	}

	bip = bio_integrity_alloc(bio, GFP_NOIO, bio_max_segs(cmd->t_prot_nents));
	if (IS_ERR(bip)) {
		pr_err("Unable to allocate bio_integrity_payload\n");
		return PTR_ERR(bip);
	}

	bip->bip_iter.bi_size = bio_integrity_bytes(bi, bio_sectors(bio));
	/* virtual start sector must be in integrity interval units */
	bip_set_seed(bip, bio->bi_iter.bi_sector >>
				  (bi->interval_exp - SECTOR_SHIFT));

	pr_debug("IBLOCK BIP Size: %u Sector: %llu\n", bip->bip_iter.bi_size,
		 (unsigned long long)bip->bip_iter.bi_sector);

	resid = bip->bip_iter.bi_size;
	while (resid > 0 && sg_miter_next(miter)) {

		len = min_t(size_t, miter->length, resid);
		rc = bio_integrity_add_page(bio, miter->page, len,
					    offset_in_page(miter->addr));
		if (rc != len) {
			pr_err("bio_integrity_add_page() failed; %d\n", rc);
			sg_miter_stop(miter);
			return -ENOMEM;
		}

		pr_debug("Added bio integrity page: %p length: %zu offset: %lu\n",
			  miter->page, len, offset_in_page(miter->addr));

		resid -= len;
		if (len < miter->length)
			miter->consumed -= miter->length - len;
	}
	sg_miter_stop(miter);

	return 0;
}

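/*
 * Map the command's data scatterlist onto one or more bios and submit them,
 * flushing the list whenever IBLOCK_MAX_BIO_PER_TASK bios have been built
 * up.  Writes use REQ_FUA only when the device supports it and either forced
 * unit access was requested or no volatile write cache is present.
 */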
static sense_reason_t
iblock_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
		  enum dma_data_direction data_direction)
{
	struct se_device *dev = cmd->se_dev;
	sector_t block_lba = target_to_linux_sector(dev, cmd->t_task_lba);
	struct iblock_req *ibr;
	struct bio *bio;
	struct bio_list list;
	struct scatterlist *sg;
	u32 sg_num = sgl_nents;
	unsigned int opf;
	unsigned bio_cnt;
	int i, rc;
	struct sg_mapping_iter prot_miter;
	unsigned int miter_dir;

	if (data_direction == DMA_TO_DEVICE) {
		struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
		struct request_queue *q = bdev_get_queue(ib_dev->ibd_bd);
		/*
		 * Force writethrough using REQ_FUA if a volatile write cache
		 * is not enabled, or if initiator set the Force Unit Access bit.
		 */
		opf = REQ_OP_WRITE;
		miter_dir = SG_MITER_TO_SG;
		if (test_bit(QUEUE_FLAG_FUA, &q->queue_flags)) {
			if (cmd->se_cmd_flags & SCF_FUA)
				opf |= REQ_FUA;
			else if (!test_bit(QUEUE_FLAG_WC, &q->queue_flags))
				opf |= REQ_FUA;
		}
	} else {
		opf = REQ_OP_READ;
		miter_dir = SG_MITER_FROM_SG;
	}

	ibr = kzalloc(sizeof(struct iblock_req), GFP_KERNEL);
	if (!ibr)
		goto fail;
	cmd->priv = ibr;

	if (!sgl_nents) {
		refcount_set(&ibr->pending, 1);
		iblock_complete_cmd(cmd);
		return 0;
	}

	bio = iblock_get_bio(cmd, block_lba, sgl_nents, opf);
	if (!bio)
		goto fail_free_ibr;

	bio_list_init(&list);
	bio_list_add(&list, bio);

	refcount_set(&ibr->pending, 2);
	bio_cnt = 1;

	if (cmd->prot_type && dev->dev_attrib.pi_prot_type)
		sg_miter_start(&prot_miter, cmd->t_prot_sg, cmd->t_prot_nents,
			       miter_dir);

	for_each_sg(sgl, sg, sgl_nents, i) {
		/*
		 * XXX: if the length the device accepts is shorter than the
		 *	length of the S/G list entry this will cause an
		 *	endless loop.  Better hope no driver uses huge pages.
		 */
		while (bio_add_page(bio, sg_page(sg), sg->length, sg->offset)
				!= sg->length) {
			if (cmd->prot_type && dev->dev_attrib.pi_prot_type) {
				rc = iblock_alloc_bip(cmd, bio, &prot_miter);
				if (rc)
					goto fail_put_bios;
			}

			if (bio_cnt >= IBLOCK_MAX_BIO_PER_TASK) {
				iblock_submit_bios(&list);
				bio_cnt = 0;
			}

			bio = iblock_get_bio(cmd, block_lba, sg_num, opf);
			if (!bio)
				goto fail_put_bios;

			refcount_inc(&ibr->pending);
			bio_list_add(&list, bio);
			bio_cnt++;
		}

		/* Always in 512 byte units for Linux/Block */
		block_lba += sg->length >> SECTOR_SHIFT;
		sg_num--;
	}

	if (cmd->prot_type && dev->dev_attrib.pi_prot_type) {
		rc = iblock_alloc_bip(cmd, bio, &prot_miter);
		if (rc)
			goto fail_put_bios;
	}

	iblock_submit_bios(&list);
	iblock_complete_cmd(cmd);
	return 0;

fail_put_bios:
	while ((bio = bio_list_pop(&list)))
		bio_put(bio);
fail_free_ibr:
	kfree(ibr);
fail:
	return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}

static sector_t iblock_get_blocks(struct se_device *dev)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
	struct block_device *bd = ib_dev->ibd_bd;
	struct request_queue *q = bdev_get_queue(bd);

	return iblock_emulate_read_cap_with_block_size(dev, bd, q);
}

static sector_t iblock_get_alignment_offset_lbas(struct se_device *dev)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
	struct block_device *bd = ib_dev->ibd_bd;
	int ret;

	ret = bdev_alignment_offset(bd);
	if (ret == -1)
		return 0;

	/* convert offset-bytes to offset-lbas */
	return ret / bdev_logical_block_size(bd);
}

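/* LOGICAL BLOCKS PER PHYSICAL BLOCK EXPONENT: ilog2(physical / logical block size) */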
static unsigned int iblock_get_lbppbe(struct se_device *dev)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
	struct block_device *bd = ib_dev->ibd_bd;
	unsigned int logs_per_phys =
		bdev_physical_block_size(bd) / bdev_logical_block_size(bd);

	return ilog2(logs_per_phys);
}

static unsigned int iblock_get_io_min(struct se_device *dev)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
	struct block_device *bd = ib_dev->ibd_bd;

	return bdev_io_min(bd);
}

static unsigned int iblock_get_io_opt(struct se_device *dev)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
	struct block_device *bd = ib_dev->ibd_bd;

	return bdev_io_opt(bd);
}

static struct sbc_ops iblock_sbc_ops = {
	.execute_rw		= iblock_execute_rw,
	.execute_sync_cache	= iblock_execute_sync_cache,
	.execute_write_same	= iblock_execute_write_same,
	.execute_unmap		= iblock_execute_unmap,
};

static sense_reason_t
iblock_parse_cdb(struct se_cmd *cmd)
{
	return sbc_parse_cdb(cmd, &iblock_sbc_ops);
}

static bool iblock_get_write_cache(struct se_device *dev)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
	struct block_device *bd = ib_dev->ibd_bd;
	struct request_queue *q = bdev_get_queue(bd);

	return test_bit(QUEUE_FLAG_WC, &q->queue_flags);
}

static const struct target_backend_ops iblock_ops = {
	.name			= "iblock",
	.inquiry_prod		= "IBLOCK",
	.inquiry_rev		= IBLOCK_VERSION,
	.owner			= THIS_MODULE,
	.attach_hba		= iblock_attach_hba,
	.detach_hba		= iblock_detach_hba,
	.alloc_device		= iblock_alloc_device,
	.configure_device	= iblock_configure_device,
	.destroy_device		= iblock_destroy_device,
	.free_device		= iblock_free_device,
	.plug_device		= iblock_plug_device,
	.unplug_device		= iblock_unplug_device,
	.parse_cdb		= iblock_parse_cdb,
	.set_configfs_dev_params = iblock_set_configfs_dev_params,
	.show_configfs_dev_params = iblock_show_configfs_dev_params,
	.get_device_type	= sbc_get_device_type,
	.get_blocks		= iblock_get_blocks,
	.get_alignment_offset_lbas = iblock_get_alignment_offset_lbas,
	.get_lbppbe		= iblock_get_lbppbe,
	.get_io_min		= iblock_get_io_min,
	.get_io_opt		= iblock_get_io_opt,
	.get_write_cache	= iblock_get_write_cache,
	.tb_dev_attrib_attrs	= sbc_attrib_attrs,
};

static int __init iblock_module_init(void)
{
	return transport_backend_register(&iblock_ops);
}

static void __exit iblock_module_exit(void)
{
	target_backend_unregister(&iblock_ops);
}

MODULE_DESCRIPTION("TCM IBLOCK subsystem plugin");
MODULE_AUTHOR("nab@Linux-iSCSI.org");
MODULE_LICENSE("GPL");

module_init(iblock_module_init);
module_exit(iblock_module_exit);