11a59d1b8SThomas Gleixner // SPDX-License-Identifier: GPL-2.0-or-later
2c66ac9dbSNicholas Bellinger /*******************************************************************************
3c66ac9dbSNicholas Bellinger  * Filename:  target_core_iblock.c
4c66ac9dbSNicholas Bellinger  *
5c66ac9dbSNicholas Bellinger  * This file contains the Storage Engine  <-> Linux BlockIO transport
6c66ac9dbSNicholas Bellinger  * specific functions.
7c66ac9dbSNicholas Bellinger  *
84c76251eSNicholas Bellinger  * (c) Copyright 2003-2013 Datera, Inc.
9c66ac9dbSNicholas Bellinger  *
10c66ac9dbSNicholas Bellinger  * Nicholas A. Bellinger <nab@kernel.org>
11c66ac9dbSNicholas Bellinger  *
12c66ac9dbSNicholas Bellinger  ******************************************************************************/
13c66ac9dbSNicholas Bellinger 
14c66ac9dbSNicholas Bellinger #include <linux/string.h>
15c66ac9dbSNicholas Bellinger #include <linux/parser.h>
16c66ac9dbSNicholas Bellinger #include <linux/timer.h>
17c66ac9dbSNicholas Bellinger #include <linux/fs.h>
18c66ac9dbSNicholas Bellinger #include <linux/blkdev.h>
19fe45e630SChristoph Hellwig #include <linux/blk-integrity.h>
20c66ac9dbSNicholas Bellinger #include <linux/slab.h>
21c66ac9dbSNicholas Bellinger #include <linux/spinlock.h>
22c66ac9dbSNicholas Bellinger #include <linux/bio.h>
23c66ac9dbSNicholas Bellinger #include <linux/file.h>
24827509e3SPaul Gortmaker #include <linux/module.h>
2524b83debSChristoph Hellwig #include <linux/scatterlist.h>
26394f8118SMike Christie #include <linux/pr.h>
27ba929992SBart Van Assche #include <scsi/scsi_proto.h>
28394f8118SMike Christie #include <scsi/scsi_common.h>
2914150a6bSChristoph Hellwig #include <asm/unaligned.h>
30c66ac9dbSNicholas Bellinger 
31c66ac9dbSNicholas Bellinger #include <target/target_core_base.h>
32c4795fb2SChristoph Hellwig #include <target/target_core_backend.h>
33c66ac9dbSNicholas Bellinger 
34c66ac9dbSNicholas Bellinger #include "target_core_iblock.h"
35394f8118SMike Christie #include "target_core_pr.h"
36c66ac9dbSNicholas Bellinger 
37d5b4a21bSChristoph Hellwig #define IBLOCK_MAX_BIO_PER_TASK	 32	/* max # of bios to submit at a time */
38d5b4a21bSChristoph Hellwig #define IBLOCK_BIO_POOL_SIZE	128
39d5b4a21bSChristoph Hellwig 
400fd97ccfSChristoph Hellwig static inline struct iblock_dev *IBLOCK_DEV(struct se_device *dev)
410fd97ccfSChristoph Hellwig {
420fd97ccfSChristoph Hellwig 	return container_of(dev, struct iblock_dev, dev);
430fd97ccfSChristoph Hellwig }
440fd97ccfSChristoph Hellwig 
450fd97ccfSChristoph Hellwig 
46c66ac9dbSNicholas Bellinger static int iblock_attach_hba(struct se_hba *hba, u32 host_id)
47c66ac9dbSNicholas Bellinger {
486708bb27SAndy Grover 	pr_debug("CORE_HBA[%d] - TCM iBlock HBA Driver %s on"
49c66ac9dbSNicholas Bellinger 		" Generic Target Core Stack %s\n", hba->hba_id,
50ce8dd25dSChristoph Hellwig 		IBLOCK_VERSION, TARGET_CORE_VERSION);
51c66ac9dbSNicholas Bellinger 	return 0;
52c66ac9dbSNicholas Bellinger }
53c66ac9dbSNicholas Bellinger 
54c66ac9dbSNicholas Bellinger static void iblock_detach_hba(struct se_hba *hba)
55c66ac9dbSNicholas Bellinger {
56c66ac9dbSNicholas Bellinger }
57c66ac9dbSNicholas Bellinger 
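/*
 * Allocate the iblock backend private data for a new se_device, including
 * the per-cpu array of bio plugs used by iblock_plug_device().
 */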
580fd97ccfSChristoph Hellwig static struct se_device *iblock_alloc_device(struct se_hba *hba, const char *name)
59c66ac9dbSNicholas Bellinger {
60c66ac9dbSNicholas Bellinger 	struct iblock_dev *ib_dev = NULL;
61c66ac9dbSNicholas Bellinger 
62c66ac9dbSNicholas Bellinger 	ib_dev = kzalloc(sizeof(struct iblock_dev), GFP_KERNEL);
636708bb27SAndy Grover 	if (!ib_dev) {
646708bb27SAndy Grover 		pr_err("Unable to allocate struct iblock_dev\n");
65c66ac9dbSNicholas Bellinger 		return NULL;
66c66ac9dbSNicholas Bellinger 	}
67c66ac9dbSNicholas Bellinger 
68415ccd98SMike Christie 	ib_dev->ibd_plug = kcalloc(nr_cpu_ids, sizeof(*ib_dev->ibd_plug),
69415ccd98SMike Christie 				   GFP_KERNEL);
70415ccd98SMike Christie 	if (!ib_dev->ibd_plug)
71415ccd98SMike Christie 		goto free_dev;
72415ccd98SMike Christie 
736708bb27SAndy Grover 	pr_debug("IBLOCK: Allocated ib_dev for %s\n", name);
74c66ac9dbSNicholas Bellinger 
750fd97ccfSChristoph Hellwig 	return &ib_dev->dev;
76415ccd98SMike Christie 
77415ccd98SMike Christie free_dev:
78415ccd98SMike Christie 	kfree(ib_dev);
79415ccd98SMike Christie 	return NULL;
80c66ac9dbSNicholas Bellinger }
81c66ac9dbSNicholas Bellinger 
82d7c382c5SMike Christie static bool iblock_configure_unmap(struct se_device *dev)
83d7c382c5SMike Christie {
84d7c382c5SMike Christie 	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
85d7c382c5SMike Christie 
86d7c382c5SMike Christie 	return target_configure_unmap_from_queue(&dev->dev_attrib,
87d7c382c5SMike Christie 						 ib_dev->ibd_bd);
88d7c382c5SMike Christie }
89d7c382c5SMike Christie 
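/*
 * Claim the backing struct block_device named by udev_path, set up the
 * per-device bioset, copy the queue limits into the se_device attributes,
 * and enable protection information passthrough if the backing device
 * exposes a supported blk_integrity profile.
 */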
900fd97ccfSChristoph Hellwig static int iblock_configure_device(struct se_device *dev)
91c66ac9dbSNicholas Bellinger {
920fd97ccfSChristoph Hellwig 	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
93c66ac9dbSNicholas Bellinger 	struct request_queue *q;
940fd97ccfSChristoph Hellwig 	struct block_device *bd = NULL;
95ecebbf6cSNicholas Bellinger 	struct blk_integrity *bi;
9644bfd018SAndy Grover 	fmode_t mode;
972237498fSNicholas Bellinger 	unsigned int max_write_zeroes_sectors;
988f13142aSColin Ian King 	int ret;
99c66ac9dbSNicholas Bellinger 
1000fd97ccfSChristoph Hellwig 	if (!(ib_dev->ibd_flags & IBDF_HAS_UDEV_PATH)) {
1010fd97ccfSChristoph Hellwig 		pr_err("Missing udev_path= parameters for IBLOCK\n");
1020fd97ccfSChristoph Hellwig 		return -EINVAL;
103c66ac9dbSNicholas Bellinger 	}
104d5b4a21bSChristoph Hellwig 
105a47a28b7SKent Overstreet 	ret = bioset_init(&ib_dev->ibd_bio_set, IBLOCK_BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS);
106a47a28b7SKent Overstreet 	if (ret) {
1070fd97ccfSChristoph Hellwig 		pr_err("IBLOCK: Unable to create bioset\n");
1080fd97ccfSChristoph Hellwig 		goto out;
109c66ac9dbSNicholas Bellinger 	}
1100fd97ccfSChristoph Hellwig 
1116708bb27SAndy Grover 	pr_debug("IBLOCK: Claiming struct block_device: %s\n",
112c66ac9dbSNicholas Bellinger 			ib_dev->ibd_udev_path);
113c66ac9dbSNicholas Bellinger 
11444bfd018SAndy Grover 	mode = FMODE_READ|FMODE_EXCL;
11544bfd018SAndy Grover 	if (!ib_dev->ibd_readonly)
11644bfd018SAndy Grover 		mode |= FMODE_WRITE;
117eeeb9522SNicholas Bellinger 	else
118eeeb9522SNicholas Bellinger 		dev->dev_flags |= DF_READ_ONLY;
11944bfd018SAndy Grover 
12044bfd018SAndy Grover 	bd = blkdev_get_by_path(ib_dev->ibd_udev_path, mode, ib_dev);
121613640e4SNicholas Bellinger 	if (IS_ERR(bd)) {
122613640e4SNicholas Bellinger 		ret = PTR_ERR(bd);
1230fd97ccfSChristoph Hellwig 		goto out_free_bioset;
124613640e4SNicholas Bellinger 	}
125c66ac9dbSNicholas Bellinger 	ib_dev->ibd_bd = bd;
126c66ac9dbSNicholas Bellinger 
1270fd97ccfSChristoph Hellwig 	q = bdev_get_queue(bd);
1280fd97ccfSChristoph Hellwig 
1290fd97ccfSChristoph Hellwig 	dev->dev_attrib.hw_block_size = bdev_logical_block_size(bd);
1309375031eSAnastasia Kovaleva 	dev->dev_attrib.hw_max_sectors = mult_frac(queue_max_hw_sectors(q),
1319375031eSAnastasia Kovaleva 			SECTOR_SIZE,
1329375031eSAnastasia Kovaleva 			dev->dev_attrib.hw_block_size);
1330fd97ccfSChristoph Hellwig 	dev->dev_attrib.hw_queue_depth = q->nr_requests;
134c66ac9dbSNicholas Bellinger 
135f6970ad3SNicholas Bellinger 	/*
136f6970ad3SNicholas Bellinger 	 * Enable write same emulation for IBLOCK and default to 0xFFFF, since
137f6970ad3SNicholas Bellinger 	 * the smaller WRITE_SAME(10) only has a two-byte block count.
138f6970ad3SNicholas Bellinger 	 */
1392237498fSNicholas Bellinger 	max_write_zeroes_sectors = bdev_write_zeroes_sectors(bd);
1402237498fSNicholas Bellinger 	if (max_write_zeroes_sectors)
1412237498fSNicholas Bellinger 		dev->dev_attrib.max_write_same_len = max_write_zeroes_sectors;
1422237498fSNicholas Bellinger 	else
143f6970ad3SNicholas Bellinger 		dev->dev_attrib.max_write_same_len = 0xFFFF;
144c66ac9dbSNicholas Bellinger 
14510f0d2a5SChristoph Hellwig 	if (bdev_nonrot(bd))
1460fd97ccfSChristoph Hellwig 		dev->dev_attrib.is_nonrot = 1;
147d0c8b259SNicholas Bellinger 
148ecebbf6cSNicholas Bellinger 	bi = bdev_get_integrity(bd);
149ecebbf6cSNicholas Bellinger 	if (bi) {
150a47a28b7SKent Overstreet 		struct bio_set *bs = &ib_dev->ibd_bio_set;
151ecebbf6cSNicholas Bellinger 
1520f8087ecSMartin K. Petersen 		if (!strcmp(bi->profile->name, "T10-DIF-TYPE3-IP") ||
1530f8087ecSMartin K. Petersen 		    !strcmp(bi->profile->name, "T10-DIF-TYPE1-IP")) {
154ecebbf6cSNicholas Bellinger 			pr_err("IBLOCK export of blk_integrity: %s not"
1550f8087ecSMartin K. Petersen 			       " supported\n", bi->profile->name);
156ecebbf6cSNicholas Bellinger 			ret = -ENOSYS;
157ecebbf6cSNicholas Bellinger 			goto out_blkdev_put;
158ecebbf6cSNicholas Bellinger 		}
159ecebbf6cSNicholas Bellinger 
1600f8087ecSMartin K. Petersen 		if (!strcmp(bi->profile->name, "T10-DIF-TYPE3-CRC")) {
161ecebbf6cSNicholas Bellinger 			dev->dev_attrib.pi_prot_type = TARGET_DIF_TYPE3_PROT;
1620f8087ecSMartin K. Petersen 		} else if (!strcmp(bi->profile->name, "T10-DIF-TYPE1-CRC")) {
163ecebbf6cSNicholas Bellinger 			dev->dev_attrib.pi_prot_type = TARGET_DIF_TYPE1_PROT;
164ecebbf6cSNicholas Bellinger 		}
165ecebbf6cSNicholas Bellinger 
166ecebbf6cSNicholas Bellinger 		if (dev->dev_attrib.pi_prot_type) {
167ecebbf6cSNicholas Bellinger 			if (bioset_integrity_create(bs, IBLOCK_BIO_POOL_SIZE) < 0) {
168ecebbf6cSNicholas Bellinger 				pr_err("Unable to allocate bioset for PI\n");
169ecebbf6cSNicholas Bellinger 				ret = -ENOMEM;
170ecebbf6cSNicholas Bellinger 				goto out_blkdev_put;
171ecebbf6cSNicholas Bellinger 			}
172ecebbf6cSNicholas Bellinger 			pr_debug("IBLOCK setup BIP bs->bio_integrity_pool: %p\n",
173f4f8154aSKent Overstreet 				 &bs->bio_integrity_pool);
174ecebbf6cSNicholas Bellinger 		}
175ecebbf6cSNicholas Bellinger 		dev->dev_attrib.hw_pi_prot_type = dev->dev_attrib.pi_prot_type;
176ecebbf6cSNicholas Bellinger 	}
177ecebbf6cSNicholas Bellinger 
1780fd97ccfSChristoph Hellwig 	return 0;
179e22a7f07SRoland Dreier 
180ecebbf6cSNicholas Bellinger out_blkdev_put:
181ecebbf6cSNicholas Bellinger 	blkdev_put(ib_dev->ibd_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL);
1820fd97ccfSChristoph Hellwig out_free_bioset:
183a47a28b7SKent Overstreet 	bioset_exit(&ib_dev->ibd_bio_set);
1840fd97ccfSChristoph Hellwig out:
1850fd97ccfSChristoph Hellwig 	return ret;
186c66ac9dbSNicholas Bellinger }
187c66ac9dbSNicholas Bellinger 
1884cc987eaSNicholas Bellinger static void iblock_dev_call_rcu(struct rcu_head *p)
1894cc987eaSNicholas Bellinger {
1904cc987eaSNicholas Bellinger 	struct se_device *dev = container_of(p, struct se_device, rcu_head);
1914cc987eaSNicholas Bellinger 	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
1924cc987eaSNicholas Bellinger 
193415ccd98SMike Christie 	kfree(ib_dev->ibd_plug);
1944cc987eaSNicholas Bellinger 	kfree(ib_dev);
1954cc987eaSNicholas Bellinger }
1964cc987eaSNicholas Bellinger 
1970fd97ccfSChristoph Hellwig static void iblock_free_device(struct se_device *dev)
198c66ac9dbSNicholas Bellinger {
19992634706SMike Christie 	call_rcu(&dev->rcu_head, iblock_dev_call_rcu);
20092634706SMike Christie }
20192634706SMike Christie 
20292634706SMike Christie static void iblock_destroy_device(struct se_device *dev)
20392634706SMike Christie {
2040fd97ccfSChristoph Hellwig 	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
205c66ac9dbSNicholas Bellinger 
206bc665524SNicholas Bellinger 	if (ib_dev->ibd_bd != NULL)
207c66ac9dbSNicholas Bellinger 		blkdev_put(ib_dev->ibd_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL);
208a47a28b7SKent Overstreet 	bioset_exit(&ib_dev->ibd_bio_set);
209c66ac9dbSNicholas Bellinger }
210c66ac9dbSNicholas Bellinger 
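/*
 * Grab this CPU's bio plug so commands queued from the same per-cpu
 * submission work are batched until iblock_unplug_device() runs.
 */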
211415ccd98SMike Christie static struct se_dev_plug *iblock_plug_device(struct se_device *se_dev)
212415ccd98SMike Christie {
213415ccd98SMike Christie 	struct iblock_dev *ib_dev = IBLOCK_DEV(se_dev);
214415ccd98SMike Christie 	struct iblock_dev_plug *ib_dev_plug;
215415ccd98SMike Christie 
216415ccd98SMike Christie 	/*
2175aaeca25SMike Christie 	 * Each se_device has a per-cpu work that this can be run from. We
218415ccd98SMike Christie 	 * shouldn't have multiple threads on the same cpu calling this
219415ccd98SMike Christie 	 * at the same time.
220415ccd98SMike Christie 	 */
2215aaeca25SMike Christie 	ib_dev_plug = &ib_dev->ibd_plug[raw_smp_processor_id()];
222415ccd98SMike Christie 	if (test_and_set_bit(IBD_PLUGF_PLUGGED, &ib_dev_plug->flags))
223415ccd98SMike Christie 		return NULL;
224415ccd98SMike Christie 
225415ccd98SMike Christie 	blk_start_plug(&ib_dev_plug->blk_plug);
226415ccd98SMike Christie 	return &ib_dev_plug->se_plug;
227415ccd98SMike Christie }
228415ccd98SMike Christie 
229415ccd98SMike Christie static void iblock_unplug_device(struct se_dev_plug *se_plug)
230415ccd98SMike Christie {
231415ccd98SMike Christie 	struct iblock_dev_plug *ib_dev_plug = container_of(se_plug,
232415ccd98SMike Christie 					struct iblock_dev_plug, se_plug);
233415ccd98SMike Christie 
234415ccd98SMike Christie 	blk_finish_plug(&ib_dev_plug->blk_plug);
235415ccd98SMike Christie 	clear_bit(IBD_PLUGF_PLUGGED, &ib_dev_plug->flags);
236415ccd98SMike Christie }
237415ccd98SMike Christie 
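/*
 * Report the last addressable LBA in units of the exported
 * dev_attrib.block_size, rescaling when it differs from the backing
 * device's logical block size.
 */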
238f915f58eSChristoph Hellwig static sector_t iblock_get_blocks(struct se_device *dev)
239c66ac9dbSNicholas Bellinger {
240f915f58eSChristoph Hellwig 	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
241f915f58eSChristoph Hellwig 	u32 block_size = bdev_logical_block_size(ib_dev->ibd_bd);
24264f0f426SChristoph Hellwig 	unsigned long long blocks_long =
243f915f58eSChristoph Hellwig 		div_u64(bdev_nr_bytes(ib_dev->ibd_bd), block_size) - 1;
244c66ac9dbSNicholas Bellinger 
2450fd97ccfSChristoph Hellwig 	if (block_size == dev->dev_attrib.block_size)
246c66ac9dbSNicholas Bellinger 		return blocks_long;
247c66ac9dbSNicholas Bellinger 
248c66ac9dbSNicholas Bellinger 	switch (block_size) {
249c66ac9dbSNicholas Bellinger 	case 4096:
2500fd97ccfSChristoph Hellwig 		switch (dev->dev_attrib.block_size) {
251c66ac9dbSNicholas Bellinger 		case 2048:
252c66ac9dbSNicholas Bellinger 			blocks_long <<= 1;
253c66ac9dbSNicholas Bellinger 			break;
254c66ac9dbSNicholas Bellinger 		case 1024:
255c66ac9dbSNicholas Bellinger 			blocks_long <<= 2;
256c66ac9dbSNicholas Bellinger 			break;
257c66ac9dbSNicholas Bellinger 		case 512:
258c66ac9dbSNicholas Bellinger 			blocks_long <<= 3;
259492096ecSGustavo A. R. Silva 			break;
260c66ac9dbSNicholas Bellinger 		default:
261c66ac9dbSNicholas Bellinger 			break;
262c66ac9dbSNicholas Bellinger 		}
263c66ac9dbSNicholas Bellinger 		break;
264c66ac9dbSNicholas Bellinger 	case 2048:
2650fd97ccfSChristoph Hellwig 		switch (dev->dev_attrib.block_size) {
266c66ac9dbSNicholas Bellinger 		case 4096:
267c66ac9dbSNicholas Bellinger 			blocks_long >>= 1;
268c66ac9dbSNicholas Bellinger 			break;
269c66ac9dbSNicholas Bellinger 		case 1024:
270c66ac9dbSNicholas Bellinger 			blocks_long <<= 1;
271c66ac9dbSNicholas Bellinger 			break;
272c66ac9dbSNicholas Bellinger 		case 512:
273c66ac9dbSNicholas Bellinger 			blocks_long <<= 2;
274c66ac9dbSNicholas Bellinger 			break;
275c66ac9dbSNicholas Bellinger 		default:
276c66ac9dbSNicholas Bellinger 			break;
277c66ac9dbSNicholas Bellinger 		}
278c66ac9dbSNicholas Bellinger 		break;
279c66ac9dbSNicholas Bellinger 	case 1024:
2800fd97ccfSChristoph Hellwig 		switch (dev->dev_attrib.block_size) {
281c66ac9dbSNicholas Bellinger 		case 4096:
282c66ac9dbSNicholas Bellinger 			blocks_long >>= 2;
283c66ac9dbSNicholas Bellinger 			break;
284c66ac9dbSNicholas Bellinger 		case 2048:
285c66ac9dbSNicholas Bellinger 			blocks_long >>= 1;
286c66ac9dbSNicholas Bellinger 			break;
287c66ac9dbSNicholas Bellinger 		case 512:
288c66ac9dbSNicholas Bellinger 			blocks_long <<= 1;
289c66ac9dbSNicholas Bellinger 			break;
290c66ac9dbSNicholas Bellinger 		default:
291c66ac9dbSNicholas Bellinger 			break;
292c66ac9dbSNicholas Bellinger 		}
293c66ac9dbSNicholas Bellinger 		break;
294c66ac9dbSNicholas Bellinger 	case 512:
2950fd97ccfSChristoph Hellwig 		switch (dev->dev_attrib.block_size) {
296c66ac9dbSNicholas Bellinger 		case 4096:
297c66ac9dbSNicholas Bellinger 			blocks_long >>= 3;
298c66ac9dbSNicholas Bellinger 			break;
299c66ac9dbSNicholas Bellinger 		case 2048:
300c66ac9dbSNicholas Bellinger 			blocks_long >>= 2;
301c66ac9dbSNicholas Bellinger 			break;
302c66ac9dbSNicholas Bellinger 		case 1024:
303c66ac9dbSNicholas Bellinger 			blocks_long >>= 1;
304c66ac9dbSNicholas Bellinger 			break;
305c66ac9dbSNicholas Bellinger 		default:
306c66ac9dbSNicholas Bellinger 			break;
307c66ac9dbSNicholas Bellinger 		}
308c66ac9dbSNicholas Bellinger 		break;
309c66ac9dbSNicholas Bellinger 	default:
310c66ac9dbSNicholas Bellinger 		break;
311c66ac9dbSNicholas Bellinger 	}
312c66ac9dbSNicholas Bellinger 
313c66ac9dbSNicholas Bellinger 	return blocks_long;
314c66ac9dbSNicholas Bellinger }
315c66ac9dbSNicholas Bellinger 
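/*
 * Called once per completed bio; only the final reference drop completes
 * the se_cmd, mapping a reservation conflict or any accumulated bio error
 * to the corresponding SAM status.
 */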
316394f8118SMike Christie static void iblock_complete_cmd(struct se_cmd *cmd, blk_status_t blk_status)
3173a41d85fSNicholas Bellinger {
3183a41d85fSNicholas Bellinger 	struct iblock_req *ibr = cmd->priv;
3193a41d85fSNicholas Bellinger 	u8 status;
3203a41d85fSNicholas Bellinger 
3215981c245SElena Reshetova 	if (!refcount_dec_and_test(&ibr->pending))
3223a41d85fSNicholas Bellinger 		return;
3233a41d85fSNicholas Bellinger 
324394f8118SMike Christie 	if (blk_status == BLK_STS_RESV_CONFLICT)
325394f8118SMike Christie 		status = SAM_STAT_RESERVATION_CONFLICT;
326394f8118SMike Christie 	else if (atomic_read(&ibr->ib_bio_err_cnt))
3273a41d85fSNicholas Bellinger 		status = SAM_STAT_CHECK_CONDITION;
3283a41d85fSNicholas Bellinger 	else
3293a41d85fSNicholas Bellinger 		status = SAM_STAT_GOOD;
3303a41d85fSNicholas Bellinger 
3313a41d85fSNicholas Bellinger 	target_complete_cmd(cmd, status);
3323a41d85fSNicholas Bellinger 	kfree(ibr);
3333a41d85fSNicholas Bellinger }
3343a41d85fSNicholas Bellinger 
3354246a0b6SChristoph Hellwig static void iblock_bio_done(struct bio *bio)
3363a41d85fSNicholas Bellinger {
3373a41d85fSNicholas Bellinger 	struct se_cmd *cmd = bio->bi_private;
3383a41d85fSNicholas Bellinger 	struct iblock_req *ibr = cmd->priv;
339394f8118SMike Christie 	blk_status_t blk_status = bio->bi_status;
3403a41d85fSNicholas Bellinger 
3414e4cbee9SChristoph Hellwig 	if (bio->bi_status) {
3424e4cbee9SChristoph Hellwig 		pr_err("bio error: %p,  err: %d\n", bio, bio->bi_status);
3433a41d85fSNicholas Bellinger 		/*
3443a41d85fSNicholas Bellinger 		 * Bump the ib_bio_err_cnt and release bio.
3453a41d85fSNicholas Bellinger 		 */
3463a41d85fSNicholas Bellinger 		atomic_inc(&ibr->ib_bio_err_cnt);
3474e857c58SPeter Zijlstra 		smp_mb__after_atomic();
3483a41d85fSNicholas Bellinger 	}
3493a41d85fSNicholas Bellinger 
3503a41d85fSNicholas Bellinger 	bio_put(bio);
3513a41d85fSNicholas Bellinger 
352394f8118SMike Christie 	iblock_complete_cmd(cmd, blk_status);
3533a41d85fSNicholas Bellinger }
3543a41d85fSNicholas Bellinger 
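/*
 * Allocate a bio from the per-device bioset, capped at bio_max_segs()
 * vector entries, with iblock_bio_done() as its completion handler.
 */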
355bc9e0e36SChaitanya Kulkarni static struct bio *iblock_get_bio(struct se_cmd *cmd, sector_t lba, u32 sg_num,
35679fe9d7dSBart Van Assche 				  blk_opf_t opf)
3573a41d85fSNicholas Bellinger {
3583a41d85fSNicholas Bellinger 	struct iblock_dev *ib_dev = IBLOCK_DEV(cmd->se_dev);
3593a41d85fSNicholas Bellinger 	struct bio *bio;
3603a41d85fSNicholas Bellinger 
3613a41d85fSNicholas Bellinger 	/*
3623a41d85fSNicholas Bellinger 	 * Only allocate as many vector entries as the bio code allows us to;
3633a41d85fSNicholas Bellinger 	 * we'll loop later on until we have handled the whole request.
3643a41d85fSNicholas Bellinger 	 */
365609be106SChristoph Hellwig 	bio = bio_alloc_bioset(ib_dev->ibd_bd, bio_max_segs(sg_num), opf,
366609be106SChristoph Hellwig 			       GFP_NOIO, &ib_dev->ibd_bio_set);
3673a41d85fSNicholas Bellinger 	if (!bio) {
3683a41d85fSNicholas Bellinger 		pr_err("Unable to allocate memory for bio\n");
3693a41d85fSNicholas Bellinger 		return NULL;
3703a41d85fSNicholas Bellinger 	}
3713a41d85fSNicholas Bellinger 
3723a41d85fSNicholas Bellinger 	bio->bi_private = cmd;
3733a41d85fSNicholas Bellinger 	bio->bi_end_io = &iblock_bio_done;
3744f024f37SKent Overstreet 	bio->bi_iter.bi_sector = lba;
3753a41d85fSNicholas Bellinger 
3763a41d85fSNicholas Bellinger 	return bio;
3773a41d85fSNicholas Bellinger }
3783a41d85fSNicholas Bellinger 
3794e49ea4aSMike Christie static void iblock_submit_bios(struct bio_list *list)
3803a41d85fSNicholas Bellinger {
3813a41d85fSNicholas Bellinger 	struct blk_plug plug;
3823a41d85fSNicholas Bellinger 	struct bio *bio;
383415ccd98SMike Christie 	/*
384415ccd98SMike Christie 	 * The block layer handles nested plugs, so just plug/unplug to handle
385415ccd98SMike Christie 	 * fabric drivers that don't support batching and multi-bio cmds.
386415ccd98SMike Christie 	 */
3873a41d85fSNicholas Bellinger 	blk_start_plug(&plug);
3883a41d85fSNicholas Bellinger 	while ((bio = bio_list_pop(list)))
3894e49ea4aSMike Christie 		submit_bio(bio);
3903a41d85fSNicholas Bellinger 	blk_finish_plug(&plug);
3913a41d85fSNicholas Bellinger }
3923a41d85fSNicholas Bellinger 
3934246a0b6SChristoph Hellwig static void iblock_end_io_flush(struct bio *bio)
394df5fa691SChristoph Hellwig {
395df5fa691SChristoph Hellwig 	struct se_cmd *cmd = bio->bi_private;
396df5fa691SChristoph Hellwig 
3974e4cbee9SChristoph Hellwig 	if (bio->bi_status)
3984e4cbee9SChristoph Hellwig 		pr_err("IBLOCK: cache flush failed: %d\n", bio->bi_status);
399df5fa691SChristoph Hellwig 
4005787cacdSChristoph Hellwig 	if (cmd) {
4014e4cbee9SChristoph Hellwig 		if (bio->bi_status)
4025787cacdSChristoph Hellwig 			target_complete_cmd(cmd, SAM_STAT_CHECK_CONDITION);
403de103c93SChristoph Hellwig 		else
4045787cacdSChristoph Hellwig 			target_complete_cmd(cmd, SAM_STAT_GOOD);
4055787cacdSChristoph Hellwig 	}
4065787cacdSChristoph Hellwig 
407df5fa691SChristoph Hellwig 	bio_put(bio);
408df5fa691SChristoph Hellwig }
409df5fa691SChristoph Hellwig 
410c66ac9dbSNicholas Bellinger /*
411df5fa691SChristoph Hellwig  * Implement SYNCHRONIZE CACHE.  Note that we can't handle lba ranges and must
412df5fa691SChristoph Hellwig  * always flush the whole cache.
413c66ac9dbSNicholas Bellinger  */
414de103c93SChristoph Hellwig static sense_reason_t
415de103c93SChristoph Hellwig iblock_execute_sync_cache(struct se_cmd *cmd)
416c66ac9dbSNicholas Bellinger {
4170fd97ccfSChristoph Hellwig 	struct iblock_dev *ib_dev = IBLOCK_DEV(cmd->se_dev);
418a1d8b49aSAndy Grover 	int immed = (cmd->t_task_cdb[1] & 0x2);
419df5fa691SChristoph Hellwig 	struct bio *bio;
420c66ac9dbSNicholas Bellinger 
421c66ac9dbSNicholas Bellinger 	/*
422c66ac9dbSNicholas Bellinger 	 * If the Immediate bit is set, queue up the GOOD response
423df5fa691SChristoph Hellwig 	 * for this SYNCHRONIZE_CACHE op.
424c66ac9dbSNicholas Bellinger 	 */
425c66ac9dbSNicholas Bellinger 	if (immed)
4265787cacdSChristoph Hellwig 		target_complete_cmd(cmd, SAM_STAT_GOOD);
427c66ac9dbSNicholas Bellinger 
42807888c66SChristoph Hellwig 	bio = bio_alloc(ib_dev->ibd_bd, 0, REQ_OP_WRITE | REQ_PREFLUSH,
42907888c66SChristoph Hellwig 			GFP_KERNEL);
430df5fa691SChristoph Hellwig 	bio->bi_end_io = iblock_end_io_flush;
431c66ac9dbSNicholas Bellinger 	if (!immed)
432df5fa691SChristoph Hellwig 		bio->bi_private = cmd;
4334e49ea4aSMike Christie 	submit_bio(bio);
434ad67f0d9SChristoph Hellwig 	return 0;
435c66ac9dbSNicholas Bellinger }
436c66ac9dbSNicholas Bellinger 
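/*
 * Emulate UNMAP by converting the LBA range to backing device sectors and
 * passing it to blkdev_issue_discard().
 */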
437de103c93SChristoph Hellwig static sense_reason_t
43862e46942SChristoph Hellwig iblock_execute_unmap(struct se_cmd *cmd, sector_t lba, sector_t nolb)
439dbc21c5aSAsias He {
44062e46942SChristoph Hellwig 	struct block_device *bdev = IBLOCK_DEV(cmd->se_dev)->ibd_bd;
4418a9ebe71SMike Christie 	struct se_device *dev = cmd->se_dev;
442dbc21c5aSAsias He 	int ret;
443dbc21c5aSAsias He 
4448a9ebe71SMike Christie 	ret = blkdev_issue_discard(bdev,
4458a9ebe71SMike Christie 				   target_to_linux_sector(dev, lba),
4468a9ebe71SMike Christie 				   target_to_linux_sector(dev, nolb),
44744abff2cSChristoph Hellwig 				   GFP_KERNEL);
448dbc21c5aSAsias He 	if (ret < 0) {
449dbc21c5aSAsias He 		pr_err("blkdev_issue_discard() failed: %d\n", ret);
450dbc21c5aSAsias He 		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
451dbc21c5aSAsias He 	}
452dbc21c5aSAsias He 
453dbc21c5aSAsias He 	return 0;
454dbc21c5aSAsias He }
455dbc21c5aSAsias He 
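/*
 * WRITE_SAME fast path: when the single-block payload is all zeroes, use
 * blkdev_issue_zeroout() instead of building bios from the payload.
 */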
456dbc21c5aSAsias He static sense_reason_t
4572237498fSNicholas Bellinger iblock_execute_zero_out(struct block_device *bdev, struct se_cmd *cmd)
45807b63196SMike Christie {
45907b63196SMike Christie 	struct se_device *dev = cmd->se_dev;
46007b63196SMike Christie 	struct scatterlist *sg = &cmd->t_data_sg[0];
461f5957dadSBryant G Ly 	unsigned char *buf, *not_zero;
462f5957dadSBryant G Ly 	int ret;
46307b63196SMike Christie 
4642237498fSNicholas Bellinger 	buf = kmap(sg_page(sg)) + sg->offset;
4652237498fSNicholas Bellinger 	if (!buf)
4662237498fSNicholas Bellinger 		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
4672237498fSNicholas Bellinger 	/*
4682237498fSNicholas Bellinger 	 * Fall back to the bio-based slow path in iblock_execute_write_same()
4692237498fSNicholas Bellinger 	 * if the incoming WRITE_SAME payload is not all zeroes.
4702237498fSNicholas Bellinger 	 */
471f5957dadSBryant G Ly 	not_zero = memchr_inv(buf, 0x00, cmd->data_length);
4722237498fSNicholas Bellinger 	kunmap(sg_page(sg));
47307b63196SMike Christie 
474f5957dadSBryant G Ly 	if (not_zero)
4752237498fSNicholas Bellinger 		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
4762237498fSNicholas Bellinger 
4772237498fSNicholas Bellinger 	ret = blkdev_issue_zeroout(bdev,
47807b63196SMike Christie 				target_to_linux_sector(dev, cmd->t_task_lba),
47907b63196SMike Christie 				target_to_linux_sector(dev,
48007b63196SMike Christie 					sbc_get_write_same_sectors(cmd)),
4811d2ff149SDavid Disseldorp 				GFP_KERNEL, BLKDEV_ZERO_NOUNMAP);
48207b63196SMike Christie 	if (ret)
48307b63196SMike Christie 		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
48407b63196SMike Christie 
48514b40c1eSHannes Reinecke 	target_complete_cmd(cmd, SAM_STAT_GOOD);
48607b63196SMike Christie 	return 0;
48707b63196SMike Christie }
48807b63196SMike Christie 
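/*
 * Emulate WRITE_SAME by repeatedly adding the single-block payload page to
 * bios covering the requested range, taking the zero-out fast path above
 * when the backing device supports write zeroes.
 */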
48907b63196SMike Christie static sense_reason_t
490f6970ad3SNicholas Bellinger iblock_execute_write_same(struct se_cmd *cmd)
491f6970ad3SNicholas Bellinger {
49207b63196SMike Christie 	struct block_device *bdev = IBLOCK_DEV(cmd->se_dev)->ibd_bd;
493f6970ad3SNicholas Bellinger 	struct iblock_req *ibr;
494f6970ad3SNicholas Bellinger 	struct scatterlist *sg;
495f6970ad3SNicholas Bellinger 	struct bio *bio;
496f6970ad3SNicholas Bellinger 	struct bio_list list;
4978a9ebe71SMike Christie 	struct se_device *dev = cmd->se_dev;
4988a9ebe71SMike Christie 	sector_t block_lba = target_to_linux_sector(dev, cmd->t_task_lba);
4998a9ebe71SMike Christie 	sector_t sectors = target_to_linux_sector(dev,
5008a9ebe71SMike Christie 					sbc_get_write_same_sectors(cmd));
501f6970ad3SNicholas Bellinger 
502afd73f1bSNicholas Bellinger 	if (cmd->prot_op) {
503afd73f1bSNicholas Bellinger 		pr_err("WRITE_SAME: Protection information with IBLOCK"
504afd73f1bSNicholas Bellinger 		       " backends not supported\n");
505afd73f1bSNicholas Bellinger 		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
506afd73f1bSNicholas Bellinger 	}
507ccd3f449SMike Christie 
508ccd3f449SMike Christie 	if (!cmd->t_data_nents)
509ccd3f449SMike Christie 		return TCM_INVALID_CDB_FIELD;
510ccd3f449SMike Christie 
511f6970ad3SNicholas Bellinger 	sg = &cmd->t_data_sg[0];
512f6970ad3SNicholas Bellinger 
513f6970ad3SNicholas Bellinger 	if (cmd->t_data_nents > 1 ||
514f6970ad3SNicholas Bellinger 	    sg->length != cmd->se_dev->dev_attrib.block_size) {
515f6970ad3SNicholas Bellinger 		pr_err("WRITE_SAME: Illegal SGL t_data_nents: %u length: %u"
516f6970ad3SNicholas Bellinger 			" block_size: %u\n", cmd->t_data_nents, sg->length,
517f6970ad3SNicholas Bellinger 			cmd->se_dev->dev_attrib.block_size);
518f6970ad3SNicholas Bellinger 		return TCM_INVALID_CDB_FIELD;
519f6970ad3SNicholas Bellinger 	}
520f6970ad3SNicholas Bellinger 
5212237498fSNicholas Bellinger 	if (bdev_write_zeroes_sectors(bdev)) {
5222237498fSNicholas Bellinger 		if (!iblock_execute_zero_out(bdev, cmd))
5232237498fSNicholas Bellinger 			return 0;
5242237498fSNicholas Bellinger 	}
52507b63196SMike Christie 
526f6970ad3SNicholas Bellinger 	ibr = kzalloc(sizeof(struct iblock_req), GFP_KERNEL);
527f6970ad3SNicholas Bellinger 	if (!ibr)
528f6970ad3SNicholas Bellinger 		goto fail;
529f6970ad3SNicholas Bellinger 	cmd->priv = ibr;
530f6970ad3SNicholas Bellinger 
531bc9e0e36SChaitanya Kulkarni 	bio = iblock_get_bio(cmd, block_lba, 1, REQ_OP_WRITE);
532f6970ad3SNicholas Bellinger 	if (!bio)
533f6970ad3SNicholas Bellinger 		goto fail_free_ibr;
534f6970ad3SNicholas Bellinger 
535f6970ad3SNicholas Bellinger 	bio_list_init(&list);
536f6970ad3SNicholas Bellinger 	bio_list_add(&list, bio);
537f6970ad3SNicholas Bellinger 
5385981c245SElena Reshetova 	refcount_set(&ibr->pending, 1);
539f6970ad3SNicholas Bellinger 
540f6970ad3SNicholas Bellinger 	while (sectors) {
541f6970ad3SNicholas Bellinger 		while (bio_add_page(bio, sg_page(sg), sg->length, sg->offset)
542f6970ad3SNicholas Bellinger 				!= sg->length) {
543f6970ad3SNicholas Bellinger 
544bc9e0e36SChaitanya Kulkarni 			bio = iblock_get_bio(cmd, block_lba, 1, REQ_OP_WRITE);
545f6970ad3SNicholas Bellinger 			if (!bio)
546f6970ad3SNicholas Bellinger 				goto fail_put_bios;
547f6970ad3SNicholas Bellinger 
5485981c245SElena Reshetova 			refcount_inc(&ibr->pending);
549f6970ad3SNicholas Bellinger 			bio_list_add(&list, bio);
550f6970ad3SNicholas Bellinger 		}
551f6970ad3SNicholas Bellinger 
552f6970ad3SNicholas Bellinger 		/* Always in 512 byte units for Linux/Block */
55380b045b3SBart Van Assche 		block_lba += sg->length >> SECTOR_SHIFT;
5545676234fSRoman Bolshakov 		sectors -= sg->length >> SECTOR_SHIFT;
555f6970ad3SNicholas Bellinger 	}
556f6970ad3SNicholas Bellinger 
5574e49ea4aSMike Christie 	iblock_submit_bios(&list);
558f6970ad3SNicholas Bellinger 	return 0;
559f6970ad3SNicholas Bellinger 
560f6970ad3SNicholas Bellinger fail_put_bios:
561f6970ad3SNicholas Bellinger 	while ((bio = bio_list_pop(&list)))
562f6970ad3SNicholas Bellinger 		bio_put(bio);
563f6970ad3SNicholas Bellinger fail_free_ibr:
564f6970ad3SNicholas Bellinger 	kfree(ibr);
565f6970ad3SNicholas Bellinger fail:
566f6970ad3SNicholas Bellinger 	return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
567f6970ad3SNicholas Bellinger }
568f6970ad3SNicholas Bellinger 
569c66ac9dbSNicholas Bellinger enum {
57044bfd018SAndy Grover 	Opt_udev_path, Opt_readonly, Opt_force, Opt_err
571c66ac9dbSNicholas Bellinger };
572c66ac9dbSNicholas Bellinger 
573c66ac9dbSNicholas Bellinger static match_table_t tokens = {
574c66ac9dbSNicholas Bellinger 	{Opt_udev_path, "udev_path=%s"},
57544bfd018SAndy Grover 	{Opt_readonly, "readonly=%d"},
576c66ac9dbSNicholas Bellinger 	{Opt_force, "force=%d"},
577c66ac9dbSNicholas Bellinger 	{Opt_err, NULL}
578c66ac9dbSNicholas Bellinger };
579c66ac9dbSNicholas Bellinger 
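/*
 * Parse the configfs control string: udev_path= names the backing block
 * device, readonly= toggles read-only export, and force= is accepted but
 * currently ignored.
 */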
5800fd97ccfSChristoph Hellwig static ssize_t iblock_set_configfs_dev_params(struct se_device *dev,
581c66ac9dbSNicholas Bellinger 		const char *page, ssize_t count)
582c66ac9dbSNicholas Bellinger {
5830fd97ccfSChristoph Hellwig 	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
5846d180253SJesper Juhl 	char *orig, *ptr, *arg_p, *opts;
585c66ac9dbSNicholas Bellinger 	substring_t args[MAX_OPT_ARGS];
58621bca31cSRoland Dreier 	int ret = 0, token;
58744bfd018SAndy Grover 	unsigned long tmp_readonly;
588c66ac9dbSNicholas Bellinger 
589c66ac9dbSNicholas Bellinger 	opts = kstrdup(page, GFP_KERNEL);
590c66ac9dbSNicholas Bellinger 	if (!opts)
591c66ac9dbSNicholas Bellinger 		return -ENOMEM;
592c66ac9dbSNicholas Bellinger 
593c66ac9dbSNicholas Bellinger 	orig = opts;
594c66ac9dbSNicholas Bellinger 
59590c161b6SSebastian Andrzej Siewior 	while ((ptr = strsep(&opts, ",\n")) != NULL) {
596c66ac9dbSNicholas Bellinger 		if (!*ptr)
597c66ac9dbSNicholas Bellinger 			continue;
598c66ac9dbSNicholas Bellinger 
599c66ac9dbSNicholas Bellinger 		token = match_token(ptr, tokens, args);
600c66ac9dbSNicholas Bellinger 		switch (token) {
601c66ac9dbSNicholas Bellinger 		case Opt_udev_path:
602c66ac9dbSNicholas Bellinger 			if (ib_dev->ibd_bd) {
6036708bb27SAndy Grover 				pr_err("Unable to set udev_path= while"
604c66ac9dbSNicholas Bellinger 					" ib_dev->ibd_bd exists\n");
605c66ac9dbSNicholas Bellinger 				ret = -EEXIST;
606c66ac9dbSNicholas Bellinger 				goto out;
607c66ac9dbSNicholas Bellinger 			}
608852b6ed1SNicholas Bellinger 			if (match_strlcpy(ib_dev->ibd_udev_path, &args[0],
609852b6ed1SNicholas Bellinger 				SE_UDEV_PATH_LEN) == 0) {
610852b6ed1SNicholas Bellinger 				ret = -EINVAL;
6116d180253SJesper Juhl 				break;
6126d180253SJesper Juhl 			}
6136708bb27SAndy Grover 			pr_debug("IBLOCK: Referencing UDEV path: %s\n",
614c66ac9dbSNicholas Bellinger 					ib_dev->ibd_udev_path);
615c66ac9dbSNicholas Bellinger 			ib_dev->ibd_flags |= IBDF_HAS_UDEV_PATH;
616c66ac9dbSNicholas Bellinger 			break;
61744bfd018SAndy Grover 		case Opt_readonly:
61844bfd018SAndy Grover 			arg_p = match_strdup(&args[0]);
61944bfd018SAndy Grover 			if (!arg_p) {
62044bfd018SAndy Grover 				ret = -ENOMEM;
62144bfd018SAndy Grover 				break;
62244bfd018SAndy Grover 			}
62357103d7fSJingoo Han 			ret = kstrtoul(arg_p, 0, &tmp_readonly);
62444bfd018SAndy Grover 			kfree(arg_p);
62544bfd018SAndy Grover 			if (ret < 0) {
62657103d7fSJingoo Han 				pr_err("kstrtoul() failed for"
62744bfd018SAndy Grover 						" readonly=\n");
62844bfd018SAndy Grover 				goto out;
62944bfd018SAndy Grover 			}
63044bfd018SAndy Grover 			ib_dev->ibd_readonly = tmp_readonly;
63144bfd018SAndy Grover 			pr_debug("IBLOCK: readonly: %d\n", ib_dev->ibd_readonly);
63244bfd018SAndy Grover 			break;
633c66ac9dbSNicholas Bellinger 		case Opt_force:
634c66ac9dbSNicholas Bellinger 			break;
635c66ac9dbSNicholas Bellinger 		default:
636c66ac9dbSNicholas Bellinger 			break;
637c66ac9dbSNicholas Bellinger 		}
638c66ac9dbSNicholas Bellinger 	}
639c66ac9dbSNicholas Bellinger 
640c66ac9dbSNicholas Bellinger out:
641c66ac9dbSNicholas Bellinger 	kfree(orig);
642c66ac9dbSNicholas Bellinger 	return (!ret) ? count : ret;
643c66ac9dbSNicholas Bellinger }
644c66ac9dbSNicholas Bellinger 
6450fd97ccfSChristoph Hellwig static ssize_t iblock_show_configfs_dev_params(struct se_device *dev, char *b)
646c66ac9dbSNicholas Bellinger {
6470fd97ccfSChristoph Hellwig 	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
6480fd97ccfSChristoph Hellwig 	struct block_device *bd = ib_dev->ibd_bd;
649c66ac9dbSNicholas Bellinger 	ssize_t bl = 0;
650c66ac9dbSNicholas Bellinger 
651c66ac9dbSNicholas Bellinger 	if (bd)
6521b74ab77SChristoph Hellwig 		bl += sprintf(b + bl, "iBlock device: %pg", bd);
6530fd97ccfSChristoph Hellwig 	if (ib_dev->ibd_flags & IBDF_HAS_UDEV_PATH)
65444bfd018SAndy Grover 		bl += sprintf(b + bl, "  UDEV PATH: %s",
6550fd97ccfSChristoph Hellwig 				ib_dev->ibd_udev_path);
6560fd97ccfSChristoph Hellwig 	bl += sprintf(b + bl, "  readonly: %d\n", ib_dev->ibd_readonly);
657c66ac9dbSNicholas Bellinger 
658c66ac9dbSNicholas Bellinger 	bl += sprintf(b + bl, "        ");
659c66ac9dbSNicholas Bellinger 	if (bd) {
660c66ac9dbSNicholas Bellinger 		bl += sprintf(b + bl, "Major: %d Minor: %d  %s\n",
66157ba1059SChristoph Hellwig 			MAJOR(bd->bd_dev), MINOR(bd->bd_dev),
66257ba1059SChristoph Hellwig 			"CLAIMED: IBLOCK");
663c66ac9dbSNicholas Bellinger 	} else {
66421bca31cSRoland Dreier 		bl += sprintf(b + bl, "Major: 0 Minor: 0\n");
665c66ac9dbSNicholas Bellinger 	}
666c66ac9dbSNicholas Bellinger 
667c66ac9dbSNicholas Bellinger 	return bl;
668c66ac9dbSNicholas Bellinger }
669c66ac9dbSNicholas Bellinger 
670ecebbf6cSNicholas Bellinger static int
671fed564f6SGreg Edwards iblock_alloc_bip(struct se_cmd *cmd, struct bio *bio,
672fed564f6SGreg Edwards 		 struct sg_mapping_iter *miter)
673ecebbf6cSNicholas Bellinger {
674ecebbf6cSNicholas Bellinger 	struct se_device *dev = cmd->se_dev;
675ecebbf6cSNicholas Bellinger 	struct blk_integrity *bi;
676ecebbf6cSNicholas Bellinger 	struct bio_integrity_payload *bip;
677ecebbf6cSNicholas Bellinger 	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
678fed564f6SGreg Edwards 	int rc;
679fed564f6SGreg Edwards 	size_t resid, len;
680ecebbf6cSNicholas Bellinger 
681ecebbf6cSNicholas Bellinger 	bi = bdev_get_integrity(ib_dev->ibd_bd);
682ecebbf6cSNicholas Bellinger 	if (!bi) {
683ecebbf6cSNicholas Bellinger 		pr_err("Unable to locate bio_integrity\n");
684ecebbf6cSNicholas Bellinger 		return -ENODEV;
685ecebbf6cSNicholas Bellinger 	}
686ecebbf6cSNicholas Bellinger 
6875f7136dbSMatthew Wilcox (Oracle) 	bip = bio_integrity_alloc(bio, GFP_NOIO, bio_max_segs(cmd->t_prot_nents));
68806c1e390SKeith Busch 	if (IS_ERR(bip)) {
689ecebbf6cSNicholas Bellinger 		pr_err("Unable to allocate bio_integrity_payload\n");
69006c1e390SKeith Busch 		return PTR_ERR(bip);
691ecebbf6cSNicholas Bellinger 	}
692ecebbf6cSNicholas Bellinger 
693fed564f6SGreg Edwards 	bip->bip_iter.bi_size = bio_integrity_bytes(bi, bio_sectors(bio));
694e4dc9a4cSIsrael Rukshin 	/* virtual start sector must be in integrity interval units */
695e4dc9a4cSIsrael Rukshin 	bip_set_seed(bip, bio->bi_iter.bi_sector >>
696e4dc9a4cSIsrael Rukshin 				  (bi->interval_exp - SECTOR_SHIFT));
697ecebbf6cSNicholas Bellinger 
6984e13c5d0SLinus Torvalds 	pr_debug("IBLOCK BIP Size: %u Sector: %llu\n", bip->bip_iter.bi_size,
6994e13c5d0SLinus Torvalds 		 (unsigned long long)bip->bip_iter.bi_sector);
700ecebbf6cSNicholas Bellinger 
701fed564f6SGreg Edwards 	resid = bip->bip_iter.bi_size;
702fed564f6SGreg Edwards 	while (resid > 0 && sg_miter_next(miter)) {
703ecebbf6cSNicholas Bellinger 
704fed564f6SGreg Edwards 		len = min_t(size_t, miter->length, resid);
705fed564f6SGreg Edwards 		rc = bio_integrity_add_page(bio, miter->page, len,
706fed564f6SGreg Edwards 					    offset_in_page(miter->addr));
707fed564f6SGreg Edwards 		if (rc != len) {
708ecebbf6cSNicholas Bellinger 			pr_err("bio_integrity_add_page() failed; %d\n", rc);
709fed564f6SGreg Edwards 			sg_miter_stop(miter);
710ecebbf6cSNicholas Bellinger 			return -ENOMEM;
711ecebbf6cSNicholas Bellinger 		}
712ecebbf6cSNicholas Bellinger 
713fed564f6SGreg Edwards 		pr_debug("Added bio integrity page: %p length: %zu offset: %lu\n",
714fed564f6SGreg Edwards 			  miter->page, len, offset_in_page(miter->addr));
715fed564f6SGreg Edwards 
716fed564f6SGreg Edwards 		resid -= len;
717fed564f6SGreg Edwards 		if (len < miter->length)
718fed564f6SGreg Edwards 			miter->consumed -= miter->length - len;
719ecebbf6cSNicholas Bellinger 	}
720fed564f6SGreg Edwards 	sg_miter_stop(miter);
721ecebbf6cSNicholas Bellinger 
722ecebbf6cSNicholas Bellinger 	return 0;
723ecebbf6cSNicholas Bellinger }
724ecebbf6cSNicholas Bellinger 
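/*
 * Main READ/WRITE path: map the command's data scatterlist into one or
 * more bios, attach protection information when PI is enabled, and submit
 * in batches of up to IBLOCK_MAX_BIO_PER_TASK bios.
 */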
725de103c93SChristoph Hellwig static sense_reason_t
726a82a9538SNicholas Bellinger iblock_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
727a82a9538SNicholas Bellinger 		  enum dma_data_direction data_direction)
728c66ac9dbSNicholas Bellinger {
7295951146dSAndy Grover 	struct se_device *dev = cmd->se_dev;
7308a9ebe71SMike Christie 	sector_t block_lba = target_to_linux_sector(dev, cmd->t_task_lba);
7315787cacdSChristoph Hellwig 	struct iblock_req *ibr;
732fed564f6SGreg Edwards 	struct bio *bio;
733dbbf3e94SChristoph Hellwig 	struct bio_list list;
734c66ac9dbSNicholas Bellinger 	struct scatterlist *sg;
7355787cacdSChristoph Hellwig 	u32 sg_num = sgl_nents;
73679fe9d7dSBart Van Assche 	blk_opf_t opf;
737d5b4a21bSChristoph Hellwig 	unsigned bio_cnt;
738bc9e0e36SChaitanya Kulkarni 	int i, rc;
739fed564f6SGreg Edwards 	struct sg_mapping_iter prot_miter;
740bc9e0e36SChaitanya Kulkarni 	unsigned int miter_dir;
741dbbf3e94SChristoph Hellwig 
7425787cacdSChristoph Hellwig 	if (data_direction == DMA_TO_DEVICE) {
743d0c8b259SNicholas Bellinger 		struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
744dbbf3e94SChristoph Hellwig 		/*
74570fd7614SChristoph Hellwig 		 * Force writethrough using REQ_FUA if a volatile write cache
746d0c8b259SNicholas Bellinger 		 * is not enabled, or if the initiator set the Force Unit Access bit.
747dbbf3e94SChristoph Hellwig 		 */
748bc9e0e36SChaitanya Kulkarni 		opf = REQ_OP_WRITE;
749bc9e0e36SChaitanya Kulkarni 		miter_dir = SG_MITER_TO_SG;
750a557e82eSChristoph Hellwig 		if (bdev_fua(ib_dev->ibd_bd)) {
751d0c8b259SNicholas Bellinger 			if (cmd->se_cmd_flags & SCF_FUA)
752bc9e0e36SChaitanya Kulkarni 				opf |= REQ_FUA;
75308e688fdSChristoph Hellwig 			else if (!bdev_write_cache(ib_dev->ibd_bd))
754bc9e0e36SChaitanya Kulkarni 				opf |= REQ_FUA;
755d0c8b259SNicholas Bellinger 		}
756dbbf3e94SChristoph Hellwig 	} else {
757bc9e0e36SChaitanya Kulkarni 		opf = REQ_OP_READ;
758bc9e0e36SChaitanya Kulkarni 		miter_dir = SG_MITER_FROM_SG;
759dbbf3e94SChristoph Hellwig 	}
760dbbf3e94SChristoph Hellwig 
7615787cacdSChristoph Hellwig 	ibr = kzalloc(sizeof(struct iblock_req), GFP_KERNEL);
7625787cacdSChristoph Hellwig 	if (!ibr)
7635787cacdSChristoph Hellwig 		goto fail;
7645787cacdSChristoph Hellwig 	cmd->priv = ibr;
7655787cacdSChristoph Hellwig 
766e0de4457SPaolo Bonzini 	if (!sgl_nents) {
7675981c245SElena Reshetova 		refcount_set(&ibr->pending, 1);
768394f8118SMike Christie 		iblock_complete_cmd(cmd, BLK_STS_OK);
769e0de4457SPaolo Bonzini 		return 0;
770e0de4457SPaolo Bonzini 	}
771e0de4457SPaolo Bonzini 
772bc9e0e36SChaitanya Kulkarni 	bio = iblock_get_bio(cmd, block_lba, sgl_nents, opf);
7735787cacdSChristoph Hellwig 	if (!bio)
7745787cacdSChristoph Hellwig 		goto fail_free_ibr;
775c66ac9dbSNicholas Bellinger 
776dbbf3e94SChristoph Hellwig 	bio_list_init(&list);
777dbbf3e94SChristoph Hellwig 	bio_list_add(&list, bio);
7785787cacdSChristoph Hellwig 
7795981c245SElena Reshetova 	refcount_set(&ibr->pending, 2);
780d5b4a21bSChristoph Hellwig 	bio_cnt = 1;
781dbbf3e94SChristoph Hellwig 
782fed564f6SGreg Edwards 	if (cmd->prot_type && dev->dev_attrib.pi_prot_type)
783fed564f6SGreg Edwards 		sg_miter_start(&prot_miter, cmd->t_prot_sg, cmd->t_prot_nents,
784bc9e0e36SChaitanya Kulkarni 			       miter_dir);
785fed564f6SGreg Edwards 
7865787cacdSChristoph Hellwig 	for_each_sg(sgl, sg, sgl_nents, i) {
787dbbf3e94SChristoph Hellwig 		/*
788dbbf3e94SChristoph Hellwig 		 * XXX: if the length the device accepts is shorter than the
789dbbf3e94SChristoph Hellwig 		 *	length of the S/G list entry, this will cause an
790dbbf3e94SChristoph Hellwig 		 *	endless loop.  Better hope no driver uses huge pages.
791dbbf3e94SChristoph Hellwig 		 */
792dbbf3e94SChristoph Hellwig 		while (bio_add_page(bio, sg_page(sg), sg->length, sg->offset)
793dbbf3e94SChristoph Hellwig 				!= sg->length) {
794fed564f6SGreg Edwards 			if (cmd->prot_type && dev->dev_attrib.pi_prot_type) {
795fed564f6SGreg Edwards 				rc = iblock_alloc_bip(cmd, bio, &prot_miter);
796fed564f6SGreg Edwards 				if (rc)
797fed564f6SGreg Edwards 					goto fail_put_bios;
798fed564f6SGreg Edwards 			}
799fed564f6SGreg Edwards 
800d5b4a21bSChristoph Hellwig 			if (bio_cnt >= IBLOCK_MAX_BIO_PER_TASK) {
8014e49ea4aSMike Christie 				iblock_submit_bios(&list);
802d5b4a21bSChristoph Hellwig 				bio_cnt = 0;
803d5b4a21bSChristoph Hellwig 			}
804d5b4a21bSChristoph Hellwig 
805bc9e0e36SChaitanya Kulkarni 			bio = iblock_get_bio(cmd, block_lba, sg_num, opf);
8066708bb27SAndy Grover 			if (!bio)
8075787cacdSChristoph Hellwig 				goto fail_put_bios;
8085787cacdSChristoph Hellwig 
8095981c245SElena Reshetova 			refcount_inc(&ibr->pending);
810dbbf3e94SChristoph Hellwig 			bio_list_add(&list, bio);
811d5b4a21bSChristoph Hellwig 			bio_cnt++;
812c66ac9dbSNicholas Bellinger 		}
813dbbf3e94SChristoph Hellwig 
814c66ac9dbSNicholas Bellinger 		/* Always in 512 byte units for Linux/Block */
81580b045b3SBart Van Assche 		block_lba += sg->length >> SECTOR_SHIFT;
816c66ac9dbSNicholas Bellinger 		sg_num--;
817c66ac9dbSNicholas Bellinger 	}
818c66ac9dbSNicholas Bellinger 
8196f16ec43SNicholas Bellinger 	if (cmd->prot_type && dev->dev_attrib.pi_prot_type) {
820fed564f6SGreg Edwards 		rc = iblock_alloc_bip(cmd, bio, &prot_miter);
821ecebbf6cSNicholas Bellinger 		if (rc)
822ecebbf6cSNicholas Bellinger 			goto fail_put_bios;
823ecebbf6cSNicholas Bellinger 	}
824ecebbf6cSNicholas Bellinger 
8254e49ea4aSMike Christie 	iblock_submit_bios(&list);
826394f8118SMike Christie 	iblock_complete_cmd(cmd, BLK_STS_OK);
82703e98c9eSNicholas Bellinger 	return 0;
828dbbf3e94SChristoph Hellwig 
8295787cacdSChristoph Hellwig fail_put_bios:
830dbbf3e94SChristoph Hellwig 	while ((bio = bio_list_pop(&list)))
831c66ac9dbSNicholas Bellinger 		bio_put(bio);
8325787cacdSChristoph Hellwig fail_free_ibr:
8335787cacdSChristoph Hellwig 	kfree(ibr);
8345787cacdSChristoph Hellwig fail:
835de103c93SChristoph Hellwig 	return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
836c66ac9dbSNicholas Bellinger }
837c66ac9dbSNicholas Bellinger 
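/*
 * Pass PERSISTENT_RESERVE_OUT service actions through to the backing
 * device's block layer pr_ops.
 */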
838394f8118SMike Christie static sense_reason_t iblock_execute_pr_out(struct se_cmd *cmd, u8 sa, u64 key,
839394f8118SMike Christie 					    u64 sa_key, u8 type, bool aptpl)
840394f8118SMike Christie {
841394f8118SMike Christie 	struct se_device *dev = cmd->se_dev;
842394f8118SMike Christie 	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
843394f8118SMike Christie 	struct block_device *bdev = ib_dev->ibd_bd;
844394f8118SMike Christie 	const struct pr_ops *ops = bdev->bd_disk->fops->pr_ops;
845394f8118SMike Christie 	int ret;
846394f8118SMike Christie 
847394f8118SMike Christie 	if (!ops) {
848394f8118SMike Christie 		pr_err("Block device does not support pr_ops but iblock device has been configured for PR passthrough.\n");
849394f8118SMike Christie 		return TCM_UNSUPPORTED_SCSI_OPCODE;
850394f8118SMike Christie 	}
851394f8118SMike Christie 
852394f8118SMike Christie 	switch (sa) {
853394f8118SMike Christie 	case PRO_REGISTER:
854394f8118SMike Christie 	case PRO_REGISTER_AND_IGNORE_EXISTING_KEY:
855394f8118SMike Christie 		if (!ops->pr_register) {
856394f8118SMike Christie 			pr_err("block device does not support pr_register.\n");
857394f8118SMike Christie 			return TCM_UNSUPPORTED_SCSI_OPCODE;
858394f8118SMike Christie 		}
859394f8118SMike Christie 
860394f8118SMike Christie 		/* The block layer pr ops always enable aptpl */
861394f8118SMike Christie 		if (!aptpl)
862394f8118SMike Christie 			pr_info("APTPL not set by initiator, but will be used.\n");
863394f8118SMike Christie 
864394f8118SMike Christie 		ret = ops->pr_register(bdev, key, sa_key,
865394f8118SMike Christie 				sa == PRO_REGISTER ? 0 : PR_FL_IGNORE_KEY);
866394f8118SMike Christie 		break;
867394f8118SMike Christie 	case PRO_RESERVE:
868394f8118SMike Christie 		if (!ops->pr_reserve) {
869394f8118SMike Christie 			pr_err("block_device does not support pr_reserve.\n");
870394f8118SMike Christie 			return TCM_UNSUPPORTED_SCSI_OPCODE;
871394f8118SMike Christie 		}
872394f8118SMike Christie 
873394f8118SMike Christie 		ret = ops->pr_reserve(bdev, key, scsi_pr_type_to_block(type), 0);
874394f8118SMike Christie 		break;
875394f8118SMike Christie 	case PRO_CLEAR:
876394f8118SMike Christie 		if (!ops->pr_clear) {
877394f8118SMike Christie 			pr_err("block_device does not support pr_clear.\n");
878394f8118SMike Christie 			return TCM_UNSUPPORTED_SCSI_OPCODE;
879394f8118SMike Christie 		}
880394f8118SMike Christie 
881394f8118SMike Christie 		ret = ops->pr_clear(bdev, key);
882394f8118SMike Christie 		break;
883394f8118SMike Christie 	case PRO_PREEMPT:
884394f8118SMike Christie 	case PRO_PREEMPT_AND_ABORT:
885394f8118SMike Christie 		if (!ops->pr_preempt) {
886394f8118SMike Christie 			pr_err("block_device does not support pr_preempt.\n");
887394f8118SMike Christie 			return TCM_UNSUPPORTED_SCSI_OPCODE;
888394f8118SMike Christie 		}
889394f8118SMike Christie 
890394f8118SMike Christie 		ret = ops->pr_preempt(bdev, key, sa_key,
891394f8118SMike Christie 				      scsi_pr_type_to_block(type),
892*40863cb9SMike Christie 				      sa == PRO_PREEMPT_AND_ABORT);
893394f8118SMike Christie 		break;
894394f8118SMike Christie 	case PRO_RELEASE:
895394f8118SMike Christie 		if (!ops->pr_release) {
896394f8118SMike Christie 			pr_err("block_device does not support pr_release.\n");
897394f8118SMike Christie 			return TCM_UNSUPPORTED_SCSI_OPCODE;
898394f8118SMike Christie 		}
899394f8118SMike Christie 
900394f8118SMike Christie 		ret = ops->pr_release(bdev, key, scsi_pr_type_to_block(type));
901394f8118SMike Christie 		break;
902394f8118SMike Christie 	default:
903394f8118SMike Christie 		pr_err("Unknown PERSISTENT_RESERVE_OUT SA: 0x%02x\n", sa);
904394f8118SMike Christie 		return TCM_UNSUPPORTED_SCSI_OPCODE;
905394f8118SMike Christie 	}
906394f8118SMike Christie 
907394f8118SMike Christie 	if (!ret)
908394f8118SMike Christie 		return TCM_NO_SENSE;
909394f8118SMike Christie 	else if (ret == PR_STS_RESERVATION_CONFLICT)
910394f8118SMike Christie 		return TCM_RESERVATION_CONFLICT;
911394f8118SMike Christie 	else
912394f8118SMike Christie 		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
913394f8118SMike Christie }
914394f8118SMike Christie 
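/*
 * Fill in the REPORT CAPABILITIES parameter data describing what the
 * pr_ops passthrough can support.
 */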
915394f8118SMike Christie static void iblock_pr_report_caps(unsigned char *param_data)
916394f8118SMike Christie {
917394f8118SMike Christie 	u16 len = 8;
918394f8118SMike Christie 
919394f8118SMike Christie 	put_unaligned_be16(len, &param_data[0]);
920394f8118SMike Christie 	/*
921394f8118SMike Christie 	 * When using the pr_ops passthrough method we only support exporting
922394f8118SMike Christie 	 * the device through one target port because from the backend module
923394f8118SMike Christie 	 * level we can't see the target port config. As a result we only
924394f8118SMike Christie 	 * support registration directly from the I_T nexus the cmd is sent
925394f8118SMike Christie 	 * through and do not set ATP_C here.
926394f8118SMike Christie 	 *
927394f8118SMike Christie 	 * The block layer pr_ops do not support passing in initiators so
928394f8118SMike Christie 	 * we don't set SIP_C here.
929394f8118SMike Christie 	 */
930394f8118SMike Christie 	/* PTPL_C: Persistence across Target Power Loss bit */
931394f8118SMike Christie 	param_data[2] |= 0x01;
932394f8118SMike Christie 	/*
933394f8118SMike Christie 	 * We are filling in the PERSISTENT RESERVATION TYPE MASK below, so
934394f8118SMike Christie 	 * set the TMV: Task Mask Valid bit.
935394f8118SMike Christie 	 */
936394f8118SMike Christie 	param_data[3] |= 0x80;
937394f8118SMike Christie 	/*
938394f8118SMike Christie 	 * Change ALLOW COMMANDs to 0x20 or 0x40 later from Table 166
939394f8118SMike Christie 	 */
940394f8118SMike Christie 	param_data[3] |= 0x10; /* ALLOW COMMANDs field 001b */
941394f8118SMike Christie 	/*
942394f8118SMike Christie 	 * PTPL_A: Persistence across Target Power Loss Active bit. The block
943394f8118SMike Christie 	 * layer pr ops always enable this, so report it as active.
944394f8118SMike Christie 	 */
945394f8118SMike Christie 	param_data[3] |= 0x01;
946394f8118SMike Christie 	/*
947394f8118SMike Christie 	 * Setup the PERSISTENT RESERVATION TYPE MASK from Table 212 spc4r37.
948394f8118SMike Christie 	 */
949394f8118SMike Christie 	param_data[4] |= 0x80; /* PR_TYPE_EXCLUSIVE_ACCESS_ALLREG */
950394f8118SMike Christie 	param_data[4] |= 0x40; /* PR_TYPE_EXCLUSIVE_ACCESS_REGONLY */
951394f8118SMike Christie 	param_data[4] |= 0x20; /* PR_TYPE_WRITE_EXCLUSIVE_REGONLY */
952394f8118SMike Christie 	param_data[4] |= 0x08; /* PR_TYPE_EXCLUSIVE_ACCESS */
953394f8118SMike Christie 	param_data[4] |= 0x02; /* PR_TYPE_WRITE_EXCLUSIVE */
954394f8118SMike Christie 	param_data[5] |= 0x01; /* PR_TYPE_EXCLUSIVE_ACCESS_ALLREG */
955394f8118SMike Christie }
956394f8118SMike Christie 
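/*
 * READ KEYS: fetch the registered keys via ops->pr_read_keys(), growing
 * the buffer until all keys fit, and format them into the parameter data.
 */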
957394f8118SMike Christie static sense_reason_t iblock_pr_read_keys(struct se_cmd *cmd,
958394f8118SMike Christie 					  unsigned char *param_data)
959394f8118SMike Christie {
960394f8118SMike Christie 	struct se_device *dev = cmd->se_dev;
961394f8118SMike Christie 	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
962394f8118SMike Christie 	struct block_device *bdev = ib_dev->ibd_bd;
963394f8118SMike Christie 	const struct pr_ops *ops = bdev->bd_disk->fops->pr_ops;
964394f8118SMike Christie 	int i, len, paths, data_offset;
965394f8118SMike Christie 	struct pr_keys *keys;
966394f8118SMike Christie 	sense_reason_t ret;
967394f8118SMike Christie 
968394f8118SMike Christie 	if (!ops) {
969394f8118SMike Christie 		pr_err("Block device does not support pr_ops but iblock device has been configured for PR passthrough.\n");
970394f8118SMike Christie 		return TCM_UNSUPPORTED_SCSI_OPCODE;
971394f8118SMike Christie 	}
972394f8118SMike Christie 
973394f8118SMike Christie 	if (!ops->pr_read_keys) {
974394f8118SMike Christie 		pr_err("Block device does not support read_keys.\n");
975394f8118SMike Christie 		return TCM_UNSUPPORTED_SCSI_OPCODE;
976394f8118SMike Christie 	}
977394f8118SMike Christie 
978394f8118SMike Christie 	/*
979394f8118SMike Christie 	 * We don't know what's under us, but dm-multipath will register every
980394f8118SMike Christie 	 * path with the same key, so start off with enough space for 16 paths,
981394f8118SMike Christie 	 * which is not a lot of memory and should normally be enough.
982394f8118SMike Christie 	 */
983394f8118SMike Christie 	paths = 16;
984394f8118SMike Christie retry:
985394f8118SMike Christie 	len = 8 * paths;
986394f8118SMike Christie 	keys = kzalloc(sizeof(*keys) + len, GFP_KERNEL);
987394f8118SMike Christie 	if (!keys)
988394f8118SMike Christie 		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
989394f8118SMike Christie 
990394f8118SMike Christie 	keys->num_keys = paths;
991394f8118SMike Christie 	if (!ops->pr_read_keys(bdev, keys)) {
992394f8118SMike Christie 		if (keys->num_keys > paths) {
993394f8118SMike Christie 			kfree(keys);
994394f8118SMike Christie 			paths *= 2;
995394f8118SMike Christie 			goto retry;
996394f8118SMike Christie 		}
997394f8118SMike Christie 	} else {
998394f8118SMike Christie 		ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
999394f8118SMike Christie 		goto free_keys;
1000394f8118SMike Christie 	}
1001394f8118SMike Christie 
1002394f8118SMike Christie 	ret = TCM_NO_SENSE;
1003394f8118SMike Christie 
1004394f8118SMike Christie 	put_unaligned_be32(keys->generation, &param_data[0]);
1005394f8118SMike Christie 	if (!keys->num_keys) {
1006394f8118SMike Christie 		put_unaligned_be32(0, &param_data[4]);
1007394f8118SMike Christie 		goto free_keys;
1008394f8118SMike Christie 	}
1009394f8118SMike Christie 
1010394f8118SMike Christie 	put_unaligned_be32(8 * keys->num_keys, &param_data[4]);
1011394f8118SMike Christie 
1012394f8118SMike Christie 	data_offset = 8;
1013394f8118SMike Christie 	for (i = 0; i < keys->num_keys; i++) {
1014394f8118SMike Christie 		if (data_offset + 8 > cmd->data_length)
1015394f8118SMike Christie 			break;
1016394f8118SMike Christie 
1017394f8118SMike Christie 		put_unaligned_be64(keys->keys[i], &param_data[data_offset]);
1018394f8118SMike Christie 		data_offset += 8;
1019394f8118SMike Christie 	}
1020394f8118SMike Christie 
1021394f8118SMike Christie free_keys:
1022394f8118SMike Christie 	kfree(keys);
1023394f8118SMike Christie 	return ret;
1024394f8118SMike Christie }
1025394f8118SMike Christie 
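/*
 * READ RESERVATION: report the reservation currently held on the backing
 * device, if any, via ops->pr_read_reservation().
 */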
1026394f8118SMike Christie static sense_reason_t iblock_pr_read_reservation(struct se_cmd *cmd,
1027394f8118SMike Christie 						 unsigned char *param_data)
1028394f8118SMike Christie {
1029394f8118SMike Christie 	struct se_device *dev = cmd->se_dev;
1030394f8118SMike Christie 	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
1031394f8118SMike Christie 	struct block_device *bdev = ib_dev->ibd_bd;
1032394f8118SMike Christie 	const struct pr_ops *ops = bdev->bd_disk->fops->pr_ops;
1033394f8118SMike Christie 	struct pr_held_reservation rsv = { };
1034394f8118SMike Christie 
1035394f8118SMike Christie 	if (!ops) {
1036394f8118SMike Christie 		pr_err("Block device does not support pr_ops but iblock device has been configured for PR passthrough.\n");
1037394f8118SMike Christie 		return TCM_UNSUPPORTED_SCSI_OPCODE;
1038394f8118SMike Christie 	}
1039394f8118SMike Christie 
1040394f8118SMike Christie 	if (!ops->pr_read_reservation) {
1041394f8118SMike Christie 		pr_err("Block device does not support read_reservation.\n");
1042394f8118SMike Christie 		return TCM_UNSUPPORTED_SCSI_OPCODE;
1043394f8118SMike Christie 	}
1044394f8118SMike Christie 
1045394f8118SMike Christie 	if (ops->pr_read_reservation(bdev, &rsv))
1046394f8118SMike Christie 		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
1047394f8118SMike Christie 
1048394f8118SMike Christie 	put_unaligned_be32(rsv.generation, &param_data[0]);
1049394f8118SMike Christie 	if (!block_pr_type_to_scsi(rsv.type)) {
1050394f8118SMike Christie 		put_unaligned_be32(0, &param_data[4]);
1051394f8118SMike Christie 		return TCM_NO_SENSE;
1052394f8118SMike Christie 	}
1053394f8118SMike Christie 
1054394f8118SMike Christie 	put_unaligned_be32(16, &param_data[4]);
1055394f8118SMike Christie 
1056394f8118SMike Christie 	if (cmd->data_length < 16)
1057394f8118SMike Christie 		return TCM_NO_SENSE;
1058394f8118SMike Christie 	put_unaligned_be64(rsv.key, &param_data[8]);
1059394f8118SMike Christie 
1060394f8118SMike Christie 	if (cmd->data_length < 22)
1061394f8118SMike Christie 		return TCM_NO_SENSE;
1062394f8118SMike Christie 	param_data[21] = block_pr_type_to_scsi(rsv.type);
1063394f8118SMike Christie 
1064394f8118SMike Christie 	return TCM_NO_SENSE;
1065394f8118SMike Christie }
1066394f8118SMike Christie 
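/*
 * Dispatch PERSISTENT_RESERVE_IN service actions to the handlers above and
 * fill in the returned parameter data.
 */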
1067394f8118SMike Christie static sense_reason_t iblock_execute_pr_in(struct se_cmd *cmd, u8 sa,
1068394f8118SMike Christie 					   unsigned char *param_data)
1069394f8118SMike Christie {
1070394f8118SMike Christie 	sense_reason_t ret = TCM_NO_SENSE;
1071394f8118SMike Christie 
1072394f8118SMike Christie 	switch (sa) {
1073394f8118SMike Christie 	case PRI_REPORT_CAPABILITIES:
1074394f8118SMike Christie 		iblock_pr_report_caps(param_data);
1075394f8118SMike Christie 		break;
1076394f8118SMike Christie 	case PRI_READ_KEYS:
1077394f8118SMike Christie 		ret = iblock_pr_read_keys(cmd, param_data);
1078394f8118SMike Christie 		break;
1079394f8118SMike Christie 	case PRI_READ_RESERVATION:
1080394f8118SMike Christie 		ret = iblock_pr_read_reservation(cmd, param_data);
1081394f8118SMike Christie 		break;
1082394f8118SMike Christie 	default:
1083394f8118SMike Christie 		pr_err("Unknown PERSISTENT_RESERVE_IN SA: 0x%02x\n", sa);
1084394f8118SMike Christie 		return TCM_UNSUPPORTED_SCSI_OPCODE;
1085394f8118SMike Christie 	}
1086394f8118SMike Christie 
1087394f8118SMike Christie 	return ret;
1088394f8118SMike Christie }
1089394f8118SMike Christie 
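/*
 * Convert the backing device's alignment offset from bytes to logical
 * blocks; an unknown alignment (-1) is reported as zero.
 */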
10907f7caf6aSAndy Grover static sector_t iblock_get_alignment_offset_lbas(struct se_device *dev)
10917f7caf6aSAndy Grover {
10927f7caf6aSAndy Grover 	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
10937f7caf6aSAndy Grover 	struct block_device *bd = ib_dev->ibd_bd;
10947f7caf6aSAndy Grover 	int ret;
10957f7caf6aSAndy Grover 
10967f7caf6aSAndy Grover 	ret = bdev_alignment_offset(bd);
10977f7caf6aSAndy Grover 	if (ret == -1)
10987f7caf6aSAndy Grover 		return 0;
10997f7caf6aSAndy Grover 
11007f7caf6aSAndy Grover 	/* convert offset-bytes to offset-lbas */
11017f7caf6aSAndy Grover 	return ret / bdev_logical_block_size(bd);
11027f7caf6aSAndy Grover }
11037f7caf6aSAndy Grover 
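/* Logical blocks per physical block exponent: log2 of the physical-to-logical block size ratio. */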
11047f7caf6aSAndy Grover static unsigned int iblock_get_lbppbe(struct se_device *dev)
11057f7caf6aSAndy Grover {
11067f7caf6aSAndy Grover 	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
11077f7caf6aSAndy Grover 	struct block_device *bd = ib_dev->ibd_bd;
1108c151eddbSChaitanya Kulkarni 	unsigned int logs_per_phys =
1109a2c6c6a3SChaitanya Kulkarni 		bdev_physical_block_size(bd) / bdev_logical_block_size(bd);
11107f7caf6aSAndy Grover 
11117f7caf6aSAndy Grover 	return ilog2(logs_per_phys);
11127f7caf6aSAndy Grover }
11137f7caf6aSAndy Grover 
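/* Minimum I/O granularity advertised by the backing device's queue limits. */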
11147f7caf6aSAndy Grover static unsigned int iblock_get_io_min(struct se_device *dev)
11157f7caf6aSAndy Grover {
11167f7caf6aSAndy Grover 	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
11177f7caf6aSAndy Grover 	struct block_device *bd = ib_dev->ibd_bd;
11187f7caf6aSAndy Grover 
11197f7caf6aSAndy Grover 	return bdev_io_min(bd);
11207f7caf6aSAndy Grover }
11217f7caf6aSAndy Grover 
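/* Optimal I/O size advertised by the backing device's queue limits. */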
11227f7caf6aSAndy Grover static unsigned int iblock_get_io_opt(struct se_device *dev)
11237f7caf6aSAndy Grover {
11247f7caf6aSAndy Grover 	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
11257f7caf6aSAndy Grover 	struct block_device *bd = ib_dev->ibd_bd;
11267f7caf6aSAndy Grover 
11277f7caf6aSAndy Grover 	return bdev_io_opt(bd);
11287f7caf6aSAndy Grover }
11297f7caf6aSAndy Grover 
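/* SBC command execution callbacks handed to sbc_parse_cdb() below. */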
11300217da08SMike Christie static struct exec_cmd_ops iblock_exec_cmd_ops = {
11310c2ad7d1SChristoph Hellwig 	.execute_rw		= iblock_execute_rw,
1132ad67f0d9SChristoph Hellwig 	.execute_sync_cache	= iblock_execute_sync_cache,
11336f974e8cSChristoph Hellwig 	.execute_write_same	= iblock_execute_write_same,
113414150a6bSChristoph Hellwig 	.execute_unmap		= iblock_execute_unmap,
1135394f8118SMike Christie 	.execute_pr_out		= iblock_execute_pr_out,
1136394f8118SMike Christie 	.execute_pr_in		= iblock_execute_pr_in,
11370c2ad7d1SChristoph Hellwig };
11380c2ad7d1SChristoph Hellwig 
1139de103c93SChristoph Hellwig static sense_reason_t
1140de103c93SChristoph Hellwig iblock_parse_cdb(struct se_cmd *cmd)
11410c2ad7d1SChristoph Hellwig {
11420217da08SMike Christie 	return sbc_parse_cdb(cmd, &iblock_exec_cmd_ops);
11430c2ad7d1SChristoph Hellwig }
11440c2ad7d1SChristoph Hellwig 
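/* Report whether the backing device has a writeback cache enabled. */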
1145452e2010SRashika Kheria static bool iblock_get_write_cache(struct se_device *dev)
1146d0c8b259SNicholas Bellinger {
114708e688fdSChristoph Hellwig 	return bdev_write_cache(IBLOCK_DEV(dev)->ibd_bd);
1148d0c8b259SNicholas Bellinger }
1149d0c8b259SNicholas Bellinger 
11500a06d430SChristoph Hellwig static const struct target_backend_ops iblock_ops = {
1151c66ac9dbSNicholas Bellinger 	.name			= "iblock",
11520fd97ccfSChristoph Hellwig 	.inquiry_prod		= "IBLOCK",
1153394f8118SMike Christie 	.transport_flags_changeable = TRANSPORT_FLAG_PASSTHROUGH_PGR,
11540fd97ccfSChristoph Hellwig 	.inquiry_rev		= IBLOCK_VERSION,
1155c66ac9dbSNicholas Bellinger 	.owner			= THIS_MODULE,
1156c66ac9dbSNicholas Bellinger 	.attach_hba		= iblock_attach_hba,
1157c66ac9dbSNicholas Bellinger 	.detach_hba		= iblock_detach_hba,
11580fd97ccfSChristoph Hellwig 	.alloc_device		= iblock_alloc_device,
11590fd97ccfSChristoph Hellwig 	.configure_device	= iblock_configure_device,
116092634706SMike Christie 	.destroy_device		= iblock_destroy_device,
1161c66ac9dbSNicholas Bellinger 	.free_device		= iblock_free_device,
1162d7c382c5SMike Christie 	.configure_unmap	= iblock_configure_unmap,
1163415ccd98SMike Christie 	.plug_device		= iblock_plug_device,
1164415ccd98SMike Christie 	.unplug_device		= iblock_unplug_device,
11650c2ad7d1SChristoph Hellwig 	.parse_cdb		= iblock_parse_cdb,
1166c66ac9dbSNicholas Bellinger 	.set_configfs_dev_params = iblock_set_configfs_dev_params,
1167c66ac9dbSNicholas Bellinger 	.show_configfs_dev_params = iblock_show_configfs_dev_params,
11686f23ac8aSChristoph Hellwig 	.get_device_type	= sbc_get_device_type,
1169c66ac9dbSNicholas Bellinger 	.get_blocks		= iblock_get_blocks,
11707f7caf6aSAndy Grover 	.get_alignment_offset_lbas = iblock_get_alignment_offset_lbas,
11717f7caf6aSAndy Grover 	.get_lbppbe		= iblock_get_lbppbe,
11727f7caf6aSAndy Grover 	.get_io_min		= iblock_get_io_min,
11737f7caf6aSAndy Grover 	.get_io_opt		= iblock_get_io_opt,
1174d0c8b259SNicholas Bellinger 	.get_write_cache	= iblock_get_write_cache,
11755873c4d1SChristoph Hellwig 	.tb_dev_attrib_attrs	= sbc_attrib_attrs,
1176c66ac9dbSNicholas Bellinger };
1177c66ac9dbSNicholas Bellinger 
1178c66ac9dbSNicholas Bellinger static int __init iblock_module_init(void)
1179c66ac9dbSNicholas Bellinger {
11800a06d430SChristoph Hellwig 	return transport_backend_register(&iblock_ops);
1181c66ac9dbSNicholas Bellinger }
1182c66ac9dbSNicholas Bellinger 
118363b91d5aSAsias He static void __exit iblock_module_exit(void)
1184c66ac9dbSNicholas Bellinger {
11850a06d430SChristoph Hellwig 	target_backend_unregister(&iblock_ops);
1186c66ac9dbSNicholas Bellinger }
1187c66ac9dbSNicholas Bellinger 
1188c66ac9dbSNicholas Bellinger MODULE_DESCRIPTION("TCM IBLOCK subsystem plugin");
1189c66ac9dbSNicholas Bellinger MODULE_AUTHOR("nab@Linux-iSCSI.org");
1190c66ac9dbSNicholas Bellinger MODULE_LICENSE("GPL");
1191c66ac9dbSNicholas Bellinger 
1192c66ac9dbSNicholas Bellinger module_init(iblock_module_init);
1193c66ac9dbSNicholas Bellinger module_exit(iblock_module_exit);
1194