1c66ac9dbSNicholas Bellinger /*******************************************************************************
2c66ac9dbSNicholas Bellinger  * Filename:  target_core_iblock.c
3c66ac9dbSNicholas Bellinger  *
4c66ac9dbSNicholas Bellinger  * This file contains the Storage Engine  <-> Linux BlockIO transport
5c66ac9dbSNicholas Bellinger  * specific functions.
6c66ac9dbSNicholas Bellinger  *
7c66ac9dbSNicholas Bellinger  * Copyright (c) 2003, 2004, 2005 PyX Technologies, Inc.
8c66ac9dbSNicholas Bellinger  * Copyright (c) 2005, 2006, 2007 SBE, Inc.
9c66ac9dbSNicholas Bellinger  * Copyright (c) 2007-2010 Rising Tide Systems
10c66ac9dbSNicholas Bellinger  * Copyright (c) 2008-2010 Linux-iSCSI.org
11c66ac9dbSNicholas Bellinger  *
12c66ac9dbSNicholas Bellinger  * Nicholas A. Bellinger <nab@kernel.org>
13c66ac9dbSNicholas Bellinger  *
14c66ac9dbSNicholas Bellinger  * This program is free software; you can redistribute it and/or modify
15c66ac9dbSNicholas Bellinger  * it under the terms of the GNU General Public License as published by
16c66ac9dbSNicholas Bellinger  * the Free Software Foundation; either version 2 of the License, or
17c66ac9dbSNicholas Bellinger  * (at your option) any later version.
18c66ac9dbSNicholas Bellinger  *
19c66ac9dbSNicholas Bellinger  * This program is distributed in the hope that it will be useful,
20c66ac9dbSNicholas Bellinger  * but WITHOUT ANY WARRANTY; without even the implied warranty of
21c66ac9dbSNicholas Bellinger  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
22c66ac9dbSNicholas Bellinger  * GNU General Public License for more details.
23c66ac9dbSNicholas Bellinger  *
24c66ac9dbSNicholas Bellinger  * You should have received a copy of the GNU General Public License
25c66ac9dbSNicholas Bellinger  * along with this program; if not, write to the Free Software
26c66ac9dbSNicholas Bellinger  * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
27c66ac9dbSNicholas Bellinger  *
28c66ac9dbSNicholas Bellinger  ******************************************************************************/
29c66ac9dbSNicholas Bellinger 
30c66ac9dbSNicholas Bellinger #include <linux/string.h>
31c66ac9dbSNicholas Bellinger #include <linux/parser.h>
32c66ac9dbSNicholas Bellinger #include <linux/timer.h>
33c66ac9dbSNicholas Bellinger #include <linux/fs.h>
34c66ac9dbSNicholas Bellinger #include <linux/blkdev.h>
35c66ac9dbSNicholas Bellinger #include <linux/slab.h>
36c66ac9dbSNicholas Bellinger #include <linux/spinlock.h>
37c66ac9dbSNicholas Bellinger #include <linux/bio.h>
38c66ac9dbSNicholas Bellinger #include <linux/genhd.h>
39c66ac9dbSNicholas Bellinger #include <linux/file.h>
40827509e3SPaul Gortmaker #include <linux/module.h>
41c66ac9dbSNicholas Bellinger #include <scsi/scsi.h>
42c66ac9dbSNicholas Bellinger #include <scsi/scsi_host.h>
4314150a6bSChristoph Hellwig #include <asm/unaligned.h>
44c66ac9dbSNicholas Bellinger 
45c66ac9dbSNicholas Bellinger #include <target/target_core_base.h>
46c4795fb2SChristoph Hellwig #include <target/target_core_backend.h>
47c66ac9dbSNicholas Bellinger 
48c66ac9dbSNicholas Bellinger #include "target_core_iblock.h"
49c66ac9dbSNicholas Bellinger 
50d5b4a21bSChristoph Hellwig #define IBLOCK_MAX_BIO_PER_TASK	 32	/* max # of bios to submit at a time */
51d5b4a21bSChristoph Hellwig #define IBLOCK_BIO_POOL_SIZE	128
52d5b4a21bSChristoph Hellwig 
53c66ac9dbSNicholas Bellinger static struct se_subsystem_api iblock_template;
54c66ac9dbSNicholas Bellinger 
55c66ac9dbSNicholas Bellinger static void iblock_bio_done(struct bio *, int);
56c66ac9dbSNicholas Bellinger 
57c66ac9dbSNicholas Bellinger /*	iblock_attach_hba(): (Part of se_subsystem_api_t template)
58c66ac9dbSNicholas Bellinger  *
59c66ac9dbSNicholas Bellinger  *	IBLOCK keeps no per-HBA state, so attaching an HBA only logs the event.
60c66ac9dbSNicholas Bellinger  */
61c66ac9dbSNicholas Bellinger static int iblock_attach_hba(struct se_hba *hba, u32 host_id)
62c66ac9dbSNicholas Bellinger {
636708bb27SAndy Grover 	pr_debug("CORE_HBA[%d] - TCM iBlock HBA Driver %s on"
64c66ac9dbSNicholas Bellinger 		" Generic Target Core Stack %s\n", hba->hba_id,
65c66ac9dbSNicholas Bellinger 		IBLOCK_VERSION, TARGET_CORE_MOD_VERSION);
66c66ac9dbSNicholas Bellinger 	return 0;
67c66ac9dbSNicholas Bellinger }
68c66ac9dbSNicholas Bellinger 
69c66ac9dbSNicholas Bellinger static void iblock_detach_hba(struct se_hba *hba)
70c66ac9dbSNicholas Bellinger {
71c66ac9dbSNicholas Bellinger }
72c66ac9dbSNicholas Bellinger 
73c66ac9dbSNicholas Bellinger static void *iblock_allocate_virtdevice(struct se_hba *hba, const char *name)
74c66ac9dbSNicholas Bellinger {
75c66ac9dbSNicholas Bellinger 	struct iblock_dev *ib_dev = NULL;
76c66ac9dbSNicholas Bellinger 
77c66ac9dbSNicholas Bellinger 	ib_dev = kzalloc(sizeof(struct iblock_dev), GFP_KERNEL);
786708bb27SAndy Grover 	if (!ib_dev) {
796708bb27SAndy Grover 		pr_err("Unable to allocate struct iblock_dev\n");
80c66ac9dbSNicholas Bellinger 		return NULL;
81c66ac9dbSNicholas Bellinger 	}
82c66ac9dbSNicholas Bellinger 
836708bb27SAndy Grover 	pr_debug("IBLOCK: Allocated ib_dev for %s\n", name);
84c66ac9dbSNicholas Bellinger 
85c66ac9dbSNicholas Bellinger 	return ib_dev;
86c66ac9dbSNicholas Bellinger }
87c66ac9dbSNicholas Bellinger 
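/*
 * Claim the block device named by udev_path (read-only when requested),
 * create the per-device bioset, copy the bdev queue limits, and register
 * the resulting se_device with the target core.
 */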
88c66ac9dbSNicholas Bellinger static struct se_device *iblock_create_virtdevice(
89c66ac9dbSNicholas Bellinger 	struct se_hba *hba,
90c66ac9dbSNicholas Bellinger 	struct se_subsystem_dev *se_dev,
91c66ac9dbSNicholas Bellinger 	void *p)
92c66ac9dbSNicholas Bellinger {
93c66ac9dbSNicholas Bellinger 	struct iblock_dev *ib_dev = p;
94c66ac9dbSNicholas Bellinger 	struct se_device *dev;
95c66ac9dbSNicholas Bellinger 	struct se_dev_limits dev_limits;
96c66ac9dbSNicholas Bellinger 	struct block_device *bd = NULL;
97c66ac9dbSNicholas Bellinger 	struct request_queue *q;
98c66ac9dbSNicholas Bellinger 	struct queue_limits *limits;
99c66ac9dbSNicholas Bellinger 	u32 dev_flags = 0;
10044bfd018SAndy Grover 	fmode_t mode;
101613640e4SNicholas Bellinger 	int ret = -EINVAL;
102c66ac9dbSNicholas Bellinger 
1036708bb27SAndy Grover 	if (!ib_dev) {
1046708bb27SAndy Grover 		pr_err("Unable to locate struct iblock_dev parameter\n");
105613640e4SNicholas Bellinger 		return ERR_PTR(ret);
106c66ac9dbSNicholas Bellinger 	}
107c66ac9dbSNicholas Bellinger 	memset(&dev_limits, 0, sizeof(struct se_dev_limits));
108d5b4a21bSChristoph Hellwig 
109d5b4a21bSChristoph Hellwig 	ib_dev->ibd_bio_set = bioset_create(IBLOCK_BIO_POOL_SIZE, 0);
1106708bb27SAndy Grover 	if (!ib_dev->ibd_bio_set) {
1116708bb27SAndy Grover 		pr_err("IBLOCK: Unable to create bioset()\n");
112613640e4SNicholas Bellinger 		return ERR_PTR(-ENOMEM);
113c66ac9dbSNicholas Bellinger 	}
1146708bb27SAndy Grover 	pr_debug("IBLOCK: Created bio_set()\n");
115c66ac9dbSNicholas Bellinger 	/*
116c66ac9dbSNicholas Bellinger 	 * iblock_check_configfs_dev_params() ensures that ib_dev->ibd_udev_path
117c66ac9dbSNicholas Bellinger 	 * has already been set before "echo 1 > $HBA/$DEV/enable" can succeed.
118c66ac9dbSNicholas Bellinger 	 */
1196708bb27SAndy Grover 	pr_debug("IBLOCK: Claiming struct block_device: %s\n",
120c66ac9dbSNicholas Bellinger 			ib_dev->ibd_udev_path);
121c66ac9dbSNicholas Bellinger 
12244bfd018SAndy Grover 	mode = FMODE_READ|FMODE_EXCL;
12344bfd018SAndy Grover 	if (!ib_dev->ibd_readonly)
12444bfd018SAndy Grover 		mode |= FMODE_WRITE;
12544bfd018SAndy Grover 
12644bfd018SAndy Grover 	bd = blkdev_get_by_path(ib_dev->ibd_udev_path, mode, ib_dev);
127613640e4SNicholas Bellinger 	if (IS_ERR(bd)) {
128613640e4SNicholas Bellinger 		ret = PTR_ERR(bd);
129c66ac9dbSNicholas Bellinger 		goto failed;
130613640e4SNicholas Bellinger 	}
131c66ac9dbSNicholas Bellinger 	/*
132c66ac9dbSNicholas Bellinger 	 * Set up the local-scope queue_limits from struct request_queue->limits
133c66ac9dbSNicholas Bellinger 	 * to pass into transport_add_device_to_core_hba() as struct se_dev_limits.
134c66ac9dbSNicholas Bellinger 	 */
135c66ac9dbSNicholas Bellinger 	q = bdev_get_queue(bd);
136c66ac9dbSNicholas Bellinger 	limits = &dev_limits.limits;
137c66ac9dbSNicholas Bellinger 	limits->logical_block_size = bdev_logical_block_size(bd);
138d5b4a21bSChristoph Hellwig 	limits->max_hw_sectors = UINT_MAX;
139d5b4a21bSChristoph Hellwig 	limits->max_sectors = UINT_MAX;
1408f3d14e2SNicholas Bellinger 	dev_limits.hw_queue_depth = q->nr_requests;
1418f3d14e2SNicholas Bellinger 	dev_limits.queue_depth = q->nr_requests;
142c66ac9dbSNicholas Bellinger 
143c66ac9dbSNicholas Bellinger 	ib_dev->ibd_bd = bd;
144c66ac9dbSNicholas Bellinger 
145c66ac9dbSNicholas Bellinger 	dev = transport_add_device_to_core_hba(hba,
1465951146dSAndy Grover 			&iblock_template, se_dev, dev_flags, ib_dev,
147c66ac9dbSNicholas Bellinger 			&dev_limits, "IBLOCK", IBLOCK_VERSION);
1486708bb27SAndy Grover 	if (!dev)
149c66ac9dbSNicholas Bellinger 		goto failed;
150c66ac9dbSNicholas Bellinger 
151c66ac9dbSNicholas Bellinger 	/*
152c66ac9dbSNicholas Bellinger 	 * Check if the underlying struct block_device request_queue supports
153c66ac9dbSNicholas Bellinger 	 * the QUEUE_FLAG_DISCARD bit for UNMAP/WRITE_SAME in SCSI (TRIM in
154c66ac9dbSNicholas Bellinger 	 * ATA); if so, copy the discard limits so that TPE=1 can be reported.
155c66ac9dbSNicholas Bellinger 	 */
156613640e4SNicholas Bellinger 	if (blk_queue_discard(q)) {
157e3d6f909SAndy Grover 		dev->se_sub_dev->se_dev_attrib.max_unmap_lba_count =
158c66ac9dbSNicholas Bellinger 				q->limits.max_discard_sectors;
159c66ac9dbSNicholas Bellinger 		/*
160c66ac9dbSNicholas Bellinger 		 * Currently hardcoded to 1 in Linux/SCSI code.
161c66ac9dbSNicholas Bellinger 		 */
162e3d6f909SAndy Grover 		dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count = 1;
163e3d6f909SAndy Grover 		dev->se_sub_dev->se_dev_attrib.unmap_granularity =
1647347b5ffSMarco Sanvido 				q->limits.discard_granularity >> 9;
165e3d6f909SAndy Grover 		dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment =
166c66ac9dbSNicholas Bellinger 				q->limits.discard_alignment;
167c66ac9dbSNicholas Bellinger 
1686708bb27SAndy Grover 		pr_debug("IBLOCK: BLOCK Discard support available,"
169c66ac9dbSNicholas Bellinger 				" disabled by default\n");
170c66ac9dbSNicholas Bellinger 	}
171c66ac9dbSNicholas Bellinger 
172e22a7f07SRoland Dreier 	if (blk_queue_nonrot(q))
173e22a7f07SRoland Dreier 		dev->se_sub_dev->se_dev_attrib.is_nonrot = 1;
174e22a7f07SRoland Dreier 
175c66ac9dbSNicholas Bellinger 	return dev;
176c66ac9dbSNicholas Bellinger 
177c66ac9dbSNicholas Bellinger failed:
178c66ac9dbSNicholas Bellinger 	if (ib_dev->ibd_bio_set) {
179c66ac9dbSNicholas Bellinger 		bioset_free(ib_dev->ibd_bio_set);
180c66ac9dbSNicholas Bellinger 		ib_dev->ibd_bio_set = NULL;
181c66ac9dbSNicholas Bellinger 	}
182c66ac9dbSNicholas Bellinger 	ib_dev->ibd_bd = NULL;
183613640e4SNicholas Bellinger 	return ERR_PTR(ret);
184c66ac9dbSNicholas Bellinger }
185c66ac9dbSNicholas Bellinger 
186c66ac9dbSNicholas Bellinger static void iblock_free_device(void *p)
187c66ac9dbSNicholas Bellinger {
188c66ac9dbSNicholas Bellinger 	struct iblock_dev *ib_dev = p;
189c66ac9dbSNicholas Bellinger 
190bc665524SNicholas Bellinger 	if (ib_dev->ibd_bd != NULL)
191c66ac9dbSNicholas Bellinger 		blkdev_put(ib_dev->ibd_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL);
192bc665524SNicholas Bellinger 	if (ib_dev->ibd_bio_set != NULL)
193c66ac9dbSNicholas Bellinger 		bioset_free(ib_dev->ibd_bio_set);
194c66ac9dbSNicholas Bellinger 	kfree(ib_dev);
195c66ac9dbSNicholas Bellinger }
196c66ac9dbSNicholas Bellinger 
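/*
 * Scale the bdev capacity (last LBA in units of the bdev's logical block
 * size) to the block_size currently advertised to the initiator.  For
 * example, a bdev with 512-byte logical blocks exported with a 4096-byte
 * block_size reports one eighth as many blocks (blocks_long >>= 3).
 */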
197c66ac9dbSNicholas Bellinger static unsigned long long iblock_emulate_read_cap_with_block_size(
198c66ac9dbSNicholas Bellinger 	struct se_device *dev,
199c66ac9dbSNicholas Bellinger 	struct block_device *bd,
200c66ac9dbSNicholas Bellinger 	struct request_queue *q)
201c66ac9dbSNicholas Bellinger {
202c66ac9dbSNicholas Bellinger 	unsigned long long blocks_long = (div_u64(i_size_read(bd->bd_inode),
203c66ac9dbSNicholas Bellinger 					bdev_logical_block_size(bd)) - 1);
204c66ac9dbSNicholas Bellinger 	u32 block_size = bdev_logical_block_size(bd);
205c66ac9dbSNicholas Bellinger 
206e3d6f909SAndy Grover 	if (block_size == dev->se_sub_dev->se_dev_attrib.block_size)
207c66ac9dbSNicholas Bellinger 		return blocks_long;
208c66ac9dbSNicholas Bellinger 
209c66ac9dbSNicholas Bellinger 	switch (block_size) {
210c66ac9dbSNicholas Bellinger 	case 4096:
211e3d6f909SAndy Grover 		switch (dev->se_sub_dev->se_dev_attrib.block_size) {
212c66ac9dbSNicholas Bellinger 		case 2048:
213c66ac9dbSNicholas Bellinger 			blocks_long <<= 1;
214c66ac9dbSNicholas Bellinger 			break;
215c66ac9dbSNicholas Bellinger 		case 1024:
216c66ac9dbSNicholas Bellinger 			blocks_long <<= 2;
217c66ac9dbSNicholas Bellinger 			break;
218c66ac9dbSNicholas Bellinger 		case 512:
219c66ac9dbSNicholas Bellinger 			blocks_long <<= 3;
			break;
220c66ac9dbSNicholas Bellinger 		default:
221c66ac9dbSNicholas Bellinger 			break;
222c66ac9dbSNicholas Bellinger 		}
223c66ac9dbSNicholas Bellinger 		break;
224c66ac9dbSNicholas Bellinger 	case 2048:
225e3d6f909SAndy Grover 		switch (dev->se_sub_dev->se_dev_attrib.block_size) {
226c66ac9dbSNicholas Bellinger 		case 4096:
227c66ac9dbSNicholas Bellinger 			blocks_long >>= 1;
228c66ac9dbSNicholas Bellinger 			break;
229c66ac9dbSNicholas Bellinger 		case 1024:
230c66ac9dbSNicholas Bellinger 			blocks_long <<= 1;
231c66ac9dbSNicholas Bellinger 			break;
232c66ac9dbSNicholas Bellinger 		case 512:
233c66ac9dbSNicholas Bellinger 			blocks_long <<= 2;
234c66ac9dbSNicholas Bellinger 			break;
235c66ac9dbSNicholas Bellinger 		default:
236c66ac9dbSNicholas Bellinger 			break;
237c66ac9dbSNicholas Bellinger 		}
238c66ac9dbSNicholas Bellinger 		break;
239c66ac9dbSNicholas Bellinger 	case 1024:
240e3d6f909SAndy Grover 		switch (dev->se_sub_dev->se_dev_attrib.block_size) {
241c66ac9dbSNicholas Bellinger 		case 4096:
242c66ac9dbSNicholas Bellinger 			blocks_long >>= 2;
243c66ac9dbSNicholas Bellinger 			break;
244c66ac9dbSNicholas Bellinger 		case 2048:
245c66ac9dbSNicholas Bellinger 			blocks_long >>= 1;
246c66ac9dbSNicholas Bellinger 			break;
247c66ac9dbSNicholas Bellinger 		case 512:
248c66ac9dbSNicholas Bellinger 			blocks_long <<= 1;
249c66ac9dbSNicholas Bellinger 			break;
250c66ac9dbSNicholas Bellinger 		default:
251c66ac9dbSNicholas Bellinger 			break;
252c66ac9dbSNicholas Bellinger 		}
253c66ac9dbSNicholas Bellinger 		break;
254c66ac9dbSNicholas Bellinger 	case 512:
255e3d6f909SAndy Grover 		switch (dev->se_sub_dev->se_dev_attrib.block_size) {
256c66ac9dbSNicholas Bellinger 		case 4096:
257c66ac9dbSNicholas Bellinger 			blocks_long >>= 3;
258c66ac9dbSNicholas Bellinger 			break;
259c66ac9dbSNicholas Bellinger 		case 2048:
260c66ac9dbSNicholas Bellinger 			blocks_long >>= 2;
261c66ac9dbSNicholas Bellinger 			break;
262c66ac9dbSNicholas Bellinger 		case 1024:
263c66ac9dbSNicholas Bellinger 			blocks_long >>= 1;
264c66ac9dbSNicholas Bellinger 			break;
265c66ac9dbSNicholas Bellinger 		default:
266c66ac9dbSNicholas Bellinger 			break;
267c66ac9dbSNicholas Bellinger 		}
268c66ac9dbSNicholas Bellinger 		break;
269c66ac9dbSNicholas Bellinger 	default:
270c66ac9dbSNicholas Bellinger 		break;
271c66ac9dbSNicholas Bellinger 	}
272c66ac9dbSNicholas Bellinger 
273c66ac9dbSNicholas Bellinger 	return blocks_long;
274c66ac9dbSNicholas Bellinger }
275c66ac9dbSNicholas Bellinger 
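/*
 * Completion handler for the empty flush bio issued by
 * iblock_execute_sync_cache().  bio->bi_private carries the se_cmd only
 * when the IMMED bit was clear, in which case the command is completed here.
 */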
276df5fa691SChristoph Hellwig static void iblock_end_io_flush(struct bio *bio, int err)
277df5fa691SChristoph Hellwig {
278df5fa691SChristoph Hellwig 	struct se_cmd *cmd = bio->bi_private;
279df5fa691SChristoph Hellwig 
280df5fa691SChristoph Hellwig 	if (err)
281df5fa691SChristoph Hellwig 		pr_err("IBLOCK: cache flush failed: %d\n", err);
282df5fa691SChristoph Hellwig 
2835787cacdSChristoph Hellwig 	if (cmd) {
2845787cacdSChristoph Hellwig 		if (err) {
2855787cacdSChristoph Hellwig 			cmd->scsi_sense_reason =
2865787cacdSChristoph Hellwig 				TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
2875787cacdSChristoph Hellwig 			target_complete_cmd(cmd, SAM_STAT_CHECK_CONDITION);
2885787cacdSChristoph Hellwig 		} else {
2895787cacdSChristoph Hellwig 			target_complete_cmd(cmd, SAM_STAT_GOOD);
2905787cacdSChristoph Hellwig 		}
2915787cacdSChristoph Hellwig 	}
2925787cacdSChristoph Hellwig 
293df5fa691SChristoph Hellwig 	bio_put(bio);
294df5fa691SChristoph Hellwig }
295df5fa691SChristoph Hellwig 
296c66ac9dbSNicholas Bellinger /*
297df5fa691SChristoph Hellwig  * Implement SYNCHRONIZE CACHE.  Note that we can't handle LBA ranges and must
298df5fa691SChristoph Hellwig  * always flush the whole cache.
299c66ac9dbSNicholas Bellinger  */
300ad67f0d9SChristoph Hellwig static int iblock_execute_sync_cache(struct se_cmd *cmd)
301c66ac9dbSNicholas Bellinger {
302c66ac9dbSNicholas Bellinger 	struct iblock_dev *ib_dev = cmd->se_dev->dev_ptr;
303a1d8b49aSAndy Grover 	int immed = (cmd->t_task_cdb[1] & 0x2);
304df5fa691SChristoph Hellwig 	struct bio *bio;
305c66ac9dbSNicholas Bellinger 
306c66ac9dbSNicholas Bellinger 	/*
307c66ac9dbSNicholas Bellinger 	 * If the Immediate bit is set, queue up the GOOD response
308df5fa691SChristoph Hellwig 	 * for this SYNCHRONIZE_CACHE op.
309c66ac9dbSNicholas Bellinger 	 */
310c66ac9dbSNicholas Bellinger 	if (immed)
3115787cacdSChristoph Hellwig 		target_complete_cmd(cmd, SAM_STAT_GOOD);
312c66ac9dbSNicholas Bellinger 
313df5fa691SChristoph Hellwig 	bio = bio_alloc(GFP_KERNEL, 0);
314df5fa691SChristoph Hellwig 	bio->bi_end_io = iblock_end_io_flush;
315df5fa691SChristoph Hellwig 	bio->bi_bdev = ib_dev->ibd_bd;
316c66ac9dbSNicholas Bellinger 	if (!immed)
317df5fa691SChristoph Hellwig 		bio->bi_private = cmd;
318df5fa691SChristoph Hellwig 	submit_bio(WRITE_FLUSH, bio);
319ad67f0d9SChristoph Hellwig 	return 0;
320c66ac9dbSNicholas Bellinger }
321c66ac9dbSNicholas Bellinger 
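/*
 * Walk the UNMAP parameter data: 16-byte block descriptors follow the
 * 8-byte parameter list header, and each LBA/range pair is translated
 * into a blkdev_issue_discard() call against the claimed block device.
 */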
32214150a6bSChristoph Hellwig static int iblock_execute_unmap(struct se_cmd *cmd)
323c66ac9dbSNicholas Bellinger {
32414150a6bSChristoph Hellwig 	struct se_device *dev = cmd->se_dev;
325c66ac9dbSNicholas Bellinger 	struct iblock_dev *ibd = dev->dev_ptr;
32614150a6bSChristoph Hellwig 	unsigned char *buf, *ptr = NULL;
32714150a6bSChristoph Hellwig 	unsigned char *cdb = &cmd->t_task_cdb[0];
32814150a6bSChristoph Hellwig 	sector_t lba;
32914150a6bSChristoph Hellwig 	unsigned int size = cmd->data_length, range;
33014150a6bSChristoph Hellwig 	int ret = 0, offset;
33114150a6bSChristoph Hellwig 	unsigned short dl, bd_dl;
332c66ac9dbSNicholas Bellinger 
33314150a6bSChristoph Hellwig 	/* First UNMAP block descriptor starts at 8 byte offset */
33414150a6bSChristoph Hellwig 	offset = 8;
33514150a6bSChristoph Hellwig 	size -= 8;
33614150a6bSChristoph Hellwig 	dl = get_unaligned_be16(&cdb[0]);
33714150a6bSChristoph Hellwig 	bd_dl = get_unaligned_be16(&cdb[2]);
33814150a6bSChristoph Hellwig 
33914150a6bSChristoph Hellwig 	buf = transport_kmap_data_sg(cmd);
	if (!buf) {
		cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
		return -ENOMEM;
	}
34014150a6bSChristoph Hellwig 
34114150a6bSChristoph Hellwig 	ptr = &buf[offset];
34214150a6bSChristoph Hellwig 	pr_debug("UNMAP: Sub: %s Using dl: %hu bd_dl: %hu size: %u"
34314150a6bSChristoph Hellwig 		" ptr: %p\n", dev->transport->name, dl, bd_dl, size, ptr);
34414150a6bSChristoph Hellwig 
34514150a6bSChristoph Hellwig 	while (size) {
34614150a6bSChristoph Hellwig 		lba = get_unaligned_be64(&ptr[0]);
34714150a6bSChristoph Hellwig 		range = get_unaligned_be32(&ptr[8]);
34814150a6bSChristoph Hellwig 		pr_debug("UNMAP: Using lba: %llu and range: %u\n",
34914150a6bSChristoph Hellwig 				 (unsigned long long)lba, range);
35014150a6bSChristoph Hellwig 
35114150a6bSChristoph Hellwig 		ret = blkdev_issue_discard(ibd->ibd_bd, lba, range,
35214150a6bSChristoph Hellwig 					   GFP_KERNEL, 0);
35314150a6bSChristoph Hellwig 		if (ret < 0) {
35414150a6bSChristoph Hellwig 			pr_err("blkdev_issue_discard() failed: %d\n",
35514150a6bSChristoph Hellwig 					ret);
35614150a6bSChristoph Hellwig 			goto err;
35714150a6bSChristoph Hellwig 		}
35814150a6bSChristoph Hellwig 
35914150a6bSChristoph Hellwig 		ptr += 16;
36014150a6bSChristoph Hellwig 		size -= 16;
36114150a6bSChristoph Hellwig 	}
36214150a6bSChristoph Hellwig 
36314150a6bSChristoph Hellwig err:
36414150a6bSChristoph Hellwig 	transport_kunmap_data_sg(cmd);
36514150a6bSChristoph Hellwig 	if (!ret)
36614150a6bSChristoph Hellwig 		target_complete_cmd(cmd, GOOD);
36714150a6bSChristoph Hellwig 	return ret;
368c66ac9dbSNicholas Bellinger }
369c66ac9dbSNicholas Bellinger 
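/*
 * WRITE_SAME is serviced by discarding the LBA range with
 * blkdev_issue_discard() rather than writing out the payload.
 */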
3706f974e8cSChristoph Hellwig static int iblock_execute_write_same(struct se_cmd *cmd)
3716f974e8cSChristoph Hellwig {
3726f974e8cSChristoph Hellwig 	struct iblock_dev *ibd = cmd->se_dev->dev_ptr;
3736f974e8cSChristoph Hellwig 	int ret;
3746f974e8cSChristoph Hellwig 
3756f974e8cSChristoph Hellwig 	ret = blkdev_issue_discard(ibd->ibd_bd, cmd->t_task_lba,
3766f974e8cSChristoph Hellwig 				   spc_get_write_same_sectors(cmd), GFP_KERNEL,
3776f974e8cSChristoph Hellwig 				   0);
3786f974e8cSChristoph Hellwig 	if (ret < 0) {
3796f974e8cSChristoph Hellwig 		pr_debug("blkdev_issue_discard() failed for WRITE_SAME\n");
3806f974e8cSChristoph Hellwig 		return ret;
3816f974e8cSChristoph Hellwig 	}
3826f974e8cSChristoph Hellwig 
3836f974e8cSChristoph Hellwig 	target_complete_cmd(cmd, GOOD);
3846f974e8cSChristoph Hellwig 	return 0;
3856f974e8cSChristoph Hellwig }
3866f974e8cSChristoph Hellwig 
387c66ac9dbSNicholas Bellinger enum {
38844bfd018SAndy Grover 	Opt_udev_path, Opt_readonly, Opt_force, Opt_err
389c66ac9dbSNicholas Bellinger };
390c66ac9dbSNicholas Bellinger 
391c66ac9dbSNicholas Bellinger static match_table_t tokens = {
392c66ac9dbSNicholas Bellinger 	{Opt_udev_path, "udev_path=%s"},
39344bfd018SAndy Grover 	{Opt_readonly, "readonly=%d"},
394c66ac9dbSNicholas Bellinger 	{Opt_force, "force=%d"},
395c66ac9dbSNicholas Bellinger 	{Opt_err, NULL}
396c66ac9dbSNicholas Bellinger };
397c66ac9dbSNicholas Bellinger 
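/*
 * Parse the comma/newline separated key=value options written to the
 * device's configfs control file, typically something like
 * "echo udev_path=/dev/sdb,readonly=1 > $HBA/$DEV/control".
 */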
398c66ac9dbSNicholas Bellinger static ssize_t iblock_set_configfs_dev_params(struct se_hba *hba,
399c66ac9dbSNicholas Bellinger 					       struct se_subsystem_dev *se_dev,
400c66ac9dbSNicholas Bellinger 					       const char *page, ssize_t count)
401c66ac9dbSNicholas Bellinger {
402c66ac9dbSNicholas Bellinger 	struct iblock_dev *ib_dev = se_dev->se_dev_su_ptr;
4036d180253SJesper Juhl 	char *orig, *ptr, *arg_p, *opts;
404c66ac9dbSNicholas Bellinger 	substring_t args[MAX_OPT_ARGS];
40521bca31cSRoland Dreier 	int ret = 0, token;
40644bfd018SAndy Grover 	unsigned long tmp_readonly;
407c66ac9dbSNicholas Bellinger 
408c66ac9dbSNicholas Bellinger 	opts = kstrdup(page, GFP_KERNEL);
409c66ac9dbSNicholas Bellinger 	if (!opts)
410c66ac9dbSNicholas Bellinger 		return -ENOMEM;
411c66ac9dbSNicholas Bellinger 
412c66ac9dbSNicholas Bellinger 	orig = opts;
413c66ac9dbSNicholas Bellinger 
41490c161b6SSebastian Andrzej Siewior 	while ((ptr = strsep(&opts, ",\n")) != NULL) {
415c66ac9dbSNicholas Bellinger 		if (!*ptr)
416c66ac9dbSNicholas Bellinger 			continue;
417c66ac9dbSNicholas Bellinger 
418c66ac9dbSNicholas Bellinger 		token = match_token(ptr, tokens, args);
419c66ac9dbSNicholas Bellinger 		switch (token) {
420c66ac9dbSNicholas Bellinger 		case Opt_udev_path:
421c66ac9dbSNicholas Bellinger 			if (ib_dev->ibd_bd) {
4226708bb27SAndy Grover 				pr_err("Unable to set udev_path= while"
423c66ac9dbSNicholas Bellinger 					" ib_dev->ibd_bd exists\n");
424c66ac9dbSNicholas Bellinger 				ret = -EEXIST;
425c66ac9dbSNicholas Bellinger 				goto out;
426c66ac9dbSNicholas Bellinger 			}
4276d180253SJesper Juhl 			arg_p = match_strdup(&args[0]);
4286d180253SJesper Juhl 			if (!arg_p) {
4296d180253SJesper Juhl 				ret = -ENOMEM;
4306d180253SJesper Juhl 				break;
4316d180253SJesper Juhl 			}
4326d180253SJesper Juhl 			snprintf(ib_dev->ibd_udev_path, SE_UDEV_PATH_LEN,
4336d180253SJesper Juhl 					"%s", arg_p);
4346d180253SJesper Juhl 			kfree(arg_p);
4356708bb27SAndy Grover 			pr_debug("IBLOCK: Referencing UDEV path: %s\n",
436c66ac9dbSNicholas Bellinger 					ib_dev->ibd_udev_path);
437c66ac9dbSNicholas Bellinger 			ib_dev->ibd_flags |= IBDF_HAS_UDEV_PATH;
438c66ac9dbSNicholas Bellinger 			break;
43944bfd018SAndy Grover 		case Opt_readonly:
44044bfd018SAndy Grover 			arg_p = match_strdup(&args[0]);
44144bfd018SAndy Grover 			if (!arg_p) {
44244bfd018SAndy Grover 				ret = -ENOMEM;
44344bfd018SAndy Grover 				break;
44444bfd018SAndy Grover 			}
44544bfd018SAndy Grover 			ret = strict_strtoul(arg_p, 0, &tmp_readonly);
44644bfd018SAndy Grover 			kfree(arg_p);
44744bfd018SAndy Grover 			if (ret < 0) {
44844bfd018SAndy Grover 				pr_err("strict_strtoul() failed for"
44944bfd018SAndy Grover 						" readonly=\n");
45044bfd018SAndy Grover 				goto out;
45144bfd018SAndy Grover 			}
45244bfd018SAndy Grover 			ib_dev->ibd_readonly = tmp_readonly;
45344bfd018SAndy Grover 			pr_debug("IBLOCK: readonly: %d\n", ib_dev->ibd_readonly);
45444bfd018SAndy Grover 			break;
455c66ac9dbSNicholas Bellinger 		case Opt_force:
456c66ac9dbSNicholas Bellinger 			break;
457c66ac9dbSNicholas Bellinger 		default:
458c66ac9dbSNicholas Bellinger 			break;
459c66ac9dbSNicholas Bellinger 		}
460c66ac9dbSNicholas Bellinger 	}
461c66ac9dbSNicholas Bellinger 
462c66ac9dbSNicholas Bellinger out:
463c66ac9dbSNicholas Bellinger 	kfree(orig);
464c66ac9dbSNicholas Bellinger 	return (!ret) ? count : ret;
465c66ac9dbSNicholas Bellinger }
466c66ac9dbSNicholas Bellinger 
467c66ac9dbSNicholas Bellinger static ssize_t iblock_check_configfs_dev_params(
468c66ac9dbSNicholas Bellinger 	struct se_hba *hba,
469c66ac9dbSNicholas Bellinger 	struct se_subsystem_dev *se_dev)
470c66ac9dbSNicholas Bellinger {
471c66ac9dbSNicholas Bellinger 	struct iblock_dev *ibd = se_dev->se_dev_su_ptr;
472c66ac9dbSNicholas Bellinger 
473c66ac9dbSNicholas Bellinger 	if (!(ibd->ibd_flags & IBDF_HAS_UDEV_PATH)) {
4746708bb27SAndy Grover 		pr_err("Missing udev_path= parameter for IBLOCK\n");
475e3d6f909SAndy Grover 		return -EINVAL;
476c66ac9dbSNicholas Bellinger 	}
477c66ac9dbSNicholas Bellinger 
478c66ac9dbSNicholas Bellinger 	return 0;
479c66ac9dbSNicholas Bellinger }
480c66ac9dbSNicholas Bellinger 
481c66ac9dbSNicholas Bellinger static ssize_t iblock_show_configfs_dev_params(
482c66ac9dbSNicholas Bellinger 	struct se_hba *hba,
483c66ac9dbSNicholas Bellinger 	struct se_subsystem_dev *se_dev,
484c66ac9dbSNicholas Bellinger 	char *b)
485c66ac9dbSNicholas Bellinger {
486c66ac9dbSNicholas Bellinger 	struct iblock_dev *ibd = se_dev->se_dev_su_ptr;
487c66ac9dbSNicholas Bellinger 	struct block_device *bd = ibd->ibd_bd;
488c66ac9dbSNicholas Bellinger 	char buf[BDEVNAME_SIZE];
489c66ac9dbSNicholas Bellinger 	ssize_t bl = 0;
490c66ac9dbSNicholas Bellinger 
491c66ac9dbSNicholas Bellinger 	if (bd)
492c66ac9dbSNicholas Bellinger 		bl += sprintf(b + bl, "iBlock device: %s",
493c66ac9dbSNicholas Bellinger 				bdevname(bd, buf));
49444bfd018SAndy Grover 	if (ibd->ibd_flags & IBDF_HAS_UDEV_PATH)
49544bfd018SAndy Grover 		bl += sprintf(b + bl, "  UDEV PATH: %s",
496c66ac9dbSNicholas Bellinger 				ibd->ibd_udev_path);
49744bfd018SAndy Grover 	bl += sprintf(b + bl, "  readonly: %d\n", ibd->ibd_readonly);
498c66ac9dbSNicholas Bellinger 
499c66ac9dbSNicholas Bellinger 	bl += sprintf(b + bl, "        ");
500c66ac9dbSNicholas Bellinger 	if (bd) {
501c66ac9dbSNicholas Bellinger 		bl += sprintf(b + bl, "Major: %d Minor: %d  %s\n",
50221bca31cSRoland Dreier 			MAJOR(bd->bd_dev), MINOR(bd->bd_dev), (!bd->bd_contains) ?
5038359cf43SJörn Engel 			"" : (bd->bd_holder == ibd) ?
504c66ac9dbSNicholas Bellinger 			"CLAIMED: IBLOCK" : "CLAIMED: OS");
505c66ac9dbSNicholas Bellinger 	} else {
50621bca31cSRoland Dreier 		bl += sprintf(b + bl, "Major: 0 Minor: 0\n");
507c66ac9dbSNicholas Bellinger 	}
508c66ac9dbSNicholas Bellinger 
509c66ac9dbSNicholas Bellinger 	return bl;
510c66ac9dbSNicholas Bellinger }
511c66ac9dbSNicholas Bellinger 
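/*
 * Shared completion path: called once from the submission path and once per
 * completed bio.  The command is finished only when the last ibr->pending
 * reference drops, with CHECK CONDITION if any bio reported an error.
 */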
5125787cacdSChristoph Hellwig static void iblock_complete_cmd(struct se_cmd *cmd)
5135787cacdSChristoph Hellwig {
5145787cacdSChristoph Hellwig 	struct iblock_req *ibr = cmd->priv;
5155787cacdSChristoph Hellwig 	u8 status;
5165787cacdSChristoph Hellwig 
5175787cacdSChristoph Hellwig 	if (!atomic_dec_and_test(&ibr->pending))
5185787cacdSChristoph Hellwig 		return;
5195787cacdSChristoph Hellwig 
5205787cacdSChristoph Hellwig 	if (atomic_read(&ibr->ib_bio_err_cnt))
5215787cacdSChristoph Hellwig 		status = SAM_STAT_CHECK_CONDITION;
5225787cacdSChristoph Hellwig 	else
5235787cacdSChristoph Hellwig 		status = SAM_STAT_GOOD;
5245787cacdSChristoph Hellwig 
5255787cacdSChristoph Hellwig 	target_complete_cmd(cmd, status);
5265787cacdSChristoph Hellwig 	kfree(ibr);
5275787cacdSChristoph Hellwig }
5285787cacdSChristoph Hellwig 
529c66ac9dbSNicholas Bellinger static void iblock_bio_destructor(struct bio *bio)
530c66ac9dbSNicholas Bellinger {
5315787cacdSChristoph Hellwig 	struct se_cmd *cmd = bio->bi_private;
5325787cacdSChristoph Hellwig 	struct iblock_dev *ib_dev = cmd->se_dev->dev_ptr;
533c66ac9dbSNicholas Bellinger 
534c66ac9dbSNicholas Bellinger 	bio_free(bio, ib_dev->ibd_bio_set);
535c66ac9dbSNicholas Bellinger }
536c66ac9dbSNicholas Bellinger 
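/*
 * Allocate a bio from the per-device bioset, pointed at the claimed block
 * device and primed to complete through iblock_bio_done().
 */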
537dbbf3e94SChristoph Hellwig static struct bio *
5385787cacdSChristoph Hellwig iblock_get_bio(struct se_cmd *cmd, sector_t lba, u32 sg_num)
539c66ac9dbSNicholas Bellinger {
5405787cacdSChristoph Hellwig 	struct iblock_dev *ib_dev = cmd->se_dev->dev_ptr;
541c66ac9dbSNicholas Bellinger 	struct bio *bio;
542c66ac9dbSNicholas Bellinger 
5435c55125fSChristoph Hellwig 	/*
5445c55125fSChristoph Hellwig 	 * Only allocate as many vector entries as the bio code allows us to,
5455c55125fSChristoph Hellwig 	 * we'll loop later on until we have handled the whole request.
5465c55125fSChristoph Hellwig 	 */
5475c55125fSChristoph Hellwig 	if (sg_num > BIO_MAX_PAGES)
5485c55125fSChristoph Hellwig 		sg_num = BIO_MAX_PAGES;
5495c55125fSChristoph Hellwig 
550c66ac9dbSNicholas Bellinger 	bio = bio_alloc_bioset(GFP_NOIO, sg_num, ib_dev->ibd_bio_set);
5516708bb27SAndy Grover 	if (!bio) {
5526708bb27SAndy Grover 		pr_err("Unable to allocate memory for bio\n");
553c66ac9dbSNicholas Bellinger 		return NULL;
554c66ac9dbSNicholas Bellinger 	}
555c66ac9dbSNicholas Bellinger 
556c66ac9dbSNicholas Bellinger 	bio->bi_bdev = ib_dev->ibd_bd;
5575787cacdSChristoph Hellwig 	bio->bi_private = cmd;
558c66ac9dbSNicholas Bellinger 	bio->bi_destructor = iblock_bio_destructor;
559c66ac9dbSNicholas Bellinger 	bio->bi_end_io = &iblock_bio_done;
560c66ac9dbSNicholas Bellinger 	bio->bi_sector = lba;
561c66ac9dbSNicholas Bellinger 	return bio;
562c66ac9dbSNicholas Bellinger }
563c66ac9dbSNicholas Bellinger 
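/*
 * Submit all queued bios under a single blk_plug so the block layer has a
 * chance to merge and batch them.
 */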
564d5b4a21bSChristoph Hellwig static void iblock_submit_bios(struct bio_list *list, int rw)
565d5b4a21bSChristoph Hellwig {
566d5b4a21bSChristoph Hellwig 	struct blk_plug plug;
567d5b4a21bSChristoph Hellwig 	struct bio *bio;
568d5b4a21bSChristoph Hellwig 
569d5b4a21bSChristoph Hellwig 	blk_start_plug(&plug);
570d5b4a21bSChristoph Hellwig 	while ((bio = bio_list_pop(list)))
571d5b4a21bSChristoph Hellwig 		submit_bio(rw, bio);
572d5b4a21bSChristoph Hellwig 	blk_finish_plug(&plug);
573d5b4a21bSChristoph Hellwig }
574d5b4a21bSChristoph Hellwig 
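/*
 * Main READ/WRITE path: convert the command LBA into 512-byte sectors, map
 * the scatterlist into one or more bios (at most BIO_MAX_PAGES vectors each),
 * and submit them in batches of IBLOCK_MAX_BIO_PER_TASK.  ibr->pending starts
 * at 2 so the command cannot complete before submission itself has finished.
 */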
5750c2ad7d1SChristoph Hellwig static int iblock_execute_rw(struct se_cmd *cmd)
576c66ac9dbSNicholas Bellinger {
5770c2ad7d1SChristoph Hellwig 	struct scatterlist *sgl = cmd->t_data_sg;
5780c2ad7d1SChristoph Hellwig 	u32 sgl_nents = cmd->t_data_nents;
5790c2ad7d1SChristoph Hellwig 	enum dma_data_direction data_direction = cmd->data_direction;
5805951146dSAndy Grover 	struct se_device *dev = cmd->se_dev;
5815787cacdSChristoph Hellwig 	struct iblock_req *ibr;
582dbbf3e94SChristoph Hellwig 	struct bio *bio;
583dbbf3e94SChristoph Hellwig 	struct bio_list list;
584c66ac9dbSNicholas Bellinger 	struct scatterlist *sg;
5855787cacdSChristoph Hellwig 	u32 sg_num = sgl_nents;
586c66ac9dbSNicholas Bellinger 	sector_t block_lba;
587d5b4a21bSChristoph Hellwig 	unsigned bio_cnt;
588dbbf3e94SChristoph Hellwig 	int rw;
5895787cacdSChristoph Hellwig 	int i;
590dbbf3e94SChristoph Hellwig 
5915787cacdSChristoph Hellwig 	if (data_direction == DMA_TO_DEVICE) {
592dbbf3e94SChristoph Hellwig 		/*
593dbbf3e94SChristoph Hellwig 		 * Force data to disk if we pretend to not have a volatile
594dbbf3e94SChristoph Hellwig 		 * write cache, or the initiator set the Force Unit Access bit.
595dbbf3e94SChristoph Hellwig 		 */
596dbbf3e94SChristoph Hellwig 		if (dev->se_sub_dev->se_dev_attrib.emulate_write_cache == 0 ||
597dbbf3e94SChristoph Hellwig 		    (dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0 &&
5982d3a4b51SChristoph Hellwig 		     (cmd->se_cmd_flags & SCF_FUA)))
599dbbf3e94SChristoph Hellwig 			rw = WRITE_FUA;
600dbbf3e94SChristoph Hellwig 		else
601dbbf3e94SChristoph Hellwig 			rw = WRITE;
602dbbf3e94SChristoph Hellwig 	} else {
603dbbf3e94SChristoph Hellwig 		rw = READ;
604dbbf3e94SChristoph Hellwig 	}
605dbbf3e94SChristoph Hellwig 
606c66ac9dbSNicholas Bellinger 	/*
6075787cacdSChristoph Hellwig 	 * Convert the LBA from the block size advertised to the initiator into
6085787cacdSChristoph Hellwig 	 * the 512-byte sectors used unconditionally by the Linux block layer.
609c66ac9dbSNicholas Bellinger 	 */
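	/*
	 * For example, with a 4096-byte advertised block size an LBA of 100
	 * becomes 512-byte sector 800 (100 << 3).
	 */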
610e3d6f909SAndy Grover 	if (dev->se_sub_dev->se_dev_attrib.block_size == 4096)
61172a0e5e2SChristoph Hellwig 		block_lba = (cmd->t_task_lba << 3);
612e3d6f909SAndy Grover 	else if (dev->se_sub_dev->se_dev_attrib.block_size == 2048)
61372a0e5e2SChristoph Hellwig 		block_lba = (cmd->t_task_lba << 2);
614e3d6f909SAndy Grover 	else if (dev->se_sub_dev->se_dev_attrib.block_size == 1024)
61572a0e5e2SChristoph Hellwig 		block_lba = (cmd->t_task_lba << 1);
616e3d6f909SAndy Grover 	else if (dev->se_sub_dev->se_dev_attrib.block_size == 512)
61772a0e5e2SChristoph Hellwig 		block_lba = cmd->t_task_lba;
618c66ac9dbSNicholas Bellinger 	else {
6196708bb27SAndy Grover 		pr_err("Unsupported SCSI -> BLOCK LBA conversion:"
620e3d6f909SAndy Grover 				" %u\n", dev->se_sub_dev->se_dev_attrib.block_size);
62103e98c9eSNicholas Bellinger 		cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
62203e98c9eSNicholas Bellinger 		return -ENOSYS;
623c66ac9dbSNicholas Bellinger 	}
624c66ac9dbSNicholas Bellinger 
6255787cacdSChristoph Hellwig 	ibr = kzalloc(sizeof(struct iblock_req), GFP_KERNEL);
6265787cacdSChristoph Hellwig 	if (!ibr)
6275787cacdSChristoph Hellwig 		goto fail;
6285787cacdSChristoph Hellwig 	cmd->priv = ibr;
6295787cacdSChristoph Hellwig 
6305787cacdSChristoph Hellwig 	bio = iblock_get_bio(cmd, block_lba, sgl_nents);
6315787cacdSChristoph Hellwig 	if (!bio)
6325787cacdSChristoph Hellwig 		goto fail_free_ibr;
633c66ac9dbSNicholas Bellinger 
634dbbf3e94SChristoph Hellwig 	bio_list_init(&list);
635dbbf3e94SChristoph Hellwig 	bio_list_add(&list, bio);
6365787cacdSChristoph Hellwig 
6375787cacdSChristoph Hellwig 	atomic_set(&ibr->pending, 2);
638d5b4a21bSChristoph Hellwig 	bio_cnt = 1;
639dbbf3e94SChristoph Hellwig 
6405787cacdSChristoph Hellwig 	for_each_sg(sgl, sg, sgl_nents, i) {
641dbbf3e94SChristoph Hellwig 		/*
642dbbf3e94SChristoph Hellwig 		 * XXX: if the length the device accepts is shorter than the
643dbbf3e94SChristoph Hellwig 		 *	length of the S/G list entry this will cause an
644dbbf3e94SChristoph Hellwig 		 *	endless loop.  Better hope no driver uses huge pages.
645dbbf3e94SChristoph Hellwig 		 */
646dbbf3e94SChristoph Hellwig 		while (bio_add_page(bio, sg_page(sg), sg->length, sg->offset)
647dbbf3e94SChristoph Hellwig 				!= sg->length) {
648d5b4a21bSChristoph Hellwig 			if (bio_cnt >= IBLOCK_MAX_BIO_PER_TASK) {
649d5b4a21bSChristoph Hellwig 				iblock_submit_bios(&list, rw);
650d5b4a21bSChristoph Hellwig 				bio_cnt = 0;
651d5b4a21bSChristoph Hellwig 			}
652d5b4a21bSChristoph Hellwig 
6535787cacdSChristoph Hellwig 			bio = iblock_get_bio(cmd, block_lba, sg_num);
6546708bb27SAndy Grover 			if (!bio)
6555787cacdSChristoph Hellwig 				goto fail_put_bios;
6565787cacdSChristoph Hellwig 
6575787cacdSChristoph Hellwig 			atomic_inc(&ibr->pending);
658dbbf3e94SChristoph Hellwig 			bio_list_add(&list, bio);
659d5b4a21bSChristoph Hellwig 			bio_cnt++;
660c66ac9dbSNicholas Bellinger 		}
661dbbf3e94SChristoph Hellwig 
662c66ac9dbSNicholas Bellinger 		/* Always in 512 byte units for Linux/Block */
663c66ac9dbSNicholas Bellinger 		block_lba += sg->length >> IBLOCK_LBA_SHIFT;
664c66ac9dbSNicholas Bellinger 		sg_num--;
665c66ac9dbSNicholas Bellinger 	}
666c66ac9dbSNicholas Bellinger 
667d5b4a21bSChristoph Hellwig 	iblock_submit_bios(&list, rw);
6685787cacdSChristoph Hellwig 	iblock_complete_cmd(cmd);
66903e98c9eSNicholas Bellinger 	return 0;
670dbbf3e94SChristoph Hellwig 
6715787cacdSChristoph Hellwig fail_put_bios:
672dbbf3e94SChristoph Hellwig 	while ((bio = bio_list_pop(&list)))
673c66ac9dbSNicholas Bellinger 		bio_put(bio);
6745787cacdSChristoph Hellwig fail_free_ibr:
6755787cacdSChristoph Hellwig 	kfree(ibr);
67603e98c9eSNicholas Bellinger 	cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
6775787cacdSChristoph Hellwig fail:
67803e98c9eSNicholas Bellinger 	return -ENOMEM;
679c66ac9dbSNicholas Bellinger }
680c66ac9dbSNicholas Bellinger 
681c66ac9dbSNicholas Bellinger static u32 iblock_get_device_rev(struct se_device *dev)
682c66ac9dbSNicholas Bellinger {
683c66ac9dbSNicholas Bellinger 	return SCSI_SPC_2; /* Returns SPC-3 in Initiator Data */
684c66ac9dbSNicholas Bellinger }
685c66ac9dbSNicholas Bellinger 
686c66ac9dbSNicholas Bellinger static u32 iblock_get_device_type(struct se_device *dev)
687c66ac9dbSNicholas Bellinger {
688c66ac9dbSNicholas Bellinger 	return TYPE_DISK;
689c66ac9dbSNicholas Bellinger }
690c66ac9dbSNicholas Bellinger 
691c66ac9dbSNicholas Bellinger static sector_t iblock_get_blocks(struct se_device *dev)
692c66ac9dbSNicholas Bellinger {
693c66ac9dbSNicholas Bellinger 	struct iblock_dev *ibd = dev->dev_ptr;
694c66ac9dbSNicholas Bellinger 	struct block_device *bd = ibd->ibd_bd;
695c66ac9dbSNicholas Bellinger 	struct request_queue *q = bdev_get_queue(bd);
696c66ac9dbSNicholas Bellinger 
697c66ac9dbSNicholas Bellinger 	return iblock_emulate_read_cap_with_block_size(dev, bd, q);
698c66ac9dbSNicholas Bellinger }
699c66ac9dbSNicholas Bellinger 
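/*
 * Per-bio completion handler: record any error in ibr->ib_bio_err_cnt and
 * drop this bio's reference on the command via iblock_complete_cmd().
 */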
700c66ac9dbSNicholas Bellinger static void iblock_bio_done(struct bio *bio, int err)
701c66ac9dbSNicholas Bellinger {
7025787cacdSChristoph Hellwig 	struct se_cmd *cmd = bio->bi_private;
7035787cacdSChristoph Hellwig 	struct iblock_req *ibr = cmd->priv;
704dbbf3e94SChristoph Hellwig 
705c66ac9dbSNicholas Bellinger 	/*
706c66ac9dbSNicholas Bellinger 	 * Set -EIO if !BIO_UPTODATE and the passed-in err is still 0.
707c66ac9dbSNicholas Bellinger 	 */
7086708bb27SAndy Grover 	if (!test_bit(BIO_UPTODATE, &bio->bi_flags) && !err)
709c66ac9dbSNicholas Bellinger 		err = -EIO;
710c66ac9dbSNicholas Bellinger 
711c66ac9dbSNicholas Bellinger 	if (err != 0) {
7126708bb27SAndy Grover 		pr_err("test_bit(BIO_UPTODATE) failed for bio: %p,"
713c66ac9dbSNicholas Bellinger 			" err: %d\n", bio, err);
714c66ac9dbSNicholas Bellinger 		/*
715c66ac9dbSNicholas Bellinger 		 * Bump the ib_bio_err_cnt and release bio.
716c66ac9dbSNicholas Bellinger 		 */
717c66ac9dbSNicholas Bellinger 		atomic_inc(&ibr->ib_bio_err_cnt);
718c66ac9dbSNicholas Bellinger 		smp_mb__after_atomic_inc();
719dbbf3e94SChristoph Hellwig 	}
720dbbf3e94SChristoph Hellwig 
721c66ac9dbSNicholas Bellinger 	bio_put(bio);
722dbbf3e94SChristoph Hellwig 
7235787cacdSChristoph Hellwig 	iblock_complete_cmd(cmd);
724c66ac9dbSNicholas Bellinger }
725c66ac9dbSNicholas Bellinger 
7260c2ad7d1SChristoph Hellwig static struct spc_ops iblock_spc_ops = {
7270c2ad7d1SChristoph Hellwig 	.execute_rw		= iblock_execute_rw,
728ad67f0d9SChristoph Hellwig 	.execute_sync_cache	= iblock_execute_sync_cache,
7296f974e8cSChristoph Hellwig 	.execute_write_same	= iblock_execute_write_same,
73014150a6bSChristoph Hellwig 	.execute_unmap		= iblock_execute_unmap,
7310c2ad7d1SChristoph Hellwig };
7320c2ad7d1SChristoph Hellwig 
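/*
 * CDB parsing is delegated to the common SBC code, which calls back into
 * iblock_spc_ops for the actual I/O.
 */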
7330c2ad7d1SChristoph Hellwig static int iblock_parse_cdb(struct se_cmd *cmd)
7340c2ad7d1SChristoph Hellwig {
7350c2ad7d1SChristoph Hellwig 	return sbc_parse_cdb(cmd, &iblock_spc_ops);
7360c2ad7d1SChristoph Hellwig }
7370c2ad7d1SChristoph Hellwig 
738c66ac9dbSNicholas Bellinger static struct se_subsystem_api iblock_template = {
739c66ac9dbSNicholas Bellinger 	.name			= "iblock",
740c66ac9dbSNicholas Bellinger 	.owner			= THIS_MODULE,
741c66ac9dbSNicholas Bellinger 	.transport_type		= TRANSPORT_PLUGIN_VHBA_PDEV,
742f55918faSChristoph Hellwig 	.write_cache_emulated	= 1,
743f55918faSChristoph Hellwig 	.fua_write_emulated	= 1,
744c66ac9dbSNicholas Bellinger 	.attach_hba		= iblock_attach_hba,
745c66ac9dbSNicholas Bellinger 	.detach_hba		= iblock_detach_hba,
746c66ac9dbSNicholas Bellinger 	.allocate_virtdevice	= iblock_allocate_virtdevice,
747c66ac9dbSNicholas Bellinger 	.create_virtdevice	= iblock_create_virtdevice,
748c66ac9dbSNicholas Bellinger 	.free_device		= iblock_free_device,
7490c2ad7d1SChristoph Hellwig 	.parse_cdb		= iblock_parse_cdb,
750c66ac9dbSNicholas Bellinger 	.check_configfs_dev_params = iblock_check_configfs_dev_params,
751c66ac9dbSNicholas Bellinger 	.set_configfs_dev_params = iblock_set_configfs_dev_params,
752c66ac9dbSNicholas Bellinger 	.show_configfs_dev_params = iblock_show_configfs_dev_params,
753c66ac9dbSNicholas Bellinger 	.get_device_rev		= iblock_get_device_rev,
754c66ac9dbSNicholas Bellinger 	.get_device_type	= iblock_get_device_type,
755c66ac9dbSNicholas Bellinger 	.get_blocks		= iblock_get_blocks,
756c66ac9dbSNicholas Bellinger };
757c66ac9dbSNicholas Bellinger 
758c66ac9dbSNicholas Bellinger static int __init iblock_module_init(void)
759c66ac9dbSNicholas Bellinger {
760c66ac9dbSNicholas Bellinger 	return transport_subsystem_register(&iblock_template);
761c66ac9dbSNicholas Bellinger }
762c66ac9dbSNicholas Bellinger 
763c66ac9dbSNicholas Bellinger static void iblock_module_exit(void)
764c66ac9dbSNicholas Bellinger {
765c66ac9dbSNicholas Bellinger 	transport_subsystem_release(&iblock_template);
766c66ac9dbSNicholas Bellinger }
767c66ac9dbSNicholas Bellinger 
768c66ac9dbSNicholas Bellinger MODULE_DESCRIPTION("TCM IBLOCK subsystem plugin");
769c66ac9dbSNicholas Bellinger MODULE_AUTHOR("nab@Linux-iSCSI.org");
770c66ac9dbSNicholas Bellinger MODULE_LICENSE("GPL");
771c66ac9dbSNicholas Bellinger 
772c66ac9dbSNicholas Bellinger module_init(iblock_module_init);
773c66ac9dbSNicholas Bellinger module_exit(iblock_module_exit);