/*******************************************************************************
 * Filename:  target_core_iblock.c
 *
 * This file contains the Storage Engine <-> Linux BlockIO transport
 * specific functions.
 *
 * Copyright (c) 2003, 2004, 2005 PyX Technologies, Inc.
 * Copyright (c) 2005, 2006, 2007 SBE, Inc.
 * Copyright (c) 2007-2010 Rising Tide Systems
 * Copyright (c) 2008-2010 Linux-iSCSI.org
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 ******************************************************************************/

#include <linux/string.h>
#include <linux/parser.h>
#include <linux/timer.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/bio.h>
#include <linux/genhd.h>
#include <linux/file.h>
#include <linux/module.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <asm/unaligned.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>

#include "target_core_iblock.h"

#define IBLOCK_MAX_BIO_PER_TASK	 32	/* max # of bios to submit at a time */
#define IBLOCK_BIO_POOL_SIZE	128

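/*
 * Translate a generic se_device back to the iblock_dev that embeds it.
 */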
static inline struct iblock_dev *IBLOCK_DEV(struct se_device *dev)
{
	return container_of(dev, struct iblock_dev, dev);
}

static struct se_subsystem_api iblock_template;

static void iblock_bio_done(struct bio *, int);

/*	iblock_attach_hba(): (Part of se_subsystem_api_t template)
 *
 * iBlock keeps no per-HBA state, so attaching only logs the event.
 */
static int iblock_attach_hba(struct se_hba *hba, u32 host_id)
{
	pr_debug("CORE_HBA[%d] - TCM iBlock HBA Driver %s on"
		" Generic Target Core Stack %s\n", hba->hba_id,
		IBLOCK_VERSION, TARGET_CORE_MOD_VERSION);
	return 0;
}

static void iblock_detach_hba(struct se_hba *hba)
{
}

static struct se_device *iblock_alloc_device(struct se_hba *hba, const char *name)
{
	struct iblock_dev *ib_dev = NULL;

	ib_dev = kzalloc(sizeof(struct iblock_dev), GFP_KERNEL);
	if (!ib_dev) {
		pr_err("Unable to allocate struct iblock_dev\n");
		return NULL;
	}

	pr_debug("IBLOCK: Allocated ib_dev for %s\n", name);

	return &ib_dev->dev;
}

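/*
 * Claim the backing struct block_device named by the udev_path= parameter,
 * create the per-device bioset, and copy the queue limits (logical block
 * size, queue depth, discard/UNMAP limits) into the se_device attributes.
 */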
static int iblock_configure_device(struct se_device *dev)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
	struct request_queue *q;
	struct block_device *bd = NULL;
	fmode_t mode;
	int ret = -ENOMEM;

	if (!(ib_dev->ibd_flags & IBDF_HAS_UDEV_PATH)) {
		pr_err("Missing udev_path= parameter for IBLOCK\n");
		return -EINVAL;
	}

	ib_dev->ibd_bio_set = bioset_create(IBLOCK_BIO_POOL_SIZE, 0);
	if (!ib_dev->ibd_bio_set) {
		pr_err("IBLOCK: Unable to create bioset\n");
		goto out;
	}

	pr_debug("IBLOCK: Claiming struct block_device: %s\n",
			ib_dev->ibd_udev_path);

	mode = FMODE_READ|FMODE_EXCL;
	if (!ib_dev->ibd_readonly)
		mode |= FMODE_WRITE;

	bd = blkdev_get_by_path(ib_dev->ibd_udev_path, mode, ib_dev);
	if (IS_ERR(bd)) {
		ret = PTR_ERR(bd);
		goto out_free_bioset;
	}
	ib_dev->ibd_bd = bd;

	q = bdev_get_queue(bd);

	dev->dev_attrib.hw_block_size = bdev_logical_block_size(bd);
	dev->dev_attrib.hw_max_sectors = UINT_MAX;
	dev->dev_attrib.hw_queue_depth = q->nr_requests;

	/*
	 * Check if the underlying struct block_device request_queue supports
	 * discard (QUEUE_FLAG_DISCARD), which backs UNMAP/WRITE_SAME in SCSI
	 * and TRIM in ATA; if so, export the limits so TPE=1 can be reported.
	 */
	if (blk_queue_discard(q)) {
		dev->dev_attrib.max_unmap_lba_count =
				q->limits.max_discard_sectors;

		/*
		 * Currently hardcoded to 1 in Linux/SCSI code.
		 */
		dev->dev_attrib.max_unmap_block_desc_count = 1;
		dev->dev_attrib.unmap_granularity =
				q->limits.discard_granularity >> 9;
		dev->dev_attrib.unmap_granularity_alignment =
				q->limits.discard_alignment;

		pr_debug("IBLOCK: BLOCK Discard support available,"
				" disabled by default\n");
	}
	/*
	 * Enable write same emulation for IBLOCK and cap the length at
	 * 0xFFFF, since the smaller WRITE_SAME(10) only carries a two-byte
	 * block count.
	 */
	dev->dev_attrib.max_write_same_len = 0xFFFF;

	if (blk_queue_nonrot(q))
		dev->dev_attrib.is_nonrot = 1;
	return 0;

out_free_bioset:
	bioset_free(ib_dev->ibd_bio_set);
	ib_dev->ibd_bio_set = NULL;
out:
	return ret;
}

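/*
 * Release the claimed struct block_device and the bioset (if they were set
 * up), then free the iblock_dev allocated by iblock_alloc_device().
 */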
static void iblock_free_device(struct se_device *dev)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);

	if (ib_dev->ibd_bd != NULL)
		blkdev_put(ib_dev->ibd_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL);
	if (ib_dev->ibd_bio_set != NULL)
		bioset_free(ib_dev->ibd_bio_set);
	kfree(ib_dev);
}

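/*
 * Return the index of the last addressable LBA (capacity - 1), expressed in
 * units of the block size advertised to the initiator.  The backing device
 * size is read in its own logical block size and rescaled by shifting
 * whenever the two block sizes differ.
 */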
static unsigned long long iblock_emulate_read_cap_with_block_size(
	struct se_device *dev,
	struct block_device *bd,
	struct request_queue *q)
{
	unsigned long long blocks_long = (div_u64(i_size_read(bd->bd_inode),
					bdev_logical_block_size(bd)) - 1);
	u32 block_size = bdev_logical_block_size(bd);

	if (block_size == dev->dev_attrib.block_size)
		return blocks_long;

	switch (block_size) {
	case 4096:
		switch (dev->dev_attrib.block_size) {
		case 2048:
			blocks_long <<= 1;
			break;
		case 1024:
			blocks_long <<= 2;
			break;
		case 512:
			blocks_long <<= 3;
			break;
		default:
			break;
		}
		break;
	case 2048:
		switch (dev->dev_attrib.block_size) {
		case 4096:
			blocks_long >>= 1;
			break;
		case 1024:
			blocks_long <<= 1;
			break;
		case 512:
			blocks_long <<= 2;
			break;
		default:
			break;
		}
		break;
	case 1024:
		switch (dev->dev_attrib.block_size) {
		case 4096:
			blocks_long >>= 2;
			break;
		case 2048:
			blocks_long >>= 1;
			break;
		case 512:
			blocks_long <<= 1;
			break;
		default:
			break;
		}
		break;
	case 512:
		switch (dev->dev_attrib.block_size) {
		case 4096:
			blocks_long >>= 3;
			break;
		case 2048:
			blocks_long >>= 2;
			break;
		case 1024:
			blocks_long >>= 1;
			break;
		default:
			break;
		}
		break;
	default:
		break;
	}

	return blocks_long;
}

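/*
 * Completion callback for the cache flush bio issued by
 * iblock_execute_sync_cache(); completes the se_cmd unless the Immediate
 * bit already completed it up front.
 */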
static void iblock_end_io_flush(struct bio *bio, int err)
{
	struct se_cmd *cmd = bio->bi_private;

	if (err)
		pr_err("IBLOCK: cache flush failed: %d\n", err);

	if (cmd) {
		if (err)
			target_complete_cmd(cmd, SAM_STAT_CHECK_CONDITION);
		else
			target_complete_cmd(cmd, SAM_STAT_GOOD);
	}

	bio_put(bio);
}

/*
 * Implement SYNCHRONIZE CACHE.  Note that we can't handle LBA ranges and must
 * always flush the whole cache.
 */
static sense_reason_t
iblock_execute_sync_cache(struct se_cmd *cmd)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(cmd->se_dev);
	int immed = (cmd->t_task_cdb[1] & 0x2);
	struct bio *bio;

	bio = bio_alloc(GFP_KERNEL, 0);
	if (!bio) {
		pr_err("IBLOCK: Unable to allocate flush bio\n");
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
	}
	bio->bi_end_io = iblock_end_io_flush;
	bio->bi_bdev = ib_dev->ibd_bd;

	/*
	 * If the Immediate bit is set, queue up the GOOD response
	 * for this SYNCHRONIZE_CACHE op.
	 */
	if (immed)
		target_complete_cmd(cmd, SAM_STAT_GOOD);
	else
		bio->bi_private = cmd;

	submit_bio(WRITE_FLUSH, bio);
	return 0;
}

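/*
 * Implement UNMAP.  Walk the parameter list, validate each block descriptor
 * against the advertised limits, and turn it into a discard on the backing
 * block device via blkdev_issue_discard().
 */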
static sense_reason_t
iblock_execute_unmap(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
	unsigned char *buf, *ptr = NULL;
	sector_t lba;
	int size;
	u32 range;
	sense_reason_t ret = 0;
	int dl, bd_dl, err;

	if (cmd->data_length < 8) {
		pr_warn("UNMAP parameter list length %u too small\n",
			cmd->data_length);
		return TCM_INVALID_PARAMETER_LIST;
	}

	buf = transport_kmap_data_sg(cmd);
	if (!buf)
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

	dl = get_unaligned_be16(&buf[0]);
	bd_dl = get_unaligned_be16(&buf[2]);

	size = cmd->data_length - 8;
	if (bd_dl > size)
		pr_warn("UNMAP parameter list length %u too small, ignoring bd_dl %u\n",
			cmd->data_length, bd_dl);
	else
		size = bd_dl;

	if (size / 16 > dev->dev_attrib.max_unmap_block_desc_count) {
		ret = TCM_INVALID_PARAMETER_LIST;
		goto err;
	}

	/* First UNMAP block descriptor starts at 8 byte offset */
	ptr = &buf[8];
	pr_debug("UNMAP: Sub: %s Using dl: %u bd_dl: %u size: %u"
		" ptr: %p\n", dev->transport->name, dl, bd_dl, size, ptr);

	while (size >= 16) {
		lba = get_unaligned_be64(&ptr[0]);
		range = get_unaligned_be32(&ptr[8]);
		pr_debug("UNMAP: Using lba: %llu and range: %u\n",
				 (unsigned long long)lba, range);

		if (range > dev->dev_attrib.max_unmap_lba_count) {
			ret = TCM_INVALID_PARAMETER_LIST;
			goto err;
		}

		if (lba + range > dev->transport->get_blocks(dev) + 1) {
			ret = TCM_ADDRESS_OUT_OF_RANGE;
			goto err;
		}

		err = blkdev_issue_discard(ib_dev->ibd_bd, lba, range,
					   GFP_KERNEL, 0);
		if (err < 0) {
			pr_err("blkdev_issue_discard() failed: %d\n",
					err);
			ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
			goto err;
		}

		ptr += 16;
		size -= 16;
	}

err:
	transport_kunmap_data_sg(cmd);
	if (!ret)
		target_complete_cmd(cmd, GOOD);
	return ret;
}

static struct bio *iblock_get_bio(struct se_cmd *, sector_t, u32);
static void iblock_submit_bios(struct bio_list *, int);
static void iblock_complete_cmd(struct se_cmd *);

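/*
 * Implement WRITE SAME with the UNMAP bit set: the whole LBA range is simply
 * discarded on the backing block device.
 */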
static sense_reason_t
iblock_execute_write_same_unmap(struct se_cmd *cmd)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(cmd->se_dev);
	int rc;

	rc = blkdev_issue_discard(ib_dev->ibd_bd, cmd->t_task_lba,
			spc_get_write_same_sectors(cmd), GFP_KERNEL, 0);
	if (rc < 0) {
		pr_warn("blkdev_issue_discard() failed: %d\n", rc);
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
	}

	target_complete_cmd(cmd, GOOD);
	return 0;
}

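/*
 * Implement WRITE SAME without the UNMAP bit: the single-block payload is
 * mapped into as many bios as needed to cover the requested number of
 * logical blocks, then all bios are submitted in one plugged batch.
 */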
static sense_reason_t
iblock_execute_write_same(struct se_cmd *cmd)
{
	struct iblock_req *ibr;
	struct scatterlist *sg;
	struct bio *bio;
	struct bio_list list;
	sector_t block_lba = cmd->t_task_lba;
	unsigned int sectors = spc_get_write_same_sectors(cmd);

	sg = &cmd->t_data_sg[0];

	if (cmd->t_data_nents > 1 ||
	    sg->length != cmd->se_dev->dev_attrib.block_size) {
		pr_err("WRITE_SAME: Illegal SGL t_data_nents: %u length: %u"
			" block_size: %u\n", cmd->t_data_nents, sg->length,
			cmd->se_dev->dev_attrib.block_size);
		return TCM_INVALID_CDB_FIELD;
	}

	ibr = kzalloc(sizeof(struct iblock_req), GFP_KERNEL);
	if (!ibr)
		goto fail;
	cmd->priv = ibr;

	bio = iblock_get_bio(cmd, block_lba, 1);
	if (!bio)
		goto fail_free_ibr;

	bio_list_init(&list);
	bio_list_add(&list, bio);

	atomic_set(&ibr->pending, 1);

	while (sectors) {
		while (bio_add_page(bio, sg_page(sg), sg->length, sg->offset)
				!= sg->length) {

			bio = iblock_get_bio(cmd, block_lba, 1);
			if (!bio)
				goto fail_put_bios;

			atomic_inc(&ibr->pending);
			bio_list_add(&list, bio);
		}

		/* Always in 512 byte units for Linux/Block */
		block_lba += sg->length >> IBLOCK_LBA_SHIFT;
		sectors -= 1;
	}

	iblock_submit_bios(&list, WRITE);
	return 0;

fail_put_bios:
	while ((bio = bio_list_pop(&list)))
		bio_put(bio);
fail_free_ibr:
	kfree(ibr);
fail:
	return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}

enum {
	Opt_udev_path, Opt_readonly, Opt_force, Opt_err
};

static match_table_t tokens = {
	{Opt_udev_path, "udev_path=%s"},
	{Opt_readonly, "readonly=%d"},
	{Opt_force, "force=%d"},
	{Opt_err, NULL}
};

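/*
 * Parse the comma/newline separated key=value options written to the
 * device's configfs control attribute.  Recognized tokens are udev_path=,
 * readonly= and force= (currently a no-op).  A typical invocation from
 * userspace looks something like the following (illustrative path, normally
 * handled by targetcli/rtslib):
 *
 *   echo "udev_path=/dev/sdb" > \
 *       /sys/kernel/config/target/core/iblock_0/my_dev/control
 */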
static ssize_t iblock_set_configfs_dev_params(struct se_device *dev,
		const char *page, ssize_t count)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
	char *orig, *ptr, *arg_p, *opts;
	substring_t args[MAX_OPT_ARGS];
	int ret = 0, token;
	unsigned long tmp_readonly;

	opts = kstrdup(page, GFP_KERNEL);
	if (!opts)
		return -ENOMEM;

	orig = opts;

	while ((ptr = strsep(&opts, ",\n")) != NULL) {
		if (!*ptr)
			continue;

		token = match_token(ptr, tokens, args);
		switch (token) {
		case Opt_udev_path:
			if (ib_dev->ibd_bd) {
				pr_err("Unable to set udev_path= while"
					" ib_dev->ibd_bd exists\n");
				ret = -EEXIST;
				goto out;
			}
			if (match_strlcpy(ib_dev->ibd_udev_path, &args[0],
				SE_UDEV_PATH_LEN) == 0) {
				ret = -EINVAL;
				break;
			}
			pr_debug("IBLOCK: Referencing UDEV path: %s\n",
					ib_dev->ibd_udev_path);
			ib_dev->ibd_flags |= IBDF_HAS_UDEV_PATH;
			break;
		case Opt_readonly:
			arg_p = match_strdup(&args[0]);
			if (!arg_p) {
				ret = -ENOMEM;
				break;
			}
			ret = strict_strtoul(arg_p, 0, &tmp_readonly);
			kfree(arg_p);
			if (ret < 0) {
				pr_err("strict_strtoul() failed for"
						" readonly=\n");
				goto out;
			}
			ib_dev->ibd_readonly = tmp_readonly;
			pr_debug("IBLOCK: readonly: %d\n", ib_dev->ibd_readonly);
			break;
		case Opt_force:
			break;
		default:
			break;
		}
	}

out:
	kfree(orig);
	return (!ret) ? count : ret;
}

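/*
 * Format the current settings (backing device name, udev_path, readonly flag,
 * major:minor and claim state) for the configfs info attribute.
 */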
static ssize_t iblock_show_configfs_dev_params(struct se_device *dev, char *b)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
	struct block_device *bd = ib_dev->ibd_bd;
	char buf[BDEVNAME_SIZE];
	ssize_t bl = 0;

	if (bd)
		bl += sprintf(b + bl, "iBlock device: %s",
				bdevname(bd, buf));
	if (ib_dev->ibd_flags & IBDF_HAS_UDEV_PATH)
		bl += sprintf(b + bl, "  UDEV PATH: %s",
				ib_dev->ibd_udev_path);
	bl += sprintf(b + bl, "  readonly: %d\n", ib_dev->ibd_readonly);

	bl += sprintf(b + bl, "        ");
	if (bd) {
		bl += sprintf(b + bl, "Major: %d Minor: %d  %s\n",
			MAJOR(bd->bd_dev), MINOR(bd->bd_dev), (!bd->bd_contains) ?
			"" : (bd->bd_holder == ib_dev) ?
			"CLAIMED: IBLOCK" : "CLAIMED: OS");
	} else {
		bl += sprintf(b + bl, "Major: 0 Minor: 0\n");
	}

	return bl;
}

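/*
 * Drop one reference on the pending bio count; when the last bio has
 * completed, report GOOD or CHECK CONDITION (depending on ib_bio_err_cnt)
 * back to the target core and free the iblock_req.
 */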
static void iblock_complete_cmd(struct se_cmd *cmd)
{
	struct iblock_req *ibr = cmd->priv;
	u8 status;

	if (!atomic_dec_and_test(&ibr->pending))
		return;

	if (atomic_read(&ibr->ib_bio_err_cnt))
		status = SAM_STAT_CHECK_CONDITION;
	else
		status = SAM_STAT_GOOD;

	target_complete_cmd(cmd, status);
	kfree(ibr);
}

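/*
 * Allocate a bio from the per-device bioset, capped at BIO_MAX_PAGES vector
 * entries and aimed at @lba (in 512-byte sectors) on the backing device.
 */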
static struct bio *
iblock_get_bio(struct se_cmd *cmd, sector_t lba, u32 sg_num)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(cmd->se_dev);
	struct bio *bio;

	/*
	 * Only allocate as many vector entries as the bio code allows us to,
	 * we'll loop later on until we have handled the whole request.
	 */
	if (sg_num > BIO_MAX_PAGES)
		sg_num = BIO_MAX_PAGES;

	bio = bio_alloc_bioset(GFP_NOIO, sg_num, ib_dev->ibd_bio_set);
	if (!bio) {
		pr_err("Unable to allocate memory for bio\n");
		return NULL;
	}

	bio->bi_bdev = ib_dev->ibd_bd;
	bio->bi_private = cmd;
	bio->bi_end_io = &iblock_bio_done;
	bio->bi_sector = lba;
	return bio;
}

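/*
 * Submit every bio on the list inside a single blk plug so the block layer
 * can merge and batch them.
 */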
static void iblock_submit_bios(struct bio_list *list, int rw)
{
	struct blk_plug plug;
	struct bio *bio;

	blk_start_plug(&plug);
	while ((bio = bio_list_pop(list)))
		submit_bio(rw, bio);
	blk_finish_plug(&plug);
}

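/*
 * Main READ/WRITE path: pick the request type (READ, WRITE or WRITE_FUA),
 * convert the SCSI LBA into 512-byte block layer sectors, map the command's
 * scatterlist onto one or more bios, and submit them in batches of at most
 * IBLOCK_MAX_BIO_PER_TASK.
 */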
static sense_reason_t
iblock_execute_rw(struct se_cmd *cmd)
{
	struct scatterlist *sgl = cmd->t_data_sg;
	u32 sgl_nents = cmd->t_data_nents;
	enum dma_data_direction data_direction = cmd->data_direction;
	struct se_device *dev = cmd->se_dev;
	struct iblock_req *ibr;
	struct bio *bio;
	struct bio_list list;
	struct scatterlist *sg;
	u32 sg_num = sgl_nents;
	sector_t block_lba;
	unsigned bio_cnt;
	int rw;
	int i;

	if (data_direction == DMA_TO_DEVICE) {
		/*
		 * Force data to disk if we pretend to not have a volatile
		 * write cache, or the initiator set the Force Unit Access bit.
		 */
		if (dev->dev_attrib.emulate_write_cache == 0 ||
		    (dev->dev_attrib.emulate_fua_write > 0 &&
		     (cmd->se_cmd_flags & SCF_FUA)))
			rw = WRITE_FUA;
		else
			rw = WRITE;
	} else {
		rw = READ;
	}

	/*
	 * Convert the blocksize advertised to the initiator to the 512 byte
	 * units unconditionally used by the Linux block layer.
	 */
	if (dev->dev_attrib.block_size == 4096)
		block_lba = (cmd->t_task_lba << 3);
	else if (dev->dev_attrib.block_size == 2048)
		block_lba = (cmd->t_task_lba << 2);
	else if (dev->dev_attrib.block_size == 1024)
		block_lba = (cmd->t_task_lba << 1);
	else if (dev->dev_attrib.block_size == 512)
		block_lba = cmd->t_task_lba;
	else {
		pr_err("Unsupported SCSI -> BLOCK LBA conversion:"
				" %u\n", dev->dev_attrib.block_size);
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
	}

	ibr = kzalloc(sizeof(struct iblock_req), GFP_KERNEL);
	if (!ibr)
		goto fail;
	cmd->priv = ibr;

	if (!sgl_nents) {
		atomic_set(&ibr->pending, 1);
		iblock_complete_cmd(cmd);
		return 0;
	}

	bio = iblock_get_bio(cmd, block_lba, sgl_nents);
	if (!bio)
		goto fail_free_ibr;

	bio_list_init(&list);
	bio_list_add(&list, bio);

	atomic_set(&ibr->pending, 2);
	bio_cnt = 1;

	for_each_sg(sgl, sg, sgl_nents, i) {
		/*
		 * XXX: if the length the device accepts is shorter than the
		 *	length of the S/G list entry this will cause an
		 *	endless loop.  Better hope no driver uses huge pages.
		 */
		while (bio_add_page(bio, sg_page(sg), sg->length, sg->offset)
				!= sg->length) {
			if (bio_cnt >= IBLOCK_MAX_BIO_PER_TASK) {
				iblock_submit_bios(&list, rw);
				bio_cnt = 0;
			}

			bio = iblock_get_bio(cmd, block_lba, sg_num);
			if (!bio)
				goto fail_put_bios;

			atomic_inc(&ibr->pending);
			bio_list_add(&list, bio);
			bio_cnt++;
		}

		/* Always in 512 byte units for Linux/Block */
		block_lba += sg->length >> IBLOCK_LBA_SHIFT;
		sg_num--;
	}

	iblock_submit_bios(&list, rw);
	iblock_complete_cmd(cmd);
	return 0;

fail_put_bios:
	while ((bio = bio_list_pop(&list)))
		bio_put(bio);
fail_free_ibr:
	kfree(ibr);
fail:
	return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}

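/*
 * se_subsystem_api->get_blocks(): report the last addressable LBA of the
 * backing block device in units of the exported block size.
 */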
static sector_t iblock_get_blocks(struct se_device *dev)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
	struct block_device *bd = ib_dev->ibd_bd;
	struct request_queue *q = bdev_get_queue(bd);

	return iblock_emulate_read_cap_with_block_size(dev, bd, q);
}

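/*
 * Per-bio completion callback: record any error in ib_bio_err_cnt, release
 * the bio, and drop one pending reference via iblock_complete_cmd().
 */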
static void iblock_bio_done(struct bio *bio, int err)
{
	struct se_cmd *cmd = bio->bi_private;
	struct iblock_req *ibr = cmd->priv;

	/*
	 * Set -EIO if !BIO_UPTODATE and the passed err is still 0
	 */
	if (!test_bit(BIO_UPTODATE, &bio->bi_flags) && !err)
		err = -EIO;

	if (err != 0) {
		pr_err("test_bit(BIO_UPTODATE) failed for bio: %p,"
			" err: %d\n", bio, err);
		/*
		 * Bump the ib_bio_err_cnt and release bio.
		 */
		atomic_inc(&ibr->ib_bio_err_cnt);
		smp_mb__after_atomic_inc();
	}

	bio_put(bio);

	iblock_complete_cmd(cmd);
}

static struct sbc_ops iblock_sbc_ops = {
	.execute_rw		= iblock_execute_rw,
	.execute_sync_cache	= iblock_execute_sync_cache,
	.execute_write_same	= iblock_execute_write_same,
	.execute_write_same_unmap = iblock_execute_write_same_unmap,
	.execute_unmap		= iblock_execute_unmap,
};

static sense_reason_t
iblock_parse_cdb(struct se_cmd *cmd)
{
	return sbc_parse_cdb(cmd, &iblock_sbc_ops);
}

static struct se_subsystem_api iblock_template = {
	.name			= "iblock",
	.inquiry_prod		= "IBLOCK",
	.inquiry_rev		= IBLOCK_VERSION,
	.owner			= THIS_MODULE,
	.transport_type		= TRANSPORT_PLUGIN_VHBA_PDEV,
	.attach_hba		= iblock_attach_hba,
	.detach_hba		= iblock_detach_hba,
	.alloc_device		= iblock_alloc_device,
	.configure_device	= iblock_configure_device,
	.free_device		= iblock_free_device,
	.parse_cdb		= iblock_parse_cdb,
	.set_configfs_dev_params = iblock_set_configfs_dev_params,
	.show_configfs_dev_params = iblock_show_configfs_dev_params,
	.get_device_type	= sbc_get_device_type,
	.get_blocks		= iblock_get_blocks,
};

static int __init iblock_module_init(void)
{
	return transport_subsystem_register(&iblock_template);
}

static void iblock_module_exit(void)
{
	transport_subsystem_release(&iblock_template);
}

MODULE_DESCRIPTION("TCM IBLOCK subsystem plugin");
MODULE_AUTHOR("nab@Linux-iSCSI.org");
MODULE_LICENSE("GPL");

module_init(iblock_module_init);
module_exit(iblock_module_exit);