/*
 * bsg.c - block layer implementation of the sg v4 interface
 *
 * Copyright (C) 2004 Jens Axboe <axboe@suse.de> SUSE Labs
 * Copyright (C) 2004 Peter M. Jones <pjones@redhat.com>
 *
 *  This file is subject to the terms and conditions of the GNU General Public
 *  License version 2.  See the file "COPYING" in the main directory of this
 *  archive for more details.
 *
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/file.h>
#include <linux/blkdev.h>
#include <linux/poll.h>
#include <linux/cdev.h>
#include <linux/jiffies.h>
#include <linux/percpu.h>
#include <linux/uio.h>
#include <linux/idr.h>
#include <linux/bsg.h>
#include <linux/slab.h>

#include <scsi/scsi.h>
#include <scsi/scsi_ioctl.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_driver.h>
#include <scsi/sg.h>

#define BSG_DESCRIPTION	"Block layer SCSI generic (bsg) driver"
#define BSG_VERSION	"0.4"

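/*
 * Per-open state for a bsg character device.  Submitted commands sit on
 * @busy_list until the block layer completes them, at which point they
 * move to @done_list for read() to reap.  @queued_cmds counts commands
 * allocated against @max_queue and @done_cmds counts entries on
 * @done_list; both are protected by @lock.
 */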
struct bsg_device {
	struct request_queue *queue;
	spinlock_t lock;
	struct list_head busy_list;
	struct list_head done_list;
	struct hlist_node dev_list;
	atomic_t ref_count;
	int queued_cmds;
	int done_cmds;
	wait_queue_head_t wq_done;
	wait_queue_head_t wq_free;
	char name[20];
	int max_queue;
	unsigned long flags;
};

enum {
	BSG_F_BLOCK		= 1,
};

#define BSG_DEFAULT_CMDS	64
#define BSG_MAX_DEVS		32768

#undef BSG_DEBUG

#ifdef BSG_DEBUG
#define dprintk(fmt, args...) printk(KERN_ERR "%s: " fmt, __func__, ##args)
#else
#define dprintk(fmt, args...)
#endif

static DEFINE_MUTEX(bsg_mutex);
static DEFINE_IDR(bsg_minor_idr);

#define BSG_LIST_ARRAY_SIZE	8
static struct hlist_head bsg_device_list[BSG_LIST_ARRAY_SIZE];

static struct class *bsg_class;
static int bsg_major;

static struct kmem_cache *bsg_cmd_cachep;

/*
 * our internal command type
 */
struct bsg_command {
	struct bsg_device *bd;
	struct list_head list;
	struct request *rq;
	struct bio *bio;
	struct bio *bidi_bio;
	int err;
	struct sg_io_v4 hdr;
	char sense[SCSI_SENSE_BUFFERSIZE];
};

static void bsg_free_command(struct bsg_command *bc)
{
	struct bsg_device *bd = bc->bd;
	unsigned long flags;

	kmem_cache_free(bsg_cmd_cachep, bc);

	spin_lock_irqsave(&bd->lock, flags);
	bd->queued_cmds--;
	spin_unlock_irqrestore(&bd->lock, flags);

	wake_up(&bd->wq_free);
}

static struct bsg_command *bsg_alloc_command(struct bsg_device *bd)
{
	struct bsg_command *bc = ERR_PTR(-EINVAL);

	spin_lock_irq(&bd->lock);

	if (bd->queued_cmds >= bd->max_queue)
		goto out;

	bd->queued_cmds++;
	spin_unlock_irq(&bd->lock);

	bc = kmem_cache_zalloc(bsg_cmd_cachep, GFP_KERNEL);
	if (unlikely(!bc)) {
		spin_lock_irq(&bd->lock);
		bd->queued_cmds--;
		bc = ERR_PTR(-ENOMEM);
		goto out;
	}

	bc->bd = bd;
	INIT_LIST_HEAD(&bc->list);
	dprintk("%s: returning free cmd %p\n", bd->name, bc);
	return bc;
out:
	spin_unlock_irq(&bd->lock);
	return bc;
}

static inline struct hlist_head *bsg_dev_idx_hash(int index)
{
	return &bsg_device_list[index & (BSG_LIST_ARRAY_SIZE - 1)];
}

static int bsg_io_schedule(struct bsg_device *bd)
{
	DEFINE_WAIT(wait);
	int ret = 0;

	spin_lock_irq(&bd->lock);

	BUG_ON(bd->done_cmds > bd->queued_cmds);

	/*
	 * -ENOSPC or -ENODATA?  I'm going for -ENODATA, meaning "I have no
	 * work to do", even though we return -ENOSPC after this same test
	 * during bsg_write() -- there, it means our buffer can't have more
	 * bsg_commands added to it, thus has no space left.
	 */
	if (bd->done_cmds == bd->queued_cmds) {
		ret = -ENODATA;
		goto unlock;
	}

	if (!test_bit(BSG_F_BLOCK, &bd->flags)) {
		ret = -EAGAIN;
		goto unlock;
	}

	prepare_to_wait(&bd->wq_done, &wait, TASK_UNINTERRUPTIBLE);
	spin_unlock_irq(&bd->lock);
	io_schedule();
	finish_wait(&bd->wq_done, &wait);

	return ret;
unlock:
	spin_unlock_irq(&bd->lock);
	return ret;
}

static int blk_fill_sgv4_hdr_rq(struct request_queue *q, struct request *rq,
				struct sg_io_v4 *hdr, struct bsg_device *bd,
				fmode_t has_write_perm)
{
	if (hdr->request_len > BLK_MAX_CDB) {
		rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL);
		if (!rq->cmd)
			return -ENOMEM;
	}

	if (copy_from_user(rq->cmd, (void __user *)(unsigned long)hdr->request,
			   hdr->request_len))
		return -EFAULT;

	if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
		if (blk_verify_command(rq->cmd, has_write_perm))
			return -EPERM;
	} else if (!capable(CAP_SYS_RAWIO))
		return -EPERM;

	/*
	 * fill in request structure
	 */
	rq->cmd_len = hdr->request_len;

	rq->timeout = msecs_to_jiffies(hdr->timeout);
	if (!rq->timeout)
		rq->timeout = q->sg_timeout;
	if (!rq->timeout)
		rq->timeout = BLK_DEFAULT_SG_TIMEOUT;
	if (rq->timeout < BLK_MIN_SG_TIMEOUT)
		rq->timeout = BLK_MIN_SG_TIMEOUT;

	return 0;
}

/*
 * Check if sg_io_v4 from user is allowed and valid
 */
static int
bsg_validate_sgv4_hdr(struct request_queue *q, struct sg_io_v4 *hdr, int *rw)
{
	int ret = 0;

	if (hdr->guard != 'Q')
		return -EINVAL;

	switch (hdr->protocol) {
	case BSG_PROTOCOL_SCSI:
		switch (hdr->subprotocol) {
		case BSG_SUB_PROTOCOL_SCSI_CMD:
		case BSG_SUB_PROTOCOL_SCSI_TRANSPORT:
			break;
		default:
			ret = -EINVAL;
		}
		break;
	default:
		ret = -EINVAL;
	}

	*rw = hdr->dout_xfer_len ? WRITE : READ;
	return ret;
}
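
/*
 * For reference, a minimal userspace header that passes the validation
 * above (an editor's sketch, not part of the driver; "cdb" is a
 * caller-supplied command buffer):
 *
 *	struct sg_io_v4 hdr = { 0 };
 *
 *	hdr.guard	= 'Q';
 *	hdr.protocol	= BSG_PROTOCOL_SCSI;
 *	hdr.subprotocol	= BSG_SUB_PROTOCOL_SCSI_CMD;
 *	hdr.request	= (__u64)(unsigned long)cdb;
 *	hdr.request_len	= sizeof(cdb);
 *
 * Note that setting dout_xfer_len makes the request a WRITE; otherwise it
 * is treated as a READ, whether or not din_xfer_len is set.
 */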

/*
 * map sg_io_v4 to a request.
 */
static struct request *
bsg_map_hdr(struct bsg_device *bd, struct sg_io_v4 *hdr, fmode_t has_write_perm,
	    u8 *sense)
{
	struct request_queue *q = bd->queue;
	struct request *rq, *next_rq = NULL;
	int ret, rw;
	unsigned int dxfer_len;
	void __user *dxferp = NULL;
	struct bsg_class_device *bcd = &q->bsg_dev;

	/* if the LLD has been removed then bsg_unregister_queue() will
	 * eventually be called and the class_dev freed, so we can no
	 * longer use this request_queue. Return no such address.
	 */
	if (!bcd->class_dev)
		return ERR_PTR(-ENXIO);

	dprintk("map hdr %llx/%u %llx/%u\n", (unsigned long long) hdr->dout_xferp,
		hdr->dout_xfer_len, (unsigned long long) hdr->din_xferp,
		hdr->din_xfer_len);

	ret = bsg_validate_sgv4_hdr(q, hdr, &rw);
	if (ret)
		return ERR_PTR(ret);

	/*
	 * map scatter-gather elements separately and chain them onto the
	 * request
	 */
	rq = blk_get_request(q, rw, GFP_KERNEL);
	if (!rq)
		return ERR_PTR(-ENOMEM);
	blk_rq_set_block_pc(rq);

	ret = blk_fill_sgv4_hdr_rq(q, rq, hdr, bd, has_write_perm);
	if (ret)
		goto out;

	if (rw == WRITE && hdr->din_xfer_len) {
		if (!test_bit(QUEUE_FLAG_BIDI, &q->queue_flags)) {
			ret = -EOPNOTSUPP;
			goto out;
		}

		next_rq = blk_get_request(q, READ, GFP_KERNEL);
		if (!next_rq) {
			ret = -ENOMEM;
			goto out;
		}
		rq->next_rq = next_rq;
		next_rq->cmd_type = rq->cmd_type;

		dxferp = (void __user *)(unsigned long)hdr->din_xferp;
		ret = blk_rq_map_user(q, next_rq, NULL, dxferp,
				      hdr->din_xfer_len, GFP_KERNEL);
		if (ret)
			goto out;
	}

	if (hdr->dout_xfer_len) {
		dxfer_len = hdr->dout_xfer_len;
		dxferp = (void __user *)(unsigned long)hdr->dout_xferp;
	} else if (hdr->din_xfer_len) {
		dxfer_len = hdr->din_xfer_len;
		dxferp = (void __user *)(unsigned long)hdr->din_xferp;
	} else
		dxfer_len = 0;

	if (dxfer_len) {
		ret = blk_rq_map_user(q, rq, NULL, dxferp, dxfer_len,
				      GFP_KERNEL);
		if (ret)
			goto out;
	}

	rq->sense = sense;
	rq->sense_len = 0;

	return rq;
out:
	if (rq->cmd != rq->__cmd)
		kfree(rq->cmd);
	blk_put_request(rq);
	if (next_rq) {
		blk_rq_unmap_user(next_rq->bio);
		blk_put_request(next_rq);
	}
	return ERR_PTR(ret);
}
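
/*
 * A bidirectional command (an editor's sketch; only valid on queues that
 * set QUEUE_FLAG_BIDI, e.g. OSD-style transports) supplies both transfer
 * lengths, and bsg_map_hdr() above builds a second, READ request for the
 * inbound half:
 *
 *	hdr.dout_xferp	  = (__u64)(unsigned long)out_buf;
 *	hdr.dout_xfer_len = out_len;
 *	hdr.din_xferp	  = (__u64)(unsigned long)in_buf;
 *	hdr.din_xfer_len  = in_len;
 */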

/*
 * async completion call-back from the block layer, when scsi/ide/whatever
 * calls end_that_request_last() on a request
 */
static void bsg_rq_end_io(struct request *rq, int uptodate)
{
	struct bsg_command *bc = rq->end_io_data;
	struct bsg_device *bd = bc->bd;
	unsigned long flags;

	dprintk("%s: finished rq %p bc %p, bio %p stat %d\n",
		bd->name, rq, bc, bc->bio, uptodate);

	bc->hdr.duration = jiffies_to_msecs(jiffies - bc->hdr.duration);

	spin_lock_irqsave(&bd->lock, flags);
	list_move_tail(&bc->list, &bd->done_list);
	bd->done_cmds++;
	spin_unlock_irqrestore(&bd->lock, flags);

	wake_up(&bd->wq_done);
}
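
/*
 * Command lifecycle, for orientation: bsg_alloc_command() reserves a slot
 * against bd->max_queue, bsg_add_command() below puts the command on
 * busy_list and hands the request to the block layer, and bsg_rq_end_io()
 * above runs on completion, moving it to done_list and waking readers.
 * read() then reaps it via blk_complete_sgv4_hdr_rq(), and
 * bsg_free_command() releases the slot.
 */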

/*
 * do final setup of a 'bc' and submit the matching 'rq' to the block
 * layer for io
 */
static void bsg_add_command(struct bsg_device *bd, struct request_queue *q,
			    struct bsg_command *bc, struct request *rq)
{
	int at_head = (0 == (bc->hdr.flags & BSG_FLAG_Q_AT_TAIL));

	/*
	 * add bc command to busy queue and submit rq for io
	 */
	bc->rq = rq;
	bc->bio = rq->bio;
	if (rq->next_rq)
		bc->bidi_bio = rq->next_rq->bio;
	bc->hdr.duration = jiffies;
	spin_lock_irq(&bd->lock);
	list_add_tail(&bc->list, &bd->busy_list);
	spin_unlock_irq(&bd->lock);

	dprintk("%s: queueing rq %p, bc %p\n", bd->name, rq, bc);

	rq->end_io_data = bc;
	blk_execute_rq_nowait(q, NULL, rq, at_head, bsg_rq_end_io);
}

static struct bsg_command *bsg_next_done_cmd(struct bsg_device *bd)
{
	struct bsg_command *bc = NULL;

	spin_lock_irq(&bd->lock);
	if (bd->done_cmds) {
		bc = list_first_entry(&bd->done_list, struct bsg_command, list);
		list_del(&bc->list);
		bd->done_cmds--;
	}
	spin_unlock_irq(&bd->lock);

	return bc;
}

/*
 * Get a finished command from the done list
 */
static struct bsg_command *bsg_get_done_cmd(struct bsg_device *bd)
{
	struct bsg_command *bc;
	int ret;

	do {
		bc = bsg_next_done_cmd(bd);
		if (bc)
			break;

		if (!test_bit(BSG_F_BLOCK, &bd->flags)) {
			bc = ERR_PTR(-EAGAIN);
			break;
		}

		ret = wait_event_interruptible(bd->wq_done, bd->done_cmds);
		if (ret) {
			bc = ERR_PTR(-ERESTARTSYS);
			break;
		}
	} while (1);

	dprintk("%s: returning done %p\n", bd->name, bc);

	return bc;
}

static int blk_complete_sgv4_hdr_rq(struct request *rq, struct sg_io_v4 *hdr,
				    struct bio *bio, struct bio *bidi_bio)
{
	int ret = 0;

	dprintk("rq %p bio %p 0x%x\n", rq, bio, rq->errors);
	/*
	 * fill in all the output members
	 */
	hdr->device_status = rq->errors & 0xff;
	hdr->transport_status = host_byte(rq->errors);
	hdr->driver_status = driver_byte(rq->errors);
	hdr->info = 0;
	if (hdr->device_status || hdr->transport_status || hdr->driver_status)
		hdr->info |= SG_INFO_CHECK;
	hdr->response_len = 0;

	if (rq->sense_len && hdr->response) {
		int len = min_t(unsigned int, hdr->max_response_len,
					rq->sense_len);

		ret = copy_to_user((void __user *)(unsigned long)hdr->response,
				   rq->sense, len);
		if (!ret)
			hdr->response_len = len;
		else
			ret = -EFAULT;
	}

	if (rq->next_rq) {
		hdr->dout_resid = rq->resid_len;
		hdr->din_resid = rq->next_rq->resid_len;
		blk_rq_unmap_user(bidi_bio);
		blk_put_request(rq->next_rq);
	} else if (rq_data_dir(rq) == READ)
		hdr->din_resid = rq->resid_len;
	else
		hdr->dout_resid = rq->resid_len;

	/*
	 * If the request generated a negative error number, return it
	 * (providing we aren't already returning an error); if it's
	 * just a protocol response (i.e. non negative), that gets
	 * processed above.
	 */
	if (!ret && rq->errors < 0)
		ret = rq->errors;

	blk_rq_unmap_user(bio);
	if (rq->cmd != rq->__cmd)
		kfree(rq->cmd);
	blk_put_request(rq);

	return ret;
}
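
/*
 * Interpreting the reaped header in userspace (an editor's sketch; the
 * field semantics follow from blk_complete_sgv4_hdr_rq() above):
 *
 *	if (hdr.info & SG_INFO_CHECK) {
 *		// device_status holds the SCSI status byte, e.g. 0x02
 *		// (CHECK CONDITION); sense data, if requested via
 *		// hdr.response/max_response_len, is response_len bytes
 *	}
 *	// din_resid/dout_resid report bytes *not* transferred
 */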

static int bsg_complete_all_commands(struct bsg_device *bd)
{
	struct bsg_command *bc;
	int ret, tret;

	dprintk("%s: entered\n", bd->name);

	/*
	 * wait for all commands to complete
	 */
	ret = 0;
	do {
		ret = bsg_io_schedule(bd);
		/*
		 * look for -ENODATA specifically -- we'll sometimes get
		 * -ERESTARTSYS when we've taken a signal, but we can't
		 * return until we're done freeing the queue, so ignore
		 * it.  The signal will get handled when we're done freeing
		 * the bsg_device.
		 */
	} while (ret != -ENODATA);

	/*
	 * discard done commands
	 */
	ret = 0;
	do {
		spin_lock_irq(&bd->lock);
		if (!bd->queued_cmds) {
			spin_unlock_irq(&bd->lock);
			break;
		}
		spin_unlock_irq(&bd->lock);

		bc = bsg_get_done_cmd(bd);
		if (IS_ERR(bc))
			break;

		tret = blk_complete_sgv4_hdr_rq(bc->rq, &bc->hdr, bc->bio,
						bc->bidi_bio);
		if (!ret)
			ret = tret;

		bsg_free_command(bc);
	} while (1);

	return ret;
}

static int
__bsg_read(char __user *buf, size_t count, struct bsg_device *bd,
	   const struct iovec *iov, ssize_t *bytes_read)
{
	struct bsg_command *bc;
	int nr_commands, ret;

	if (count % sizeof(struct sg_io_v4))
		return -EINVAL;

	ret = 0;
	nr_commands = count / sizeof(struct sg_io_v4);
	while (nr_commands) {
		bc = bsg_get_done_cmd(bd);
		if (IS_ERR(bc)) {
			ret = PTR_ERR(bc);
			break;
		}

		/*
		 * this is the only case where we need to copy data back
		 * to user space after completing the request, so do that
		 * here; the completion callback (bsg_rq_end_io()) cannot
		 * do it for us
		 */
		ret = blk_complete_sgv4_hdr_rq(bc->rq, &bc->hdr, bc->bio,
					       bc->bidi_bio);

		if (copy_to_user(buf, &bc->hdr, sizeof(bc->hdr)))
			ret = -EFAULT;

		bsg_free_command(bc);

		if (ret)
			break;

		buf += sizeof(struct sg_io_v4);
		*bytes_read += sizeof(struct sg_io_v4);
		nr_commands--;
	}

	return ret;
}

static inline void bsg_set_block(struct bsg_device *bd, struct file *file)
{
	if (file->f_flags & O_NONBLOCK)
		clear_bit(BSG_F_BLOCK, &bd->flags);
	else
		set_bit(BSG_F_BLOCK, &bd->flags);
}

/*
 * Check if the error is a "real" error that we should return.
 */
static inline int err_block_err(int ret)
{
	if (ret && ret != -ENOSPC && ret != -ENODATA && ret != -EAGAIN)
		return 1;

	return 0;
}
static ssize_t
bsg_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
	struct bsg_device *bd = file->private_data;
	int ret;
	ssize_t bytes_read;

	dprintk("%s: read %zu bytes\n", bd->name, count);

	bsg_set_block(bd, file);

	bytes_read = 0;
	ret = __bsg_read(buf, count, bd, NULL, &bytes_read);
	*ppos = bytes_read;

	if (!bytes_read || err_block_err(ret))
		bytes_read = ret;

	return bytes_read;
}

static int __bsg_write(struct bsg_device *bd, const char __user *buf,
		       size_t count, ssize_t *bytes_written,
		       fmode_t has_write_perm)
{
	struct bsg_command *bc;
	struct request *rq;
	int ret, nr_commands;

	if (count % sizeof(struct sg_io_v4))
		return -EINVAL;

	nr_commands = count / sizeof(struct sg_io_v4);
	rq = NULL;
	bc = NULL;
	ret = 0;
	while (nr_commands) {
		struct request_queue *q = bd->queue;

		bc = bsg_alloc_command(bd);
		if (IS_ERR(bc)) {
			ret = PTR_ERR(bc);
			bc = NULL;
			break;
		}

		if (copy_from_user(&bc->hdr, buf, sizeof(bc->hdr))) {
			ret = -EFAULT;
			break;
		}

		/*
		 * get a request, fill in the blanks, and add to request queue
		 */
		rq = bsg_map_hdr(bd, &bc->hdr, has_write_perm, bc->sense);
		if (IS_ERR(rq)) {
			ret = PTR_ERR(rq);
			rq = NULL;
			break;
		}

		bsg_add_command(bd, q, bc, rq);
		bc = NULL;
		rq = NULL;
		nr_commands--;
		buf += sizeof(struct sg_io_v4);
		*bytes_written += sizeof(struct sg_io_v4);
	}

	if (bc)
		bsg_free_command(bc);

	return ret;
}

static ssize_t
bsg_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
{
	struct bsg_device *bd = file->private_data;
	ssize_t bytes_written;
	int ret;

	dprintk("%s: write %zu bytes\n", bd->name, count);

	bsg_set_block(bd, file);

	bytes_written = 0;
	ret = __bsg_write(bd, buf, count, &bytes_written,
			  file->f_mode & FMODE_WRITE);

	*ppos = bytes_written;

	/*
	 * return bytes written on non-fatal errors
	 */
	if (!bytes_written || err_block_err(ret))
		bytes_written = ret;

	dprintk("%s: returning %zd\n", bd->name, bytes_written);
	return bytes_written;
}
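
/*
 * The write()/read() pair above gives an asynchronous submission model.
 * A userspace sketch (editor's example; "fd" is an open /dev/bsg node and
 * hdrs[] an array of prepared sg_io_v4 headers):
 *
 *	write(fd, hdrs, n * sizeof(struct sg_io_v4));	// submit n commands
 *	poll(&(struct pollfd){ .fd = fd, .events = POLLIN }, 1, -1);
 *	read(fd, hdrs, n * sizeof(struct sg_io_v4));	// reap completions
 *
 * Completions are returned in the order they finish, not the order they
 * were submitted; each header read back carries its own status and
 * residual counts.
 */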

static struct bsg_device *bsg_alloc_device(void)
{
	struct bsg_device *bd;

	bd = kzalloc(sizeof(struct bsg_device), GFP_KERNEL);
	if (unlikely(!bd))
		return NULL;

	spin_lock_init(&bd->lock);

	bd->max_queue = BSG_DEFAULT_CMDS;

	INIT_LIST_HEAD(&bd->busy_list);
	INIT_LIST_HEAD(&bd->done_list);
	INIT_HLIST_NODE(&bd->dev_list);

	init_waitqueue_head(&bd->wq_free);
	init_waitqueue_head(&bd->wq_done);
	return bd;
}

static void bsg_kref_release_function(struct kref *kref)
{
	struct bsg_class_device *bcd =
		container_of(kref, struct bsg_class_device, ref);
	struct device *parent = bcd->parent;

	if (bcd->release)
		bcd->release(bcd->parent);

	put_device(parent);
}

static int bsg_put_device(struct bsg_device *bd)
{
	int ret = 0, do_free;
	struct request_queue *q = bd->queue;

	mutex_lock(&bsg_mutex);

	do_free = atomic_dec_and_test(&bd->ref_count);
	if (!do_free) {
		mutex_unlock(&bsg_mutex);
		goto out;
	}

	hlist_del(&bd->dev_list);
	mutex_unlock(&bsg_mutex);

	dprintk("%s: tearing down\n", bd->name);

	/*
	 * close can always block
	 */
	set_bit(BSG_F_BLOCK, &bd->flags);

	/*
	 * error reporting here is best-effort: it's the responsibility of
	 * the app to reap its commands before close() if it wants
	 * fool-proof error detection
	 */
	ret = bsg_complete_all_commands(bd);

	kfree(bd);
out:
	kref_put(&q->bsg_dev.ref, bsg_kref_release_function);
	if (do_free)
		blk_put_queue(q);
	return ret;
}

static struct bsg_device *bsg_add_device(struct inode *inode,
					 struct request_queue *rq,
					 struct file *file)
{
	struct bsg_device *bd;
#ifdef BSG_DEBUG
	unsigned char buf[32];
#endif
	if (!blk_get_queue(rq))
		return ERR_PTR(-ENXIO);

	bd = bsg_alloc_device();
	if (!bd) {
		blk_put_queue(rq);
		return ERR_PTR(-ENOMEM);
	}

	bd->queue = rq;

	bsg_set_block(bd, file);

	atomic_set(&bd->ref_count, 1);
	mutex_lock(&bsg_mutex);
	hlist_add_head(&bd->dev_list, bsg_dev_idx_hash(iminor(inode)));

	strncpy(bd->name, dev_name(rq->bsg_dev.class_dev), sizeof(bd->name) - 1);
	dprintk("bound to <%s>, max queue %d\n",
		format_dev_t(buf, inode->i_rdev), bd->max_queue);

	mutex_unlock(&bsg_mutex);
	return bd;
}

static struct bsg_device *__bsg_get_device(int minor, struct request_queue *q)
{
	struct bsg_device *bd;

	mutex_lock(&bsg_mutex);

	hlist_for_each_entry(bd, bsg_dev_idx_hash(minor), dev_list) {
		if (bd->queue == q) {
			atomic_inc(&bd->ref_count);
			goto found;
		}
	}
	bd = NULL;
found:
	mutex_unlock(&bsg_mutex);
	return bd;
}

static struct bsg_device *bsg_get_device(struct inode *inode, struct file *file)
{
	struct bsg_device *bd;
	struct bsg_class_device *bcd;

	/*
	 * find the class device
	 */
	mutex_lock(&bsg_mutex);
	bcd = idr_find(&bsg_minor_idr, iminor(inode));
	if (bcd)
		kref_get(&bcd->ref);
	mutex_unlock(&bsg_mutex);

	if (!bcd)
		return ERR_PTR(-ENODEV);

	bd = __bsg_get_device(iminor(inode), bcd->queue);
	if (bd)
		return bd;

	bd = bsg_add_device(inode, bcd->queue, file);
	if (IS_ERR(bd))
		kref_put(&bcd->ref, bsg_kref_release_function);

	return bd;
}

static int bsg_open(struct inode *inode, struct file *file)
{
	struct bsg_device *bd;

	bd = bsg_get_device(inode, file);

	if (IS_ERR(bd))
		return PTR_ERR(bd);

	file->private_data = bd;
	return 0;
}

static int bsg_release(struct inode *inode, struct file *file)
{
	struct bsg_device *bd = file->private_data;

	file->private_data = NULL;
	return bsg_put_device(bd);
}

static unsigned int bsg_poll(struct file *file, poll_table *wait)
{
	struct bsg_device *bd = file->private_data;
	unsigned int mask = 0;

	poll_wait(file, &bd->wq_done, wait);
	poll_wait(file, &bd->wq_free, wait);

	spin_lock_irq(&bd->lock);
	if (!list_empty(&bd->done_list))
		mask |= POLLIN | POLLRDNORM;
	if (bd->queued_cmds < bd->max_queue)
		mask |= POLLOUT;
	spin_unlock_irq(&bd->lock);

	return mask;
}

static long bsg_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct bsg_device *bd = file->private_data;
	int __user *uarg = (int __user *) arg;
	int ret;

	switch (cmd) {
		/*
		 * our own ioctls
		 */
	case SG_GET_COMMAND_Q:
		return put_user(bd->max_queue, uarg);
	case SG_SET_COMMAND_Q: {
		int queue;

		if (get_user(queue, uarg))
			return -EFAULT;
		if (queue < 1)
			return -EINVAL;

		spin_lock_irq(&bd->lock);
		bd->max_queue = queue;
		spin_unlock_irq(&bd->lock);
		return 0;
	}

	/*
	 * SCSI/sg ioctls
	 */
	case SG_GET_VERSION_NUM:
	case SCSI_IOCTL_GET_IDLUN:
	case SCSI_IOCTL_GET_BUS_NUMBER:
	case SG_SET_TIMEOUT:
	case SG_GET_TIMEOUT:
	case SG_GET_RESERVED_SIZE:
	case SG_SET_RESERVED_SIZE:
	case SG_EMULATED_HOST:
	case SCSI_IOCTL_SEND_COMMAND: {
		void __user *uarg = (void __user *) arg;
		return scsi_cmd_ioctl(bd->queue, NULL, file->f_mode, cmd, uarg);
	}
	case SG_IO: {
		struct request *rq;
		struct bio *bio, *bidi_bio = NULL;
		struct sg_io_v4 hdr;
		int at_head;
		u8 sense[SCSI_SENSE_BUFFERSIZE];

		if (copy_from_user(&hdr, uarg, sizeof(hdr)))
			return -EFAULT;

		rq = bsg_map_hdr(bd, &hdr, file->f_mode & FMODE_WRITE, sense);
		if (IS_ERR(rq))
			return PTR_ERR(rq);

		bio = rq->bio;
		if (rq->next_rq)
			bidi_bio = rq->next_rq->bio;

		at_head = (0 == (hdr.flags & BSG_FLAG_Q_AT_TAIL));
		blk_execute_rq(bd->queue, NULL, rq, at_head);
		ret = blk_complete_sgv4_hdr_rq(rq, &hdr, bio, bidi_bio);

		if (copy_to_user(uarg, &hdr, sizeof(hdr)))
			return -EFAULT;

		return ret;
	}
	/*
	 * block device ioctls
	 */
	default:
#if 0
		return ioctl_by_bdev(bd->bdev, cmd, arg);
#else
		return -ENOTTY;
#endif
	}
}
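
/*
 * Synchronous one-shot issue via SG_IO (an editor's sketch; "fd", "cdb"
 * and "buf" are caller-supplied, and error checking is omitted):
 *
 *	struct sg_io_v4 hdr = { 0 };
 *
 *	hdr.guard	 = 'Q';
 *	hdr.protocol	 = BSG_PROTOCOL_SCSI;
 *	hdr.subprotocol	 = BSG_SUB_PROTOCOL_SCSI_CMD;
 *	hdr.request	 = (__u64)(unsigned long)cdb;	// e.g. INQUIRY
 *	hdr.request_len	 = 6;
 *	hdr.din_xferp	 = (__u64)(unsigned long)buf;
 *	hdr.din_xfer_len = sizeof(buf);
 *	hdr.timeout	 = 30000;			// milliseconds
 *
 *	ioctl(fd, SG_IO, &hdr);	// blocks until the command completes
 */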

static const struct file_operations bsg_fops = {
	.read		=	bsg_read,
	.write		=	bsg_write,
	.poll		=	bsg_poll,
	.open		=	bsg_open,
	.release	=	bsg_release,
	.unlocked_ioctl	=	bsg_ioctl,
	.owner		=	THIS_MODULE,
	.llseek		=	default_llseek,
};

void bsg_unregister_queue(struct request_queue *q)
{
	struct bsg_class_device *bcd = &q->bsg_dev;

	if (!bcd->class_dev)
		return;

	mutex_lock(&bsg_mutex);
	idr_remove(&bsg_minor_idr, bcd->minor);
	if (q->kobj.sd)
		sysfs_remove_link(&q->kobj, "bsg");
	device_unregister(bcd->class_dev);
	bcd->class_dev = NULL;
	kref_put(&bcd->ref, bsg_kref_release_function);
	mutex_unlock(&bsg_mutex);
}
EXPORT_SYMBOL_GPL(bsg_unregister_queue);

int bsg_register_queue(struct request_queue *q, struct device *parent,
		       const char *name, void (*release)(struct device *))
{
	struct bsg_class_device *bcd;
	dev_t dev;
	int ret;
	struct device *class_dev = NULL;
	const char *devname;

	if (name)
		devname = name;
	else
		devname = dev_name(parent);

	/*
	 * we need a proper transport to send commands, not a stacked device
	 */
	if (!queue_is_rq_based(q))
		return 0;

	bcd = &q->bsg_dev;
	memset(bcd, 0, sizeof(*bcd));

	mutex_lock(&bsg_mutex);

	ret = idr_alloc(&bsg_minor_idr, bcd, 0, BSG_MAX_DEVS, GFP_KERNEL);
	if (ret < 0) {
		if (ret == -ENOSPC) {
			printk(KERN_ERR "bsg: too many bsg devices\n");
			ret = -EINVAL;
		}
		goto unlock;
	}

	bcd->minor = ret;
	bcd->queue = q;
	bcd->parent = get_device(parent);
	bcd->release = release;
	kref_init(&bcd->ref);
	dev = MKDEV(bsg_major, bcd->minor);
	class_dev = device_create(bsg_class, parent, dev, NULL, "%s", devname);
	if (IS_ERR(class_dev)) {
		ret = PTR_ERR(class_dev);
		goto put_dev;
	}
	bcd->class_dev = class_dev;

	if (q->kobj.sd) {
		ret = sysfs_create_link(&q->kobj, &bcd->class_dev->kobj, "bsg");
		if (ret)
			goto unregister_class_dev;
	}

	mutex_unlock(&bsg_mutex);
	return 0;

unregister_class_dev:
	device_unregister(class_dev);
put_dev:
	put_device(parent);
	idr_remove(&bsg_minor_idr, bcd->minor);
unlock:
	mutex_unlock(&bsg_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(bsg_register_queue);
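
/*
 * How a driver hooks in (an editor's sketch): the SCSI midlayer registers
 * each device's request queue so it shows up as /dev/bsg/H:C:T:L, roughly:
 *
 *	error = bsg_register_queue(q, &sdev->sdev_gendev, NULL, NULL);
 *
 * Transport drivers instead pass an explicit name, and a release callback
 * when they manage their own bsg nodes.
 */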

static struct cdev bsg_cdev;

static char *bsg_devnode(struct device *dev, umode_t *mode)
{
	return kasprintf(GFP_KERNEL, "bsg/%s", dev_name(dev));
}

static int __init bsg_init(void)
{
	int ret, i;
	dev_t devid;

	bsg_cmd_cachep = kmem_cache_create("bsg_cmd",
				sizeof(struct bsg_command), 0, 0, NULL);
	if (!bsg_cmd_cachep) {
		printk(KERN_ERR "bsg: failed creating slab cache\n");
		return -ENOMEM;
	}

	for (i = 0; i < BSG_LIST_ARRAY_SIZE; i++)
		INIT_HLIST_HEAD(&bsg_device_list[i]);

	bsg_class = class_create(THIS_MODULE, "bsg");
	if (IS_ERR(bsg_class)) {
		ret = PTR_ERR(bsg_class);
		goto destroy_kmemcache;
	}
	bsg_class->devnode = bsg_devnode;

	ret = alloc_chrdev_region(&devid, 0, BSG_MAX_DEVS, "bsg");
	if (ret)
		goto destroy_bsg_class;

	bsg_major = MAJOR(devid);

	cdev_init(&bsg_cdev, &bsg_fops);
	ret = cdev_add(&bsg_cdev, MKDEV(bsg_major, 0), BSG_MAX_DEVS);
	if (ret)
		goto unregister_chrdev;

	printk(KERN_INFO BSG_DESCRIPTION " version " BSG_VERSION
	       " loaded (major %d)\n", bsg_major);
	return 0;
unregister_chrdev:
	unregister_chrdev_region(MKDEV(bsg_major, 0), BSG_MAX_DEVS);
destroy_bsg_class:
	class_destroy(bsg_class);
destroy_kmemcache:
	kmem_cache_destroy(bsg_cmd_cachep);
	return ret;
}

MODULE_AUTHOR("Jens Axboe");
MODULE_DESCRIPTION(BSG_DESCRIPTION);
MODULE_LICENSE("GPL");

device_initcall(bsg_init);