xref: /openbmc/linux/drivers/scsi/sg.c (revision 609e478b)
1 /*
2  *  History:
3  *  Started: Aug 9 by Lawrence Foard (entropy@world.std.com),
4  *           to allow user process control of SCSI devices.
5  *  Development Sponsored by Killy Corp. NY NY
6  *
7  * Original driver (sg.c):
8  *        Copyright (C) 1992 Lawrence Foard
9  * Version 2 and 3 extensions to driver:
10  *        Copyright (C) 1998 - 2014 Douglas Gilbert
11  *
12  * This program is free software; you can redistribute it and/or modify
13  * it under the terms of the GNU General Public License as published by
14  * the Free Software Foundation; either version 2, or (at your option)
15  * any later version.
16  *
17  */
18 
19 static int sg_version_num = 30536;	/* 2 digits for each component */
20 #define SG_VERSION_STR "3.5.36"
21 
22 /*
23  *  D. P. Gilbert (dgilbert@interlog.com), notes:
24  *      - scsi logging is available via SCSI_LOG_TIMEOUT macros. First
25  *        the kernel/module needs to be built with CONFIG_SCSI_LOGGING
26  *        (otherwise the macros compile to empty statements).
27  *
28  */
29 #include <linux/module.h>
30 
31 #include <linux/fs.h>
32 #include <linux/kernel.h>
33 #include <linux/sched.h>
34 #include <linux/string.h>
35 #include <linux/mm.h>
36 #include <linux/aio.h>
37 #include <linux/errno.h>
38 #include <linux/mtio.h>
39 #include <linux/ioctl.h>
40 #include <linux/slab.h>
41 #include <linux/fcntl.h>
42 #include <linux/init.h>
43 #include <linux/poll.h>
44 #include <linux/moduleparam.h>
45 #include <linux/cdev.h>
46 #include <linux/idr.h>
47 #include <linux/seq_file.h>
48 #include <linux/blkdev.h>
49 #include <linux/delay.h>
50 #include <linux/blktrace_api.h>
51 #include <linux/mutex.h>
52 #include <linux/atomic.h>
53 #include <linux/ratelimit.h>
54 
55 #include "scsi.h"
56 #include <scsi/scsi_dbg.h>
57 #include <scsi/scsi_host.h>
58 #include <scsi/scsi_driver.h>
59 #include <scsi/scsi_ioctl.h>
60 #include <scsi/sg.h>
61 
62 #include "scsi_logging.h"
63 
64 #ifdef CONFIG_SCSI_PROC_FS
65 #include <linux/proc_fs.h>
66 static char *sg_version_date = "20140603";
67 
68 static int sg_proc_init(void);
69 static void sg_proc_cleanup(void);
70 #endif
71 
72 #define SG_ALLOW_DIO_DEF 0
73 
74 #define SG_MAX_DEVS 32768
75 
76 /* SG_MAX_CDB_SIZE should be 260 (spc4r37 section 3.1.30); however, the type
77  * of sg_io_hdr::cmd_len can only represent 255. All SCSI commands longer
78  * than 16 bytes are "variable length", and their length must be a multiple of 4
79  */
80 #define SG_MAX_CDB_SIZE 252
81 
82 /*
83  * Suppose you want to calculate the formula muldiv(x,m,d)=int(x * m / d).
84  * When using 32 bit integers, x * m may overflow during the calculation.
85  * Replacing it with muldiv(x,m,d)=((x % d) * m) / d + int(x / d) * m
86  * calculates the same result, but prevents the overflow when both m and d
87  * are "small" numbers (like HZ and USER_HZ).
88  * Of course an overflow is unavoidable if the result of muldiv doesn't fit
89  * in 32 bits.
90  */
91 #define MULDIV(X,MUL,DIV) ((((X % DIV) * MUL) / DIV) + ((X / DIV) * MUL))
92 
93 #define SG_DEFAULT_TIMEOUT MULDIV(SG_DEFAULT_TIMEOUT_USER, HZ, USER_HZ)
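/*
 * Worked example (illustrative, values assumed): with USER_HZ == 100 and
 * HZ == 1000, converting a timeout of 5,000,000 USER_HZ ticks naively would
 * need 5,000,000 * 1000 = 5 * 10^9, which overflows a 32 bit int.  MULDIV
 * splits the work instead:
 *
 *	MULDIV(5000000, 1000, 100)
 *	    = ((5000000 % 100) * 1000) / 100  +  (5000000 / 100) * 1000
 *	    = 0                               +  50000 * 1000
 *	    = 50,000,000 jiffies
 *
 * which equals int(5,000,000 * 1000 / 100) without the intermediate overflow.
 */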
94 
95 int sg_big_buff = SG_DEF_RESERVED_SIZE;
96 /* N.B. This variable is readable and writeable via
97    /proc/scsi/sg/def_reserved_size . Each time sg_open() is called a buffer
98    of this size (or less if there is not enough memory) will be reserved
99    for use by this file descriptor. [Deprecated usage: this variable is also
100    readable via /proc/sys/kernel/sg-big-buff if the sg driver is built into
101    the kernel (i.e. it is not a module).] */
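/*
 * Example (illustrative only): the default can be raised either at module
 * load time or later through procfs, e.g. from a root shell:
 *
 *	modprobe sg def_reserved_size=131072
 *	echo 131072 > /proc/scsi/sg/def_reserved_size
 *
 * Only file descriptors opened after the change get the larger reserve
 * buffer; an already open fd can resize its own buffer with the
 * SG_SET_RESERVED_SIZE ioctl instead.
 */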
102 static int def_reserved_size = -1;	/* picks up init parameter */
103 static int sg_allow_dio = SG_ALLOW_DIO_DEF;
104 
105 static int scatter_elem_sz = SG_SCATTER_SZ;
106 static int scatter_elem_sz_prev = SG_SCATTER_SZ;
107 
108 #define SG_SECTOR_SZ 512
109 
110 static int sg_add_device(struct device *, struct class_interface *);
111 static void sg_remove_device(struct device *, struct class_interface *);
112 
113 static DEFINE_IDR(sg_index_idr);
114 static DEFINE_RWLOCK(sg_index_lock);	/* Also used to lock
115 							   file descriptor list for device */
116 
117 static struct class_interface sg_interface = {
118 	.add_dev        = sg_add_device,
119 	.remove_dev     = sg_remove_device,
120 };
121 
122 typedef struct sg_scatter_hold { /* holding area for scsi scatter gather info */
123 	unsigned short k_use_sg; /* Count of kernel scatter-gather pieces */
124 	unsigned sglist_len; /* size of malloc'd scatter-gather list ++ */
125 	unsigned bufflen;	/* Size of (aggregate) data buffer */
126 	struct page **pages;
127 	int page_order;
128 	char dio_in_use;	/* 0->indirect IO (or mmap), 1->dio */
129 	unsigned char cmd_opcode; /* first byte of command */
130 } Sg_scatter_hold;
131 
132 struct sg_device;		/* forward declarations */
133 struct sg_fd;
134 
135 typedef struct sg_request {	/* SG_MAX_QUEUE requests outstanding per file */
136 	struct sg_request *nextrp;	/* NULL -> tail request (slist) */
137 	struct sg_fd *parentfp;	/* NULL -> not in use */
138 	Sg_scatter_hold data;	/* hold buffer, perhaps scatter list */
139 	sg_io_hdr_t header;	/* scsi command+info, see <scsi/sg.h> */
140 	unsigned char sense_b[SCSI_SENSE_BUFFERSIZE];
141 	char res_used;		/* 1 -> using reserve buffer, 0 -> not ... */
142 	char orphan;		/* 1 -> drop on sight, 0 -> normal */
143 	char sg_io_owned;	/* 1 -> packet belongs to SG_IO */
144 	/* done protected by rq_list_lock */
145 	char done;		/* 0->before bh, 1->before read, 2->read */
146 	struct request *rq;
147 	struct bio *bio;
148 	struct execute_work ew;
149 } Sg_request;
150 
151 typedef struct sg_fd {		/* holds the state of a file descriptor */
152 	struct list_head sfd_siblings;  /* protected by device's sfd_lock */
153 	struct sg_device *parentdp;	/* owning device */
154 	wait_queue_head_t read_wait;	/* queue read until command done */
155 	rwlock_t rq_list_lock;	/* protect access to list in req_arr */
156 	int timeout;		/* defaults to SG_DEFAULT_TIMEOUT      */
157 	int timeout_user;	/* defaults to SG_DEFAULT_TIMEOUT_USER */
158 	Sg_scatter_hold reserve;	/* buffer held for this file descriptor */
159 	unsigned save_scat_len;	/* original length of trunc. scat. element */
160 	Sg_request *headrp;	/* head of request slist, NULL->empty */
161 	struct fasync_struct *async_qp;	/* used by asynchronous notification */
162 	Sg_request req_arr[SG_MAX_QUEUE];	/* used as singly-linked list */
163 	char low_dma;		/* as in parent but possibly overridden to 1 */
164 	char force_packid;	/* 1 -> pack_id input to read(), 0 -> ignored */
165 	char cmd_q;		/* 1 -> allow command queuing, 0 -> don't */
166 	unsigned char next_cmd_len; /* 0: automatic, >0: use on next write() */
167 	char keep_orphan;	/* 0 -> drop orphan (def), 1 -> keep for read() */
168 	char mmap_called;	/* 0 -> mmap() never called on this fd */
169 	struct kref f_ref;
170 	struct execute_work ew;
171 } Sg_fd;
172 
173 typedef struct sg_device { /* holds the state of each scsi generic device */
174 	struct scsi_device *device;
175 	wait_queue_head_t open_wait;    /* queue open() when O_EXCL present */
176 	struct mutex open_rel_lock;     /* held when in open() or release() */
177 	int sg_tablesize;	/* adapter's max scatter-gather table size */
178 	u32 index;		/* device index number */
179 	struct list_head sfds;
180 	rwlock_t sfd_lock;      /* protect access to sfd list */
181 	atomic_t detaching;     /* 0->device usable, 1->device detaching */
182 	bool exclude;		/* 1->open(O_EXCL) succeeded and is active */
183 	int open_cnt;		/* count of opens (perhaps < num(sfds) ) */
184 	char sgdebug;		/* 0->off, 1->sense, 9->dump dev, 10-> all devs */
185 	struct gendisk *disk;
186 	struct cdev * cdev;	/* char_dev [sysfs: /sys/cdev/major/sg<n>] */
187 	struct kref d_ref;
188 } Sg_device;
189 
190 /* tasklet or soft irq callback */
191 static void sg_rq_end_io(struct request *rq, int uptodate);
192 static int sg_start_req(Sg_request *srp, unsigned char *cmd);
193 static int sg_finish_rem_req(Sg_request * srp);
194 static int sg_build_indirect(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size);
195 static ssize_t sg_new_read(Sg_fd * sfp, char __user *buf, size_t count,
196 			   Sg_request * srp);
197 static ssize_t sg_new_write(Sg_fd *sfp, struct file *file,
198 			const char __user *buf, size_t count, int blocking,
199 			int read_only, int sg_io_owned, Sg_request **o_srp);
200 static int sg_common_write(Sg_fd * sfp, Sg_request * srp,
201 			   unsigned char *cmnd, int timeout, int blocking);
202 static int sg_read_oxfer(Sg_request * srp, char __user *outp, int num_read_xfer);
203 static void sg_remove_scat(Sg_fd * sfp, Sg_scatter_hold * schp);
204 static void sg_build_reserve(Sg_fd * sfp, int req_size);
205 static void sg_link_reserve(Sg_fd * sfp, Sg_request * srp, int size);
206 static void sg_unlink_reserve(Sg_fd * sfp, Sg_request * srp);
207 static Sg_fd *sg_add_sfp(Sg_device * sdp);
208 static void sg_remove_sfp(struct kref *);
209 static Sg_request *sg_get_rq_mark(Sg_fd * sfp, int pack_id);
210 static Sg_request *sg_add_request(Sg_fd * sfp);
211 static int sg_remove_request(Sg_fd * sfp, Sg_request * srp);
212 static int sg_res_in_use(Sg_fd * sfp);
213 static Sg_device *sg_get_dev(int dev);
214 static void sg_device_destroy(struct kref *kref);
215 
216 #define SZ_SG_HEADER sizeof(struct sg_header)
217 #define SZ_SG_IO_HDR sizeof(sg_io_hdr_t)
218 #define SZ_SG_IOVEC sizeof(sg_iovec_t)
219 #define SZ_SG_REQ_INFO sizeof(sg_req_info_t)
220 
221 #define sg_printk(prefix, sdp, fmt, a...) \
222 	sdev_printk(prefix, (sdp)->device, "[%s] " fmt, \
223 		    (sdp)->disk->disk_name, ##a)
224 
225 static int sg_allow_access(struct file *filp, unsigned char *cmd)
226 {
227 	struct sg_fd *sfp = filp->private_data;
228 
229 	if (sfp->parentdp->device->type == TYPE_SCANNER)
230 		return 0;
231 
232 	return blk_verify_command(cmd, filp->f_mode & FMODE_WRITE);
233 }
234 
235 static int
236 open_wait(Sg_device *sdp, int flags)
237 {
238 	int retval = 0;
239 
240 	if (flags & O_EXCL) {
241 		while (sdp->open_cnt > 0) {
242 			mutex_unlock(&sdp->open_rel_lock);
243 			retval = wait_event_interruptible(sdp->open_wait,
244 					(atomic_read(&sdp->detaching) ||
245 					 !sdp->open_cnt));
246 			mutex_lock(&sdp->open_rel_lock);
247 
248 			if (retval) /* -ERESTARTSYS */
249 				return retval;
250 			if (atomic_read(&sdp->detaching))
251 				return -ENODEV;
252 		}
253 	} else {
254 		while (sdp->exclude) {
255 			mutex_unlock(&sdp->open_rel_lock);
256 			retval = wait_event_interruptible(sdp->open_wait,
257 					(atomic_read(&sdp->detaching) ||
258 					 !sdp->exclude));
259 			mutex_lock(&sdp->open_rel_lock);
260 
261 			if (retval) /* -ERESTARTSYS */
262 				return retval;
263 			if (atomic_read(&sdp->detaching))
264 				return -ENODEV;
265 		}
266 	}
267 
268 	return retval;
269 }
270 
271 /* Returns 0 on success, else a negated errno value */
272 static int
273 sg_open(struct inode *inode, struct file *filp)
274 {
275 	int dev = iminor(inode);
276 	int flags = filp->f_flags;
277 	struct request_queue *q;
278 	Sg_device *sdp;
279 	Sg_fd *sfp;
280 	int retval;
281 
282 	nonseekable_open(inode, filp);
283 	if ((flags & O_EXCL) && (O_RDONLY == (flags & O_ACCMODE)))
284 		return -EPERM; /* Can't lock it with read only access */
285 	sdp = sg_get_dev(dev);
286 	if (IS_ERR(sdp))
287 		return PTR_ERR(sdp);
288 
289 	SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp,
290 				      "sg_open: flags=0x%x\n", flags));
291 
292 	/* This driver's module count bumped by fops_get in <linux/fs.h> */
293 	/* Prevent the device driver from vanishing while we sleep */
294 	retval = scsi_device_get(sdp->device);
295 	if (retval)
296 		goto sg_put;
297 
298 	retval = scsi_autopm_get_device(sdp->device);
299 	if (retval)
300 		goto sdp_put;
301 
302 	/* scsi_block_when_processing_errors() may block so bypass
303 	 * check if O_NONBLOCK. Permits SCSI commands to be issued
304 	 * during error recovery. Tread carefully. */
305 	if (!((flags & O_NONBLOCK) ||
306 	      scsi_block_when_processing_errors(sdp->device))) {
307 		retval = -ENXIO;
308 		/* we are in error recovery for this device */
309 		goto error_out;
310 	}
311 
312 	mutex_lock(&sdp->open_rel_lock);
313 	if (flags & O_NONBLOCK) {
314 		if (flags & O_EXCL) {
315 			if (sdp->open_cnt > 0) {
316 				retval = -EBUSY;
317 				goto error_mutex_locked;
318 			}
319 		} else {
320 			if (sdp->exclude) {
321 				retval = -EBUSY;
322 				goto error_mutex_locked;
323 			}
324 		}
325 	} else {
326 		retval = open_wait(sdp, flags);
327 		if (retval) /* -ERESTARTSYS or -ENODEV */
328 			goto error_mutex_locked;
329 	}
330 
331 	/* N.B. at this point we are holding the open_rel_lock */
332 	if (flags & O_EXCL)
333 		sdp->exclude = true;
334 
335 	if (sdp->open_cnt < 1) {  /* no existing opens */
336 		sdp->sgdebug = 0;
337 		q = sdp->device->request_queue;
338 		sdp->sg_tablesize = queue_max_segments(q);
339 	}
340 	sfp = sg_add_sfp(sdp);
341 	if (IS_ERR(sfp)) {
342 		retval = PTR_ERR(sfp);
343 		goto out_undo;
344 	}
345 
346 	filp->private_data = sfp;
347 	sdp->open_cnt++;
348 	mutex_unlock(&sdp->open_rel_lock);
349 
350 	retval = 0;
351 sg_put:
352 	kref_put(&sdp->d_ref, sg_device_destroy);
353 	return retval;
354 
355 out_undo:
356 	if (flags & O_EXCL) {
357 		sdp->exclude = false;   /* undo if error */
358 		wake_up_interruptible(&sdp->open_wait);
359 	}
360 error_mutex_locked:
361 	mutex_unlock(&sdp->open_rel_lock);
362 error_out:
363 	scsi_autopm_put_device(sdp->device);
364 sdp_put:
365 	scsi_device_put(sdp->device);
366 	goto sg_put;
367 }
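/*
 * Usage sketch (illustrative, not part of the original source): a typical
 * caller opens the node non-exclusively, and only takes an exclusive open
 * when it must keep other users out:
 *
 *	int fd  = open("/dev/sg1", O_RDWR | O_NONBLOCK);
 *	int xfd = open("/dev/sg1", O_RDWR | O_EXCL);	   may sleep in open_wait()
 *
 * O_EXCL combined with O_RDONLY is rejected with EPERM above; with
 * O_NONBLOCK a contended device returns EBUSY instead of sleeping.
 * "/dev/sg1" is just an example device node.
 */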
368 
369 /* Release resources associated with a successful sg_open()
370  * Returns 0 on success, else a negated errno value */
371 static int
372 sg_release(struct inode *inode, struct file *filp)
373 {
374 	Sg_device *sdp;
375 	Sg_fd *sfp;
376 
377 	if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
378 		return -ENXIO;
379 	SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp, "sg_release\n"));
380 
381 	mutex_lock(&sdp->open_rel_lock);
382 	scsi_autopm_put_device(sdp->device);
383 	kref_put(&sfp->f_ref, sg_remove_sfp);
384 	sdp->open_cnt--;
385 
386 	/* possibly many open()s waiting on exclude clearing, start many;
387 	 * only open(O_EXCL)s wait on 0==open_cnt so only start one */
388 	if (sdp->exclude) {
389 		sdp->exclude = false;
390 		wake_up_interruptible_all(&sdp->open_wait);
391 	} else if (0 == sdp->open_cnt) {
392 		wake_up_interruptible(&sdp->open_wait);
393 	}
394 	mutex_unlock(&sdp->open_rel_lock);
395 	return 0;
396 }
397 
398 static ssize_t
399 sg_read(struct file *filp, char __user *buf, size_t count, loff_t * ppos)
400 {
401 	Sg_device *sdp;
402 	Sg_fd *sfp;
403 	Sg_request *srp;
404 	int req_pack_id = -1;
405 	sg_io_hdr_t *hp;
406 	struct sg_header *old_hdr = NULL;
407 	int retval = 0;
408 
409 	if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
410 		return -ENXIO;
411 	SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp,
412 				      "sg_read: count=%d\n", (int) count));
413 
414 	if (!access_ok(VERIFY_WRITE, buf, count))
415 		return -EFAULT;
416 	if (sfp->force_packid && (count >= SZ_SG_HEADER)) {
417 		old_hdr = kmalloc(SZ_SG_HEADER, GFP_KERNEL);
418 		if (!old_hdr)
419 			return -ENOMEM;
420 		if (__copy_from_user(old_hdr, buf, SZ_SG_HEADER)) {
421 			retval = -EFAULT;
422 			goto free_old_hdr;
423 		}
424 		if (old_hdr->reply_len < 0) {
425 			if (count >= SZ_SG_IO_HDR) {
426 				sg_io_hdr_t *new_hdr;
427 				new_hdr = kmalloc(SZ_SG_IO_HDR, GFP_KERNEL);
428 				if (!new_hdr) {
429 					retval = -ENOMEM;
430 					goto free_old_hdr;
431 				}
432 				retval = __copy_from_user(new_hdr, buf,
433 							  SZ_SG_IO_HDR);
434 				req_pack_id = new_hdr->pack_id;
435 				kfree(new_hdr);
436 				if (retval) {
437 					retval = -EFAULT;
438 					goto free_old_hdr;
439 				}
440 			}
441 		} else
442 			req_pack_id = old_hdr->pack_id;
443 	}
444 	srp = sg_get_rq_mark(sfp, req_pack_id);
445 	if (!srp) {		/* now wait on packet to arrive */
446 		if (atomic_read(&sdp->detaching)) {
447 			retval = -ENODEV;
448 			goto free_old_hdr;
449 		}
450 		if (filp->f_flags & O_NONBLOCK) {
451 			retval = -EAGAIN;
452 			goto free_old_hdr;
453 		}
454 		retval = wait_event_interruptible(sfp->read_wait,
455 			(atomic_read(&sdp->detaching) ||
456 			(srp = sg_get_rq_mark(sfp, req_pack_id))));
457 		if (atomic_read(&sdp->detaching)) {
458 			retval = -ENODEV;
459 			goto free_old_hdr;
460 		}
461 		if (retval) {
462 			/* -ERESTARTSYS as signal hit process */
463 			goto free_old_hdr;
464 		}
465 	}
466 	if (srp->header.interface_id != '\0') {
467 		retval = sg_new_read(sfp, buf, count, srp);
468 		goto free_old_hdr;
469 	}
470 
471 	hp = &srp->header;
472 	if (old_hdr == NULL) {
473 		old_hdr = kmalloc(SZ_SG_HEADER, GFP_KERNEL);
474 		if (! old_hdr) {
475 			retval = -ENOMEM;
476 			goto free_old_hdr;
477 		}
478 	}
479 	memset(old_hdr, 0, SZ_SG_HEADER);
480 	old_hdr->reply_len = (int) hp->timeout;
481 	old_hdr->pack_len = old_hdr->reply_len; /* old, strange behaviour */
482 	old_hdr->pack_id = hp->pack_id;
483 	old_hdr->twelve_byte =
484 	    ((srp->data.cmd_opcode >= 0xc0) && (12 == hp->cmd_len)) ? 1 : 0;
485 	old_hdr->target_status = hp->masked_status;
486 	old_hdr->host_status = hp->host_status;
487 	old_hdr->driver_status = hp->driver_status;
488 	if ((CHECK_CONDITION & hp->masked_status) ||
489 	    (DRIVER_SENSE & hp->driver_status))
490 		memcpy(old_hdr->sense_buffer, srp->sense_b,
491 		       sizeof (old_hdr->sense_buffer));
492 	switch (hp->host_status) {
493 	/* This setup of 'result' is for backward compatibility and is best
494 	   ignored by the user, who should use the target, host and driver status */
495 	case DID_OK:
496 	case DID_PASSTHROUGH:
497 	case DID_SOFT_ERROR:
498 		old_hdr->result = 0;
499 		break;
500 	case DID_NO_CONNECT:
501 	case DID_BUS_BUSY:
502 	case DID_TIME_OUT:
503 		old_hdr->result = EBUSY;
504 		break;
505 	case DID_BAD_TARGET:
506 	case DID_ABORT:
507 	case DID_PARITY:
508 	case DID_RESET:
509 	case DID_BAD_INTR:
510 		old_hdr->result = EIO;
511 		break;
512 	case DID_ERROR:
513 		old_hdr->result = (srp->sense_b[0] == 0 &&
514 				  hp->masked_status == GOOD) ? 0 : EIO;
515 		break;
516 	default:
517 		old_hdr->result = EIO;
518 		break;
519 	}
520 
521 	/* Now copy the result back to the user buffer.  */
522 	if (count >= SZ_SG_HEADER) {
523 		if (__copy_to_user(buf, old_hdr, SZ_SG_HEADER)) {
524 			retval = -EFAULT;
525 			goto free_old_hdr;
526 		}
527 		buf += SZ_SG_HEADER;
528 		if (count > old_hdr->reply_len)
529 			count = old_hdr->reply_len;
530 		if (count > SZ_SG_HEADER) {
531 			if (sg_read_oxfer(srp, buf, count - SZ_SG_HEADER)) {
532 				retval = -EFAULT;
533 				goto free_old_hdr;
534 			}
535 		}
536 	} else
537 		count = (old_hdr->result == 0) ? 0 : -EIO;
538 	sg_finish_rem_req(srp);
539 	retval = count;
540 free_old_hdr:
541 	kfree(old_hdr);
542 	return retval;
543 }
544 
545 static ssize_t
546 sg_new_read(Sg_fd * sfp, char __user *buf, size_t count, Sg_request * srp)
547 {
548 	sg_io_hdr_t *hp = &srp->header;
549 	int err = 0;
550 	int len;
551 
552 	if (count < SZ_SG_IO_HDR) {
553 		err = -EINVAL;
554 		goto err_out;
555 	}
556 	hp->sb_len_wr = 0;
557 	if ((hp->mx_sb_len > 0) && hp->sbp) {
558 		if ((CHECK_CONDITION & hp->masked_status) ||
559 		    (DRIVER_SENSE & hp->driver_status)) {
560 			int sb_len = SCSI_SENSE_BUFFERSIZE;
561 			sb_len = (hp->mx_sb_len > sb_len) ? sb_len : hp->mx_sb_len;
562 			len = 8 + (int) srp->sense_b[7];	/* Additional sense length field */
563 			len = (len > sb_len) ? sb_len : len;
564 			if (copy_to_user(hp->sbp, srp->sense_b, len)) {
565 				err = -EFAULT;
566 				goto err_out;
567 			}
568 			hp->sb_len_wr = len;
569 		}
570 	}
571 	if (hp->masked_status || hp->host_status || hp->driver_status)
572 		hp->info |= SG_INFO_CHECK;
573 	if (copy_to_user(buf, hp, SZ_SG_IO_HDR)) {
574 		err = -EFAULT;
575 		goto err_out;
576 	}
577 err_out:
578 	err = sg_finish_rem_req(srp);
579 	return (0 == err) ? count : err;
580 }
581 
582 static ssize_t
583 sg_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos)
584 {
585 	int mxsize, cmd_size, k;
586 	int input_size, blocking;
587 	unsigned char opcode;
588 	Sg_device *sdp;
589 	Sg_fd *sfp;
590 	Sg_request *srp;
591 	struct sg_header old_hdr;
592 	sg_io_hdr_t *hp;
593 	unsigned char cmnd[SG_MAX_CDB_SIZE];
594 
595 	if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
596 		return -ENXIO;
597 	SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp,
598 				      "sg_write: count=%d\n", (int) count));
599 	if (atomic_read(&sdp->detaching))
600 		return -ENODEV;
601 	if (!((filp->f_flags & O_NONBLOCK) ||
602 	      scsi_block_when_processing_errors(sdp->device)))
603 		return -ENXIO;
604 
605 	if (!access_ok(VERIFY_READ, buf, count))
606 		return -EFAULT;	/* protects following copy_from_user()s + get_user()s */
607 	if (count < SZ_SG_HEADER)
608 		return -EIO;
609 	if (__copy_from_user(&old_hdr, buf, SZ_SG_HEADER))
610 		return -EFAULT;
611 	blocking = !(filp->f_flags & O_NONBLOCK);
612 	if (old_hdr.reply_len < 0)
613 		return sg_new_write(sfp, filp, buf, count,
614 				    blocking, 0, 0, NULL);
615 	if (count < (SZ_SG_HEADER + 6))
616 		return -EIO;	/* The minimum scsi command length is 6 bytes. */
617 
618 	if (!(srp = sg_add_request(sfp))) {
619 		SCSI_LOG_TIMEOUT(1, sg_printk(KERN_INFO, sdp,
620 					      "sg_write: queue full\n"));
621 		return -EDOM;
622 	}
623 	buf += SZ_SG_HEADER;
624 	__get_user(opcode, buf);
625 	if (sfp->next_cmd_len > 0) {
626 		cmd_size = sfp->next_cmd_len;
627 		sfp->next_cmd_len = 0;	/* reset so only this write() is affected */
628 	} else {
629 		cmd_size = COMMAND_SIZE(opcode);	/* based on SCSI command group */
630 		if ((opcode >= 0xc0) && old_hdr.twelve_byte)
631 			cmd_size = 12;
632 	}
633 	SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sdp,
634 		"sg_write:   scsi opcode=0x%02x, cmd_size=%d\n", (int) opcode, cmd_size));
635 /* Determine buffer size.  */
636 	input_size = count - cmd_size;
637 	mxsize = (input_size > old_hdr.reply_len) ? input_size : old_hdr.reply_len;
638 	mxsize -= SZ_SG_HEADER;
639 	input_size -= SZ_SG_HEADER;
640 	if (input_size < 0) {
641 		sg_remove_request(sfp, srp);
642 		return -EIO;	/* User did not pass enough bytes for this command. */
643 	}
644 	hp = &srp->header;
645 	hp->interface_id = '\0';	/* indicator of old interface tunnelled */
646 	hp->cmd_len = (unsigned char) cmd_size;
647 	hp->iovec_count = 0;
648 	hp->mx_sb_len = 0;
649 	if (input_size > 0)
650 		hp->dxfer_direction = (old_hdr.reply_len > SZ_SG_HEADER) ?
651 		    SG_DXFER_TO_FROM_DEV : SG_DXFER_TO_DEV;
652 	else
653 		hp->dxfer_direction = (mxsize > 0) ? SG_DXFER_FROM_DEV : SG_DXFER_NONE;
654 	hp->dxfer_len = mxsize;
655 	if (hp->dxfer_direction == SG_DXFER_TO_DEV)
656 		hp->dxferp = (char __user *)buf + cmd_size;
657 	else
658 		hp->dxferp = NULL;
659 	hp->sbp = NULL;
660 	hp->timeout = old_hdr.reply_len;	/* structure abuse ... */
661 	hp->flags = input_size;	/* structure abuse ... */
662 	hp->pack_id = old_hdr.pack_id;
663 	hp->usr_ptr = NULL;
664 	if (__copy_from_user(cmnd, buf, cmd_size)) {
		sg_remove_request(sfp, srp);	/* don't leak the queued request */
665 		return -EFAULT;
	}
666 	/*
667 	 * SG_DXFER_TO_FROM_DEV is functionally equivalent to SG_DXFER_FROM_DEV,
668 	 * but it is possible that the app intended SG_DXFER_TO_DEV, because there
669 	 * is a non-zero input_size, so emit a warning.
670 	 */
671 	if (hp->dxfer_direction == SG_DXFER_TO_FROM_DEV) {
672 		static char cmd[TASK_COMM_LEN];
673 		if (strcmp(current->comm, cmd)) {
674 			printk_ratelimited(KERN_WARNING
675 					   "sg_write: data in/out %d/%d bytes "
676 					   "for SCSI command 0x%x-- guessing "
677 					   "data in;\n   program %s not setting "
678 					   "count and/or reply_len properly\n",
679 					   old_hdr.reply_len - (int)SZ_SG_HEADER,
680 					   input_size, (unsigned int) cmnd[0],
681 					   current->comm);
682 			strcpy(cmd, current->comm);
683 		}
684 	}
685 	k = sg_common_write(sfp, srp, cmnd, sfp->timeout, blocking);
686 	return (k < 0) ? k : count;
687 }
688 
689 static ssize_t
690 sg_new_write(Sg_fd *sfp, struct file *file, const char __user *buf,
691 		 size_t count, int blocking, int read_only, int sg_io_owned,
692 		 Sg_request **o_srp)
693 {
694 	int k;
695 	Sg_request *srp;
696 	sg_io_hdr_t *hp;
697 	unsigned char cmnd[SG_MAX_CDB_SIZE];
698 	int timeout;
699 	unsigned long ul_timeout;
700 
701 	if (count < SZ_SG_IO_HDR)
702 		return -EINVAL;
703 	if (!access_ok(VERIFY_READ, buf, count))
704 		return -EFAULT; /* protects following copy_from_user()s + get_user()s */
705 
706 	sfp->cmd_q = 1;	/* when sg_io_hdr seen, set command queuing on */
707 	if (!(srp = sg_add_request(sfp))) {
708 		SCSI_LOG_TIMEOUT(1, sg_printk(KERN_INFO, sfp->parentdp,
709 					      "sg_new_write: queue full\n"));
710 		return -EDOM;
711 	}
712 	srp->sg_io_owned = sg_io_owned;
713 	hp = &srp->header;
714 	if (__copy_from_user(hp, buf, SZ_SG_IO_HDR)) {
715 		sg_remove_request(sfp, srp);
716 		return -EFAULT;
717 	}
718 	if (hp->interface_id != 'S') {
719 		sg_remove_request(sfp, srp);
720 		return -ENOSYS;
721 	}
722 	if (hp->flags & SG_FLAG_MMAP_IO) {
723 		if (hp->dxfer_len > sfp->reserve.bufflen) {
724 			sg_remove_request(sfp, srp);
725 			return -ENOMEM;	/* MMAP_IO size must fit in reserve buffer */
726 		}
727 		if (hp->flags & SG_FLAG_DIRECT_IO) {
728 			sg_remove_request(sfp, srp);
729 			return -EINVAL;	/* either MMAP_IO or DIRECT_IO (not both) */
730 		}
731 		if (sg_res_in_use(sfp)) {
732 			sg_remove_request(sfp, srp);
733 			return -EBUSY;	/* reserve buffer already being used */
734 		}
735 	}
736 	ul_timeout = msecs_to_jiffies(srp->header.timeout);
737 	timeout = (ul_timeout < INT_MAX) ? ul_timeout : INT_MAX;
738 	if ((!hp->cmdp) || (hp->cmd_len < 6) || (hp->cmd_len > sizeof (cmnd))) {
739 		sg_remove_request(sfp, srp);
740 		return -EMSGSIZE;
741 	}
742 	if (!access_ok(VERIFY_READ, hp->cmdp, hp->cmd_len)) {
743 		sg_remove_request(sfp, srp);
744 		return -EFAULT;	/* protects following copy_from_user()s + get_user()s */
745 	}
746 	if (__copy_from_user(cmnd, hp->cmdp, hp->cmd_len)) {
747 		sg_remove_request(sfp, srp);
748 		return -EFAULT;
749 	}
750 	if (read_only && sg_allow_access(file, cmnd)) {
751 		sg_remove_request(sfp, srp);
752 		return -EPERM;
753 	}
754 	k = sg_common_write(sfp, srp, cmnd, timeout, blocking);
755 	if (k < 0)
756 		return k;
757 	if (o_srp)
758 		*o_srp = srp;
759 	return count;
760 }
761 
762 static int
763 sg_common_write(Sg_fd * sfp, Sg_request * srp,
764 		unsigned char *cmnd, int timeout, int blocking)
765 {
766 	int k, data_dir, at_head;
767 	Sg_device *sdp = sfp->parentdp;
768 	sg_io_hdr_t *hp = &srp->header;
769 
770 	srp->data.cmd_opcode = cmnd[0];	/* hold opcode of command */
771 	hp->status = 0;
772 	hp->masked_status = 0;
773 	hp->msg_status = 0;
774 	hp->info = 0;
775 	hp->host_status = 0;
776 	hp->driver_status = 0;
777 	hp->resid = 0;
778 	SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sfp->parentdp,
779 			"sg_common_write:  scsi opcode=0x%02x, cmd_size=%d\n",
780 			(int) cmnd[0], (int) hp->cmd_len));
781 
782 	k = sg_start_req(srp, cmnd);
783 	if (k) {
784 		SCSI_LOG_TIMEOUT(1, sg_printk(KERN_INFO, sfp->parentdp,
785 			"sg_common_write: start_req err=%d\n", k));
786 		sg_finish_rem_req(srp);
787 		return k;	/* probably out of space --> ENOMEM */
788 	}
789 	if (atomic_read(&sdp->detaching)) {
790 		if (srp->bio)
791 			blk_end_request_all(srp->rq, -EIO);
792 		sg_finish_rem_req(srp);
793 		return -ENODEV;
794 	}
795 
796 	switch (hp->dxfer_direction) {
797 	case SG_DXFER_TO_FROM_DEV:
798 	case SG_DXFER_FROM_DEV:
799 		data_dir = DMA_FROM_DEVICE;
800 		break;
801 	case SG_DXFER_TO_DEV:
802 		data_dir = DMA_TO_DEVICE;
803 		break;
804 	case SG_DXFER_UNKNOWN:
805 		data_dir = DMA_BIDIRECTIONAL;
806 		break;
807 	default:
808 		data_dir = DMA_NONE;
809 		break;
810 	}
811 	hp->duration = jiffies_to_msecs(jiffies);
812 	if (hp->interface_id != '\0' &&	/* v3 (or later) interface */
813 	    (SG_FLAG_Q_AT_TAIL & hp->flags))
814 		at_head = 0;
815 	else
816 		at_head = 1;
817 
818 	srp->rq->timeout = timeout;
819 	kref_get(&sfp->f_ref); /* sg_rq_end_io() does kref_put(). */
820 	blk_execute_rq_nowait(sdp->device->request_queue, sdp->disk,
821 			      srp->rq, at_head, sg_rq_end_io);
822 	return 0;
823 }
824 
825 static int srp_done(Sg_fd *sfp, Sg_request *srp)
826 {
827 	unsigned long flags;
828 	int ret;
829 
830 	read_lock_irqsave(&sfp->rq_list_lock, flags);
831 	ret = srp->done;
832 	read_unlock_irqrestore(&sfp->rq_list_lock, flags);
833 	return ret;
834 }
835 
836 static int max_sectors_bytes(struct request_queue *q)
837 {
838 	unsigned int max_sectors = queue_max_sectors(q);
839 
840 	max_sectors = min_t(unsigned int, max_sectors, INT_MAX >> 9);
841 
842 	return max_sectors << 9;
843 }
844 
845 static long
846 sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
847 {
848 	void __user *p = (void __user *)arg;
849 	int __user *ip = p;
850 	int result, val, read_only;
851 	Sg_device *sdp;
852 	Sg_fd *sfp;
853 	Sg_request *srp;
854 	unsigned long iflags;
855 
856 	if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
857 		return -ENXIO;
858 
859 	SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp,
860 				   "sg_ioctl: cmd=0x%x\n", (int) cmd_in));
861 	read_only = (O_RDWR != (filp->f_flags & O_ACCMODE));
862 
863 	switch (cmd_in) {
864 	case SG_IO:
865 		if (atomic_read(&sdp->detaching))
866 			return -ENODEV;
867 		if (!scsi_block_when_processing_errors(sdp->device))
868 			return -ENXIO;
869 		if (!access_ok(VERIFY_WRITE, p, SZ_SG_IO_HDR))
870 			return -EFAULT;
871 		result = sg_new_write(sfp, filp, p, SZ_SG_IO_HDR,
872 				 1, read_only, 1, &srp);
873 		if (result < 0)
874 			return result;
875 		result = wait_event_interruptible(sfp->read_wait,
876 			(srp_done(sfp, srp) || atomic_read(&sdp->detaching)));
877 		if (atomic_read(&sdp->detaching))
878 			return -ENODEV;
879 		write_lock_irq(&sfp->rq_list_lock);
880 		if (srp->done) {
881 			srp->done = 2;
882 			write_unlock_irq(&sfp->rq_list_lock);
883 			result = sg_new_read(sfp, p, SZ_SG_IO_HDR, srp);
884 			return (result < 0) ? result : 0;
885 		}
886 		srp->orphan = 1;
887 		write_unlock_irq(&sfp->rq_list_lock);
888 		return result;	/* -ERESTARTSYS because signal hit process */
889 	case SG_SET_TIMEOUT:
890 		result = get_user(val, ip);
891 		if (result)
892 			return result;
893 		if (val < 0)
894 			return -EIO;
895 		if (val >= MULDIV(INT_MAX, USER_HZ, HZ))
896 			val = MULDIV(INT_MAX, USER_HZ, HZ);
897 		sfp->timeout_user = val;
898 		sfp->timeout = MULDIV(val, HZ, USER_HZ);
899 
900 		return 0;
901 	case SG_GET_TIMEOUT:	/* N.B. User receives timeout as return value */
902 				/* strange ..., for backward compatibility */
903 		return sfp->timeout_user;
904 	case SG_SET_FORCE_LOW_DMA:
905 		result = get_user(val, ip);
906 		if (result)
907 			return result;
908 		if (val) {
909 			sfp->low_dma = 1;
910 			if ((0 == sfp->low_dma) && (0 == sg_res_in_use(sfp))) {
911 				val = (int) sfp->reserve.bufflen;
912 				sg_remove_scat(sfp, &sfp->reserve);
913 				sg_build_reserve(sfp, val);
914 			}
915 		} else {
916 			if (atomic_read(&sdp->detaching))
917 				return -ENODEV;
918 			sfp->low_dma = sdp->device->host->unchecked_isa_dma;
919 		}
920 		return 0;
921 	case SG_GET_LOW_DMA:
922 		return put_user((int) sfp->low_dma, ip);
923 	case SG_GET_SCSI_ID:
924 		if (!access_ok(VERIFY_WRITE, p, sizeof (sg_scsi_id_t)))
925 			return -EFAULT;
926 		else {
927 			sg_scsi_id_t __user *sg_idp = p;
928 
929 			if (atomic_read(&sdp->detaching))
930 				return -ENODEV;
931 			__put_user((int) sdp->device->host->host_no,
932 				   &sg_idp->host_no);
933 			__put_user((int) sdp->device->channel,
934 				   &sg_idp->channel);
935 			__put_user((int) sdp->device->id, &sg_idp->scsi_id);
936 			__put_user((int) sdp->device->lun, &sg_idp->lun);
937 			__put_user((int) sdp->device->type, &sg_idp->scsi_type);
938 			__put_user((short) sdp->device->host->cmd_per_lun,
939 				   &sg_idp->h_cmd_per_lun);
940 			__put_user((short) sdp->device->queue_depth,
941 				   &sg_idp->d_queue_depth);
942 			__put_user(0, &sg_idp->unused[0]);
943 			__put_user(0, &sg_idp->unused[1]);
944 			return 0;
945 		}
946 	case SG_SET_FORCE_PACK_ID:
947 		result = get_user(val, ip);
948 		if (result)
949 			return result;
950 		sfp->force_packid = val ? 1 : 0;
951 		return 0;
952 	case SG_GET_PACK_ID:
953 		if (!access_ok(VERIFY_WRITE, ip, sizeof (int)))
954 			return -EFAULT;
955 		read_lock_irqsave(&sfp->rq_list_lock, iflags);
956 		for (srp = sfp->headrp; srp; srp = srp->nextrp) {
957 			if ((1 == srp->done) && (!srp->sg_io_owned)) {
958 				read_unlock_irqrestore(&sfp->rq_list_lock,
959 						       iflags);
960 				__put_user(srp->header.pack_id, ip);
961 				return 0;
962 			}
963 		}
964 		read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
965 		__put_user(-1, ip);
966 		return 0;
967 	case SG_GET_NUM_WAITING:
968 		read_lock_irqsave(&sfp->rq_list_lock, iflags);
969 		for (val = 0, srp = sfp->headrp; srp; srp = srp->nextrp) {
970 			if ((1 == srp->done) && (!srp->sg_io_owned))
971 				++val;
972 		}
973 		read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
974 		return put_user(val, ip);
975 	case SG_GET_SG_TABLESIZE:
976 		return put_user(sdp->sg_tablesize, ip);
977 	case SG_SET_RESERVED_SIZE:
978 		result = get_user(val, ip);
979 		if (result)
980 			return result;
981 		if (val < 0)
982 			return -EINVAL;
983 		val = min_t(int, val,
984 			    max_sectors_bytes(sdp->device->request_queue));
985 		if (val != sfp->reserve.bufflen) {
986 			if (sg_res_in_use(sfp) || sfp->mmap_called)
987 				return -EBUSY;
988 			sg_remove_scat(sfp, &sfp->reserve);
989 			sg_build_reserve(sfp, val);
990 		}
991 		return 0;
992 	case SG_GET_RESERVED_SIZE:
993 		val = min_t(int, sfp->reserve.bufflen,
994 			    max_sectors_bytes(sdp->device->request_queue));
995 		return put_user(val, ip);
996 	case SG_SET_COMMAND_Q:
997 		result = get_user(val, ip);
998 		if (result)
999 			return result;
1000 		sfp->cmd_q = val ? 1 : 0;
1001 		return 0;
1002 	case SG_GET_COMMAND_Q:
1003 		return put_user((int) sfp->cmd_q, ip);
1004 	case SG_SET_KEEP_ORPHAN:
1005 		result = get_user(val, ip);
1006 		if (result)
1007 			return result;
1008 		sfp->keep_orphan = val;
1009 		return 0;
1010 	case SG_GET_KEEP_ORPHAN:
1011 		return put_user((int) sfp->keep_orphan, ip);
1012 	case SG_NEXT_CMD_LEN:
1013 		result = get_user(val, ip);
1014 		if (result)
1015 			return result;
1016 		sfp->next_cmd_len = (val > 0) ? val : 0;
1017 		return 0;
1018 	case SG_GET_VERSION_NUM:
1019 		return put_user(sg_version_num, ip);
1020 	case SG_GET_ACCESS_COUNT:
1021 		/* faked - we don't have a real access count anymore */
1022 		val = (sdp->device ? 1 : 0);
1023 		return put_user(val, ip);
1024 	case SG_GET_REQUEST_TABLE:
1025 		if (!access_ok(VERIFY_WRITE, p, SZ_SG_REQ_INFO * SG_MAX_QUEUE))
1026 			return -EFAULT;
1027 		else {
1028 			sg_req_info_t *rinfo;
1029 			unsigned int ms;
1030 
1031 			rinfo = kmalloc(SZ_SG_REQ_INFO * SG_MAX_QUEUE,
1032 								GFP_KERNEL);
1033 			if (!rinfo)
1034 				return -ENOMEM;
1035 			read_lock_irqsave(&sfp->rq_list_lock, iflags);
1036 			for (srp = sfp->headrp, val = 0; val < SG_MAX_QUEUE;
1037 			     ++val, srp = srp ? srp->nextrp : srp) {
1038 				memset(&rinfo[val], 0, SZ_SG_REQ_INFO);
1039 				if (srp) {
1040 					rinfo[val].req_state = srp->done + 1;
1041 					rinfo[val].problem =
1042 					    srp->header.masked_status &
1043 					    srp->header.host_status &
1044 					    srp->header.driver_status;
1045 					if (srp->done)
1046 						rinfo[val].duration =
1047 							srp->header.duration;
1048 					else {
1049 						ms = jiffies_to_msecs(jiffies);
1050 						rinfo[val].duration =
1051 						    (ms > srp->header.duration) ?
1052 						    (ms - srp->header.duration) : 0;
1053 					}
1054 					rinfo[val].orphan = srp->orphan;
1055 					rinfo[val].sg_io_owned =
1056 							srp->sg_io_owned;
1057 					rinfo[val].pack_id =
1058 							srp->header.pack_id;
1059 					rinfo[val].usr_ptr =
1060 							srp->header.usr_ptr;
1061 				}
1062 			}
1063 			read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
1064 			result = __copy_to_user(p, rinfo,
1065 						SZ_SG_REQ_INFO * SG_MAX_QUEUE);
1066 			result = result ? -EFAULT : 0;
1067 			kfree(rinfo);
1068 			return result;
1069 		}
1070 	case SG_EMULATED_HOST:
1071 		if (atomic_read(&sdp->detaching))
1072 			return -ENODEV;
1073 		return put_user(sdp->device->host->hostt->emulated, ip);
1074 	case SG_SCSI_RESET:
1075 		if (atomic_read(&sdp->detaching))
1076 			return -ENODEV;
1077 		if (filp->f_flags & O_NONBLOCK) {
1078 			if (scsi_host_in_recovery(sdp->device->host))
1079 				return -EBUSY;
1080 		} else if (!scsi_block_when_processing_errors(sdp->device))
1081 			return -EBUSY;
1082 		result = get_user(val, ip);
1083 		if (result)
1084 			return result;
1085 		if (SG_SCSI_RESET_NOTHING == val)
1086 			return 0;
1087 		switch (val) {
1088 		case SG_SCSI_RESET_DEVICE:
1089 			val = SCSI_TRY_RESET_DEVICE;
1090 			break;
1091 		case SG_SCSI_RESET_TARGET:
1092 			val = SCSI_TRY_RESET_TARGET;
1093 			break;
1094 		case SG_SCSI_RESET_BUS:
1095 			val = SCSI_TRY_RESET_BUS;
1096 			break;
1097 		case SG_SCSI_RESET_HOST:
1098 			val = SCSI_TRY_RESET_HOST;
1099 			break;
1100 		default:
1101 			return -EINVAL;
1102 		}
1103 		if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
1104 			return -EACCES;
1105 		return (scsi_reset_provider(sdp->device, val) ==
1106 			SUCCESS) ? 0 : -EIO;
1107 	case SCSI_IOCTL_SEND_COMMAND:
1108 		if (atomic_read(&sdp->detaching))
1109 			return -ENODEV;
1110 		if (read_only) {
1111 			unsigned char opcode = WRITE_6;
1112 			Scsi_Ioctl_Command __user *siocp = p;
1113 
1114 			if (copy_from_user(&opcode, siocp->data, 1))
1115 				return -EFAULT;
1116 			if (sg_allow_access(filp, &opcode))
1117 				return -EPERM;
1118 		}
1119 		return sg_scsi_ioctl(sdp->device->request_queue, NULL, filp->f_mode, p);
1120 	case SG_SET_DEBUG:
1121 		result = get_user(val, ip);
1122 		if (result)
1123 			return result;
1124 		sdp->sgdebug = (char) val;
1125 		return 0;
1126 	case SCSI_IOCTL_GET_IDLUN:
1127 	case SCSI_IOCTL_GET_BUS_NUMBER:
1128 	case SCSI_IOCTL_PROBE_HOST:
1129 	case SG_GET_TRANSFORM:
1130 		if (atomic_read(&sdp->detaching))
1131 			return -ENODEV;
1132 		return scsi_ioctl(sdp->device, cmd_in, p);
1133 	case BLKSECTGET:
1134 		return put_user(max_sectors_bytes(sdp->device->request_queue),
1135 				ip);
1136 	case BLKTRACESETUP:
1137 		return blk_trace_setup(sdp->device->request_queue,
1138 				       sdp->disk->disk_name,
1139 				       MKDEV(SCSI_GENERIC_MAJOR, sdp->index),
1140 				       NULL,
1141 				       (char *)arg);
1142 	case BLKTRACESTART:
1143 		return blk_trace_startstop(sdp->device->request_queue, 1);
1144 	case BLKTRACESTOP:
1145 		return blk_trace_startstop(sdp->device->request_queue, 0);
1146 	case BLKTRACETEARDOWN:
1147 		return blk_trace_remove(sdp->device->request_queue);
1148 	default:
1149 		if (read_only)
1150 			return -EPERM;	/* don't know so take safe approach */
1151 		return scsi_ioctl(sdp->device, cmd_in, p);
1152 	}
1153 }
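/*
 * Usage sketch (illustrative, not part of the original source): the
 * synchronous SG_IO case above is the path most applications use, e.g. a
 * 6 byte INQUIRY through the v3 interface:
 *
 *	unsigned char cdb[6] = { 0x12, 0, 0, 0, 96, 0 };
 *	unsigned char resp[96], sense[32];
 *	sg_io_hdr_t io;
 *
 *	memset(&io, 0, sizeof(io));
 *	io.interface_id    = 'S';
 *	io.dxfer_direction = SG_DXFER_FROM_DEV;
 *	io.cmd_len         = sizeof(cdb);
 *	io.cmdp            = cdb;
 *	io.dxfer_len       = sizeof(resp);
 *	io.dxferp          = resp;
 *	io.mx_sb_len       = sizeof(sense);
 *	io.sbp             = sense;
 *	io.timeout         = 20000;		   milliseconds
 *	if (ioctl(fd, SG_IO, &io) < 0 || (io.info & SG_INFO_CHECK))
 *		handle_error(&io);		   placeholder for the caller's error path
 *
 * ioctl(SG_IO) only returns once srp_done() reports completion; if a signal
 * interrupts the wait first, the request is marked orphan above and cleaned
 * up when it eventually finishes.
 */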
1154 
1155 #ifdef CONFIG_COMPAT
1156 static long sg_compat_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
1157 {
1158 	Sg_device *sdp;
1159 	Sg_fd *sfp;
1160 	struct scsi_device *sdev;
1161 
1162 	if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
1163 		return -ENXIO;
1164 
1165 	sdev = sdp->device;
1166 	if (sdev->host->hostt->compat_ioctl) {
1167 		int ret;
1168 
1169 		ret = sdev->host->hostt->compat_ioctl(sdev, cmd_in, (void __user *)arg);
1170 
1171 		return ret;
1172 	}
1173 
1174 	return -ENOIOCTLCMD;
1175 }
1176 #endif
1177 
1178 static unsigned int
1179 sg_poll(struct file *filp, poll_table * wait)
1180 {
1181 	unsigned int res = 0;
1182 	Sg_device *sdp;
1183 	Sg_fd *sfp;
1184 	Sg_request *srp;
1185 	int count = 0;
1186 	unsigned long iflags;
1187 
1188 	sfp = filp->private_data;
1189 	if (!sfp)
1190 		return POLLERR;
1191 	sdp = sfp->parentdp;
1192 	if (!sdp)
1193 		return POLLERR;
1194 	poll_wait(filp, &sfp->read_wait, wait);
1195 	read_lock_irqsave(&sfp->rq_list_lock, iflags);
1196 	for (srp = sfp->headrp; srp; srp = srp->nextrp) {
1197 		/* if any read waiting, flag it */
1198 		if ((0 == res) && (1 == srp->done) && (!srp->sg_io_owned))
1199 			res = POLLIN | POLLRDNORM;
1200 		++count;
1201 	}
1202 	read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
1203 
1204 	if (atomic_read(&sdp->detaching))
1205 		res |= POLLHUP;
1206 	else if (!sfp->cmd_q) {
1207 		if (0 == count)
1208 			res |= POLLOUT | POLLWRNORM;
1209 	} else if (count < SG_MAX_QUEUE)
1210 		res |= POLLOUT | POLLWRNORM;
1211 	SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp,
1212 				      "sg_poll: res=0x%x\n", (int) res));
1213 	return res;
1214 }
1215 
1216 static int
1217 sg_fasync(int fd, struct file *filp, int mode)
1218 {
1219 	Sg_device *sdp;
1220 	Sg_fd *sfp;
1221 
1222 	if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
1223 		return -ENXIO;
1224 	SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp,
1225 				      "sg_fasync: mode=%d\n", mode));
1226 
1227 	return fasync_helper(fd, filp, mode, &sfp->async_qp);
1228 }
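/*
 * Usage sketch (illustrative): the asynchronous notification set up here
 * pairs with the kill_fasync(..., SIGPOLL, POLL_IN) call in sg_rq_end_io()
 * below; a caller typically enables it with:
 *
 *	fcntl(fd, F_SETOWN, getpid());
 *	fcntl(fd, F_SETFL, fcntl(fd, F_GETFL) | O_ASYNC);
 *
 * after which SIGPOLL/SIGIO is delivered whenever a completed request
 * becomes ready for read().
 */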
1229 
1230 static int
1231 sg_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1232 {
1233 	Sg_fd *sfp;
1234 	unsigned long offset, len, sa;
1235 	Sg_scatter_hold *rsv_schp;
1236 	int k, length;
1237 
1238 	if ((NULL == vma) || (!(sfp = (Sg_fd *) vma->vm_private_data)))
1239 		return VM_FAULT_SIGBUS;
1240 	rsv_schp = &sfp->reserve;
1241 	offset = vmf->pgoff << PAGE_SHIFT;
1242 	if (offset >= rsv_schp->bufflen)
1243 		return VM_FAULT_SIGBUS;
1244 	SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sfp->parentdp,
1245 				      "sg_vma_fault: offset=%lu, scatg=%d\n",
1246 				      offset, rsv_schp->k_use_sg));
1247 	sa = vma->vm_start;
1248 	length = 1 << (PAGE_SHIFT + rsv_schp->page_order);
1249 	for (k = 0; k < rsv_schp->k_use_sg && sa < vma->vm_end; k++) {
1250 		len = vma->vm_end - sa;
1251 		len = (len < length) ? len : length;
1252 		if (offset < len) {
1253 			struct page *page = nth_page(rsv_schp->pages[k],
1254 						     offset >> PAGE_SHIFT);
1255 			get_page(page);	/* increment page count */
1256 			vmf->page = page;
1257 			return 0; /* success */
1258 		}
1259 		sa += len;
1260 		offset -= len;
1261 	}
1262 
1263 	return VM_FAULT_SIGBUS;
1264 }
1265 
1266 static const struct vm_operations_struct sg_mmap_vm_ops = {
1267 	.fault = sg_vma_fault,
1268 };
1269 
1270 static int
1271 sg_mmap(struct file *filp, struct vm_area_struct *vma)
1272 {
1273 	Sg_fd *sfp;
1274 	unsigned long req_sz, len, sa;
1275 	Sg_scatter_hold *rsv_schp;
1276 	int k, length;
1277 
1278 	if ((!filp) || (!vma) || (!(sfp = (Sg_fd *) filp->private_data)))
1279 		return -ENXIO;
1280 	req_sz = vma->vm_end - vma->vm_start;
1281 	SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sfp->parentdp,
1282 				      "sg_mmap starting, vm_start=%p, len=%d\n",
1283 				      (void *) vma->vm_start, (int) req_sz));
1284 	if (vma->vm_pgoff)
1285 		return -EINVAL;	/* want no offset */
1286 	rsv_schp = &sfp->reserve;
1287 	if (req_sz > rsv_schp->bufflen)
1288 		return -ENOMEM;	/* cannot map more than reserved buffer */
1289 
1290 	sa = vma->vm_start;
1291 	length = 1 << (PAGE_SHIFT + rsv_schp->page_order);
1292 	for (k = 0; k < rsv_schp->k_use_sg && sa < vma->vm_end; k++) {
1293 		len = vma->vm_end - sa;
1294 		len = (len < length) ? len : length;
1295 		sa += len;
1296 	}
1297 
1298 	sfp->mmap_called = 1;
1299 	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
1300 	vma->vm_private_data = sfp;
1301 	vma->vm_ops = &sg_mmap_vm_ops;
1302 	return 0;
1303 }
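/*
 * Usage sketch (illustrative, not part of the original source; io is the
 * caller's sg_io_hdr_t): mapping the fd exposes the reserve buffer, which
 * SG_FLAG_MMAP_IO then uses for zero-copy transfers:
 *
 *	int sz = 131072;
 *	ioctl(fd, SG_SET_RESERVED_SIZE, &sz);
 *	void *p = mmap(NULL, sz, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *	io.flags |= SG_FLAG_MMAP_IO;	   dxferp left NULL, data lands in p
 *
 * sg_new_write() above rejects SG_FLAG_MMAP_IO requests whose dxfer_len
 * exceeds the reserve buffer, and the mapping offset must be zero (checked
 * above), so the mapping covers the reserve buffer from its start.
 */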
1304 
1305 static void
1306 sg_rq_end_io_usercontext(struct work_struct *work)
1307 {
1308 	struct sg_request *srp = container_of(work, struct sg_request, ew.work);
1309 	struct sg_fd *sfp = srp->parentfp;
1310 
1311 	sg_finish_rem_req(srp);
1312 	kref_put(&sfp->f_ref, sg_remove_sfp);
1313 }
1314 
1315 /*
1316  * This function is a "bottom half" handler that is called by the mid
1317  * level when a command is completed (or has failed).
1318  */
1319 static void
1320 sg_rq_end_io(struct request *rq, int uptodate)
1321 {
1322 	struct sg_request *srp = rq->end_io_data;
1323 	Sg_device *sdp;
1324 	Sg_fd *sfp;
1325 	unsigned long iflags;
1326 	unsigned int ms;
1327 	char *sense;
1328 	int result, resid, done = 1;
1329 
1330 	if (WARN_ON(srp->done != 0))
1331 		return;
1332 
1333 	sfp = srp->parentfp;
1334 	if (WARN_ON(sfp == NULL))
1335 		return;
1336 
1337 	sdp = sfp->parentdp;
1338 	if (unlikely(atomic_read(&sdp->detaching)))
1339 		pr_info("%s: device detaching\n", __func__);
1340 
1341 	sense = rq->sense;
1342 	result = rq->errors;
1343 	resid = rq->resid_len;
1344 
1345 	SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sdp,
1346 				      "sg_cmd_done: pack_id=%d, res=0x%x\n",
1347 				      srp->header.pack_id, result));
1348 	srp->header.resid = resid;
1349 	ms = jiffies_to_msecs(jiffies);
1350 	srp->header.duration = (ms > srp->header.duration) ?
1351 				(ms - srp->header.duration) : 0;
1352 	if (0 != result) {
1353 		struct scsi_sense_hdr sshdr;
1354 
1355 		srp->header.status = 0xff & result;
1356 		srp->header.masked_status = status_byte(result);
1357 		srp->header.msg_status = msg_byte(result);
1358 		srp->header.host_status = host_byte(result);
1359 		srp->header.driver_status = driver_byte(result);
1360 		if ((sdp->sgdebug > 0) &&
1361 		    ((CHECK_CONDITION == srp->header.masked_status) ||
1362 		     (COMMAND_TERMINATED == srp->header.masked_status)))
1363 			__scsi_print_sense(__func__, sense,
1364 					   SCSI_SENSE_BUFFERSIZE);
1365 
1366 		/* Following if statement is a patch supplied by Eric Youngdale */
1367 		if (driver_byte(result) != 0
1368 		    && scsi_normalize_sense(sense, SCSI_SENSE_BUFFERSIZE, &sshdr)
1369 		    && !scsi_sense_is_deferred(&sshdr)
1370 		    && sshdr.sense_key == UNIT_ATTENTION
1371 		    && sdp->device->removable) {
1372 			/* Detected possible disc change. Set the bit - this */
1373 			/* may be used if there are filesystems using this device */
1374 			sdp->device->changed = 1;
1375 		}
1376 	}
1377 	/* Rely on write phase to clean out srp status values, so no "else" */
1378 
1379 	write_lock_irqsave(&sfp->rq_list_lock, iflags);
1380 	if (unlikely(srp->orphan)) {
1381 		if (sfp->keep_orphan)
1382 			srp->sg_io_owned = 0;
1383 		else
1384 			done = 0;
1385 	}
1386 	srp->done = done;
1387 	write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
1388 
1389 	if (likely(done)) {
1390 		/* Now wake up any sg_read() that is waiting for this
1391 		 * packet.
1392 		 */
1393 		wake_up_interruptible(&sfp->read_wait);
1394 		kill_fasync(&sfp->async_qp, SIGPOLL, POLL_IN);
1395 		kref_put(&sfp->f_ref, sg_remove_sfp);
1396 	} else {
1397 		INIT_WORK(&srp->ew.work, sg_rq_end_io_usercontext);
1398 		schedule_work(&srp->ew.work);
1399 	}
1400 }
1401 
1402 static const struct file_operations sg_fops = {
1403 	.owner = THIS_MODULE,
1404 	.read = sg_read,
1405 	.write = sg_write,
1406 	.poll = sg_poll,
1407 	.unlocked_ioctl = sg_ioctl,
1408 #ifdef CONFIG_COMPAT
1409 	.compat_ioctl = sg_compat_ioctl,
1410 #endif
1411 	.open = sg_open,
1412 	.mmap = sg_mmap,
1413 	.release = sg_release,
1414 	.fasync = sg_fasync,
1415 	.llseek = no_llseek,
1416 };
1417 
1418 static struct class *sg_sysfs_class;
1419 
1420 static int sg_sysfs_valid = 0;
1421 
1422 static Sg_device *
1423 sg_alloc(struct gendisk *disk, struct scsi_device *scsidp)
1424 {
1425 	struct request_queue *q = scsidp->request_queue;
1426 	Sg_device *sdp;
1427 	unsigned long iflags;
1428 	int error;
1429 	u32 k;
1430 
1431 	sdp = kzalloc(sizeof(Sg_device), GFP_KERNEL);
1432 	if (!sdp) {
1433 		sdev_printk(KERN_WARNING, scsidp, "%s: kmalloc Sg_device "
1434 			    "failure\n", __func__);
1435 		return ERR_PTR(-ENOMEM);
1436 	}
1437 
1438 	idr_preload(GFP_KERNEL);
1439 	write_lock_irqsave(&sg_index_lock, iflags);
1440 
1441 	error = idr_alloc(&sg_index_idr, sdp, 0, SG_MAX_DEVS, GFP_NOWAIT);
1442 	if (error < 0) {
1443 		if (error == -ENOSPC) {
1444 			sdev_printk(KERN_WARNING, scsidp,
1445 				    "Unable to attach sg device type=%d, minor number exceeds %d\n",
1446 				    scsidp->type, SG_MAX_DEVS - 1);
1447 			error = -ENODEV;
1448 		} else {
1449 			sdev_printk(KERN_WARNING, scsidp, "%s: idr "
1450 				    "allocation Sg_device failure: %d\n",
1451 				    __func__, error);
1452 		}
1453 		goto out_unlock;
1454 	}
1455 	k = error;
1456 
1457 	SCSI_LOG_TIMEOUT(3, sdev_printk(KERN_INFO, scsidp,
1458 					"sg_alloc: dev=%d \n", k));
1459 	sprintf(disk->disk_name, "sg%d", k);
1460 	disk->first_minor = k;
1461 	sdp->disk = disk;
1462 	sdp->device = scsidp;
1463 	mutex_init(&sdp->open_rel_lock);
1464 	INIT_LIST_HEAD(&sdp->sfds);
1465 	init_waitqueue_head(&sdp->open_wait);
1466 	atomic_set(&sdp->detaching, 0);
1467 	rwlock_init(&sdp->sfd_lock);
1468 	sdp->sg_tablesize = queue_max_segments(q);
1469 	sdp->index = k;
1470 	kref_init(&sdp->d_ref);
1471 	error = 0;
1472 
1473 out_unlock:
1474 	write_unlock_irqrestore(&sg_index_lock, iflags);
1475 	idr_preload_end();
1476 
1477 	if (error) {
1478 		kfree(sdp);
1479 		return ERR_PTR(error);
1480 	}
1481 	return sdp;
1482 }
1483 
1484 static int
1485 sg_add_device(struct device *cl_dev, struct class_interface *cl_intf)
1486 {
1487 	struct scsi_device *scsidp = to_scsi_device(cl_dev->parent);
1488 	struct gendisk *disk;
1489 	Sg_device *sdp = NULL;
1490 	struct cdev * cdev = NULL;
1491 	int error;
1492 	unsigned long iflags;
1493 
1494 	disk = alloc_disk(1);
1495 	if (!disk) {
1496 		pr_warn("%s: alloc_disk failed\n", __func__);
1497 		return -ENOMEM;
1498 	}
1499 	disk->major = SCSI_GENERIC_MAJOR;
1500 
1501 	error = -ENOMEM;
1502 	cdev = cdev_alloc();
1503 	if (!cdev) {
1504 		pr_warn("%s: cdev_alloc failed\n", __func__);
1505 		goto out;
1506 	}
1507 	cdev->owner = THIS_MODULE;
1508 	cdev->ops = &sg_fops;
1509 
1510 	sdp = sg_alloc(disk, scsidp);
1511 	if (IS_ERR(sdp)) {
1512 		pr_warn("%s: sg_alloc failed\n", __func__);
1513 		error = PTR_ERR(sdp);
1514 		goto out;
1515 	}
1516 
1517 	error = cdev_add(cdev, MKDEV(SCSI_GENERIC_MAJOR, sdp->index), 1);
1518 	if (error)
1519 		goto cdev_add_err;
1520 
1521 	sdp->cdev = cdev;
1522 	if (sg_sysfs_valid) {
1523 		struct device *sg_class_member;
1524 
1525 		sg_class_member = device_create(sg_sysfs_class, cl_dev->parent,
1526 						MKDEV(SCSI_GENERIC_MAJOR,
1527 						      sdp->index),
1528 						sdp, "%s", disk->disk_name);
1529 		if (IS_ERR(sg_class_member)) {
1530 			pr_err("%s: device_create failed\n", __func__);
1531 			error = PTR_ERR(sg_class_member);
1532 			goto cdev_add_err;
1533 		}
1534 		error = sysfs_create_link(&scsidp->sdev_gendev.kobj,
1535 					  &sg_class_member->kobj, "generic");
1536 		if (error)
1537 			pr_err("%s: unable to make symlink 'generic' back "
1538 			       "to sg%d\n", __func__, sdp->index);
1539 	} else
1540 		pr_warn("%s: sg_sys Invalid\n", __func__);
1541 
1542 	sdev_printk(KERN_NOTICE, scsidp, "Attached scsi generic sg%d "
1543 		    "type %d\n", sdp->index, scsidp->type);
1544 
1545 	dev_set_drvdata(cl_dev, sdp);
1546 
1547 	return 0;
1548 
1549 cdev_add_err:
1550 	write_lock_irqsave(&sg_index_lock, iflags);
1551 	idr_remove(&sg_index_idr, sdp->index);
1552 	write_unlock_irqrestore(&sg_index_lock, iflags);
1553 	kfree(sdp);
1554 
1555 out:
1556 	put_disk(disk);
1557 	if (cdev)
1558 		cdev_del(cdev);
1559 	return error;
1560 }
1561 
1562 static void
1563 sg_device_destroy(struct kref *kref)
1564 {
1565 	struct sg_device *sdp = container_of(kref, struct sg_device, d_ref);
1566 	unsigned long flags;
1567 
1568 	/* CAUTION!  Note that the device can still be found via idr_find()
1569 	 * even though the refcount is 0.  Therefore, do idr_remove() BEFORE
1570 	 * any other cleanup.
1571 	 */
1572 
1573 	write_lock_irqsave(&sg_index_lock, flags);
1574 	idr_remove(&sg_index_idr, sdp->index);
1575 	write_unlock_irqrestore(&sg_index_lock, flags);
1576 
1577 	SCSI_LOG_TIMEOUT(3,
1578 		sg_printk(KERN_INFO, sdp, "sg_device_destroy\n"));
1579 
1580 	put_disk(sdp->disk);
1581 	kfree(sdp);
1582 }
1583 
1584 static void
1585 sg_remove_device(struct device *cl_dev, struct class_interface *cl_intf)
1586 {
1587 	struct scsi_device *scsidp = to_scsi_device(cl_dev->parent);
1588 	Sg_device *sdp = dev_get_drvdata(cl_dev);
1589 	unsigned long iflags;
1590 	Sg_fd *sfp;
1591 	int val;
1592 
1593 	if (!sdp)
1594 		return;
1595 	/* want sdp->detaching non-zero as soon as possible */
1596 	val = atomic_inc_return(&sdp->detaching);
1597 	if (val > 1)
1598 		return; /* only want to do following once per device */
1599 
1600 	SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp,
1601 				      "%s\n", __func__));
1602 
1603 	read_lock_irqsave(&sdp->sfd_lock, iflags);
1604 	list_for_each_entry(sfp, &sdp->sfds, sfd_siblings) {
1605 		wake_up_interruptible_all(&sfp->read_wait);
1606 		kill_fasync(&sfp->async_qp, SIGPOLL, POLL_HUP);
1607 	}
1608 	wake_up_interruptible_all(&sdp->open_wait);
1609 	read_unlock_irqrestore(&sdp->sfd_lock, iflags);
1610 
1611 	sysfs_remove_link(&scsidp->sdev_gendev.kobj, "generic");
1612 	device_destroy(sg_sysfs_class, MKDEV(SCSI_GENERIC_MAJOR, sdp->index));
1613 	cdev_del(sdp->cdev);
1614 	sdp->cdev = NULL;
1615 
1616 	kref_put(&sdp->d_ref, sg_device_destroy);
1617 }
1618 
1619 module_param_named(scatter_elem_sz, scatter_elem_sz, int, S_IRUGO | S_IWUSR);
1620 module_param_named(def_reserved_size, def_reserved_size, int,
1621 		   S_IRUGO | S_IWUSR);
1622 module_param_named(allow_dio, sg_allow_dio, int, S_IRUGO | S_IWUSR);
1623 
1624 MODULE_AUTHOR("Douglas Gilbert");
1625 MODULE_DESCRIPTION("SCSI generic (sg) driver");
1626 MODULE_LICENSE("GPL");
1627 MODULE_VERSION(SG_VERSION_STR);
1628 MODULE_ALIAS_CHARDEV_MAJOR(SCSI_GENERIC_MAJOR);
1629 
1630 MODULE_PARM_DESC(scatter_elem_sz, "scatter gather element "
1631                 "size (default: max(SG_SCATTER_SZ, PAGE_SIZE))");
1632 MODULE_PARM_DESC(def_reserved_size, "size of buffer reserved for each fd");
1633 MODULE_PARM_DESC(allow_dio, "allow direct I/O (default: 0 (disallow))");
1634 
1635 static int __init
1636 init_sg(void)
1637 {
1638 	int rc;
1639 
1640 	if (scatter_elem_sz < PAGE_SIZE) {
1641 		scatter_elem_sz = PAGE_SIZE;
1642 		scatter_elem_sz_prev = scatter_elem_sz;
1643 	}
1644 	if (def_reserved_size >= 0)
1645 		sg_big_buff = def_reserved_size;
1646 	else
1647 		def_reserved_size = sg_big_buff;
1648 
1649 	rc = register_chrdev_region(MKDEV(SCSI_GENERIC_MAJOR, 0),
1650 				    SG_MAX_DEVS, "sg");
1651 	if (rc)
1652 		return rc;
1653 	sg_sysfs_class = class_create(THIS_MODULE, "scsi_generic");
1654 	if (IS_ERR(sg_sysfs_class)) {
1655 		rc = PTR_ERR(sg_sysfs_class);
1656 		goto err_out;
1657 	}
1658 	sg_sysfs_valid = 1;
1659 	rc = scsi_register_interface(&sg_interface);
1660 	if (0 == rc) {
1661 #ifdef CONFIG_SCSI_PROC_FS
1662 		sg_proc_init();
1663 #endif				/* CONFIG_SCSI_PROC_FS */
1664 		return 0;
1665 	}
1666 	class_destroy(sg_sysfs_class);
1667 err_out:
1668 	unregister_chrdev_region(MKDEV(SCSI_GENERIC_MAJOR, 0), SG_MAX_DEVS);
1669 	return rc;
1670 }
1671 
1672 static void __exit
1673 exit_sg(void)
1674 {
1675 #ifdef CONFIG_SCSI_PROC_FS
1676 	sg_proc_cleanup();
1677 #endif				/* CONFIG_SCSI_PROC_FS */
1678 	scsi_unregister_interface(&sg_interface);
1679 	class_destroy(sg_sysfs_class);
1680 	sg_sysfs_valid = 0;
1681 	unregister_chrdev_region(MKDEV(SCSI_GENERIC_MAJOR, 0),
1682 				 SG_MAX_DEVS);
1683 	idr_destroy(&sg_index_idr);
1684 }
1685 
1686 static int
1687 sg_start_req(Sg_request *srp, unsigned char *cmd)
1688 {
1689 	int res;
1690 	struct request *rq;
1691 	Sg_fd *sfp = srp->parentfp;
1692 	sg_io_hdr_t *hp = &srp->header;
1693 	int dxfer_len = (int) hp->dxfer_len;
1694 	int dxfer_dir = hp->dxfer_direction;
1695 	unsigned int iov_count = hp->iovec_count;
1696 	Sg_scatter_hold *req_schp = &srp->data;
1697 	Sg_scatter_hold *rsv_schp = &sfp->reserve;
1698 	struct request_queue *q = sfp->parentdp->device->request_queue;
1699 	struct rq_map_data *md, map_data;
1700 	int rw = hp->dxfer_direction == SG_DXFER_TO_DEV ? WRITE : READ;
1701 	unsigned char *long_cmdp = NULL;
1702 
1703 	SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sfp->parentdp,
1704 				      "sg_start_req: dxfer_len=%d\n",
1705 				      dxfer_len));
1706 
1707 	if (hp->cmd_len > BLK_MAX_CDB) {
1708 		long_cmdp = kzalloc(hp->cmd_len, GFP_KERNEL);
1709 		if (!long_cmdp)
1710 			return -ENOMEM;
1711 	}
1712 
1713 	rq = blk_get_request(q, rw, GFP_ATOMIC);
1714 	if (IS_ERR(rq)) {
1715 		kfree(long_cmdp);
1716 		return PTR_ERR(rq);
1717 	}
1718 
1719 	blk_rq_set_block_pc(rq);
1720 
1721 	if (hp->cmd_len > BLK_MAX_CDB)
1722 		rq->cmd = long_cmdp;
1723 	memcpy(rq->cmd, cmd, hp->cmd_len);
1724 	rq->cmd_len = hp->cmd_len;
1725 
1726 	srp->rq = rq;
1727 	rq->end_io_data = srp;
1728 	rq->sense = srp->sense_b;
1729 	rq->retries = SG_DEFAULT_RETRIES;
1730 
1731 	if ((dxfer_len <= 0) || (dxfer_dir == SG_DXFER_NONE))
1732 		return 0;
1733 
1734 	if (sg_allow_dio && (hp->flags & SG_FLAG_DIRECT_IO) &&
1735 	    dxfer_dir != SG_DXFER_UNKNOWN && !iov_count &&
1736 	    !sfp->parentdp->device->host->unchecked_isa_dma &&
1737 	    blk_rq_aligned(q, (unsigned long)hp->dxferp, dxfer_len))
1738 		md = NULL;
1739 	else
1740 		md = &map_data;
1741 
1742 	if (md) {
1743 		if (!sg_res_in_use(sfp) && dxfer_len <= rsv_schp->bufflen)
1744 			sg_link_reserve(sfp, srp, dxfer_len);
1745 		else {
1746 			res = sg_build_indirect(req_schp, sfp, dxfer_len);
1747 			if (res)
1748 				return res;
1749 		}
1750 
1751 		md->pages = req_schp->pages;
1752 		md->page_order = req_schp->page_order;
1753 		md->nr_entries = req_schp->k_use_sg;
1754 		md->offset = 0;
1755 		md->null_mapped = hp->dxferp ? 0 : 1;
1756 		if (dxfer_dir == SG_DXFER_TO_FROM_DEV)
1757 			md->from_user = 1;
1758 		else
1759 			md->from_user = 0;
1760 	}
1761 
1762 	if (iov_count) {
1763 		int len, size = sizeof(struct sg_iovec) * iov_count;
1764 		struct iovec *iov;
1765 
1766 		iov = memdup_user(hp->dxferp, size);
1767 		if (IS_ERR(iov))
1768 			return PTR_ERR(iov);
1769 
1770 		len = iov_length(iov, iov_count);
1771 		if (hp->dxfer_len < len) {
1772 			iov_count = iov_shorten(iov, iov_count, hp->dxfer_len);
1773 			len = hp->dxfer_len;
1774 		}
1775 
1776 		res = blk_rq_map_user_iov(q, rq, md, (struct sg_iovec *)iov,
1777 					  iov_count,
1778 					  len, GFP_ATOMIC);
1779 		kfree(iov);
1780 	} else
1781 		res = blk_rq_map_user(q, rq, md, hp->dxferp,
1782 				      hp->dxfer_len, GFP_ATOMIC);
1783 
1784 	if (!res) {
1785 		srp->bio = rq->bio;
1786 
1787 		if (!md) {
1788 			req_schp->dio_in_use = 1;
1789 			hp->info |= SG_INFO_DIRECT_IO;
1790 		}
1791 	}
1792 	return res;
1793 }
1794 
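/*
 * Tear down a completed (or failed) request: unmap the user pages,
 * free any oversized CDB, release the block layer request, return the
 * reserve buffer (or free the indirect scatter list) and unlink the
 * request from its file descriptor.
 */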
1795 static int
1796 sg_finish_rem_req(Sg_request *srp)
1797 {
1798 	int ret = 0;
1799 
1800 	Sg_fd *sfp = srp->parentfp;
1801 	Sg_scatter_hold *req_schp = &srp->data;
1802 
1803 	SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sfp->parentdp,
1804 				      "sg_finish_rem_req: res_used=%d\n",
1805 				      (int) srp->res_used));
1806 	if (srp->rq) {
1807 		if (srp->bio)
1808 			ret = blk_rq_unmap_user(srp->bio);
1809 
1810 		if (srp->rq->cmd != srp->rq->__cmd)
1811 			kfree(srp->rq->cmd);
1812 		blk_put_request(srp->rq);
1813 	}
1814 
1815 	if (srp->res_used)
1816 		sg_unlink_reserve(sfp, srp);
1817 	else
1818 		sg_remove_scat(sfp, req_schp);
1819 
1820 	sg_remove_request(sfp, srp);
1821 
1822 	return ret;
1823 }
1824 
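/* Allocate the page pointer array for a scatter list of up to tablesize elements. */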
1825 static int
1826 sg_build_sgat(Sg_scatter_hold * schp, const Sg_fd * sfp, int tablesize)
1827 {
1828 	int sg_bufflen = tablesize * sizeof(struct page *);
1829 	gfp_t gfp_flags = GFP_ATOMIC | __GFP_NOWARN;
1830 
1831 	schp->pages = kzalloc(sg_bufflen, gfp_flags);
1832 	if (!schp->pages)
1833 		return -ENOMEM;
1834 	schp->sglist_len = sg_bufflen;
1835 	return tablesize;	/* number of scat_gath elements allocated */
1836 }
1837 
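/*
 * Build a driver-owned scatter list covering buff_size bytes from
 * same-order page allocations, retrying with a smaller order when an
 * allocation fails.  Pages are zeroed unless the caller holds both
 * CAP_SYS_ADMIN and CAP_SYS_RAWIO.
 */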
1838 static int
1839 sg_build_indirect(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size)
1840 {
1841 	int ret_sz = 0, i, k, rem_sz, num, mx_sc_elems;
1842 	int sg_tablesize = sfp->parentdp->sg_tablesize;
1843 	int blk_size = buff_size, order;
1844 	gfp_t gfp_mask = GFP_ATOMIC | __GFP_COMP | __GFP_NOWARN;
1845 
1846 	if (blk_size < 0)
1847 		return -EFAULT;
1848 	if (0 == blk_size)
1849 		++blk_size;	/* avoid a zero-length scatter list */
1850 	/* round request up to next highest SG_SECTOR_SZ byte boundary */
1851 	blk_size = ALIGN(blk_size, SG_SECTOR_SZ);
1852 	SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sfp->parentdp,
1853 		"sg_build_indirect: buff_size=%d, blk_size=%d\n",
1854 		buff_size, blk_size));
1855 
1856 	/* allocate the page pointer array first; the pages follow below */
1857 	mx_sc_elems = sg_build_sgat(schp, sfp, sg_tablesize);
1858 	if (mx_sc_elems < 0)
1859 		return mx_sc_elems;	/* most likely -ENOMEM */
1860 
1861 	num = scatter_elem_sz;
1862 	if (unlikely(num != scatter_elem_sz_prev)) {
1863 		if (num < PAGE_SIZE) {
1864 			scatter_elem_sz = PAGE_SIZE;
1865 			scatter_elem_sz_prev = PAGE_SIZE;
1866 		} else
1867 			scatter_elem_sz_prev = num;
1868 	}
1869 
1870 	if (sfp->low_dma)
1871 		gfp_mask |= GFP_DMA;
1872 
1873 	if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
1874 		gfp_mask |= __GFP_ZERO;
1875 
1876 	order = get_order(num);
1877 retry:
1878 	ret_sz = 1 << (PAGE_SHIFT + order);
1879 
1880 	for (k = 0, rem_sz = blk_size; rem_sz > 0 && k < mx_sc_elems;
1881 	     k++, rem_sz -= ret_sz) {
1882 
1883 		num = (rem_sz > scatter_elem_sz_prev) ?
1884 			scatter_elem_sz_prev : rem_sz;
1885 
1886 		schp->pages[k] = alloc_pages(gfp_mask, order);
1887 		if (!schp->pages[k])
1888 			goto out;
1889 
1890 		if (num == scatter_elem_sz_prev) {
1891 			if (unlikely(ret_sz > scatter_elem_sz_prev)) {
1892 				scatter_elem_sz = ret_sz;
1893 				scatter_elem_sz_prev = ret_sz;
1894 			}
1895 		}
1896 
1897 		SCSI_LOG_TIMEOUT(5, sg_printk(KERN_INFO, sfp->parentdp,
1898 				 "sg_build_indirect: k=%d, num=%d, ret_sz=%d\n",
1899 				 k, num, ret_sz));
1900 	}		/* end of for loop */
1901 
1902 	schp->page_order = order;
1903 	schp->k_use_sg = k;
1904 	SCSI_LOG_TIMEOUT(5, sg_printk(KERN_INFO, sfp->parentdp,
1905 			 "sg_build_indirect: k_use_sg=%d, rem_sz=%d\n",
1906 			 k, rem_sz));
1907 
1908 	schp->bufflen = blk_size;
1909 	if (rem_sz > 0)	/* must have failed */
1910 		return -ENOMEM;
1911 	return 0;
1912 out:
1913 	for (i = 0; i < k; i++)
1914 		__free_pages(schp->pages[i], order);
1915 
1916 	if (--order >= 0)
1917 		goto retry;
1918 
1919 	return -ENOMEM;
1920 }
1921 
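/*
 * Free the pages behind a scatter list and its page pointer array
 * (skipped for direct I/O mappings), then clear the descriptor.
 */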
1922 static void
1923 sg_remove_scat(Sg_fd * sfp, Sg_scatter_hold * schp)
1924 {
1925 	SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sfp->parentdp,
1926 			 "sg_remove_scat: k_use_sg=%d\n", schp->k_use_sg));
1927 	if (schp->pages && schp->sglist_len > 0) {
1928 		if (!schp->dio_in_use) {
1929 			int k;
1930 
1931 			for (k = 0; k < schp->k_use_sg && schp->pages[k]; k++) {
1932 				SCSI_LOG_TIMEOUT(5,
1933 					sg_printk(KERN_INFO, sfp->parentdp,
1934 					"sg_remove_scat: k=%d, pg=0x%p\n",
1935 					k, schp->pages[k]));
1936 				__free_pages(schp->pages[k], schp->page_order);
1937 			}
1938 
1939 			kfree(schp->pages);
1940 		}
1941 	}
1942 	memset(schp, 0, sizeof (*schp));
1943 }
1944 
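/*
 * Copy up to num_read_xfer bytes of response data from the scatter
 * list pages to the user buffer (used by the older read() interface).
 */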
1945 static int
1946 sg_read_oxfer(Sg_request * srp, char __user *outp, int num_read_xfer)
1947 {
1948 	Sg_scatter_hold *schp = &srp->data;
1949 	int k, num;
1950 
1951 	SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, srp->parentfp->parentdp,
1952 			 "sg_read_oxfer: num_read_xfer=%d\n",
1953 			 num_read_xfer));
1954 	if ((!outp) || (num_read_xfer <= 0))
1955 		return 0;
1956 
1957 	num = 1 << (PAGE_SHIFT + schp->page_order);
1958 	for (k = 0; k < schp->k_use_sg && schp->pages[k]; k++) {
1959 		if (num > num_read_xfer) {
1960 			if (__copy_to_user(outp, page_address(schp->pages[k]),
1961 					   num_read_xfer))
1962 				return -EFAULT;
1963 			break;
1964 		} else {
1965 			if (__copy_to_user(outp, page_address(schp->pages[k]),
1966 					   num))
1967 				return -EFAULT;
1968 			num_read_xfer -= num;
1969 			if (num_read_xfer <= 0)
1970 				break;
1971 			outp += num;
1972 		}
1973 	}
1974 
1975 	return 0;
1976 }
1977 
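/*
 * Set up the per-fd reserve buffer, halving the requested size until
 * an allocation succeeds (but never asking for less than PAGE_SIZE).
 */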
1978 static void
1979 sg_build_reserve(Sg_fd * sfp, int req_size)
1980 {
1981 	Sg_scatter_hold *schp = &sfp->reserve;
1982 
1983 	SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sfp->parentdp,
1984 			 "sg_build_reserve: req_size=%d\n", req_size));
1985 	do {
1986 		if (req_size < PAGE_SIZE)
1987 			req_size = PAGE_SIZE;
1988 		if (0 == sg_build_indirect(schp, sfp, req_size))
1989 			return;
1990 		else
1991 			sg_remove_scat(sfp, schp);
1992 		req_size >>= 1;	/* divide by 2 */
1993 	} while (req_size > (PAGE_SIZE / 2));
1994 }
1995 
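/*
 * Point a request's data descriptor at the leading part of the fd's
 * reserve buffer that covers size bytes and mark the reserve buffer
 * as in use.
 */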
1996 static void
1997 sg_link_reserve(Sg_fd * sfp, Sg_request * srp, int size)
1998 {
1999 	Sg_scatter_hold *req_schp = &srp->data;
2000 	Sg_scatter_hold *rsv_schp = &sfp->reserve;
2001 	int k, num, rem;
2002 
2003 	srp->res_used = 1;
2004 	SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sfp->parentdp,
2005 			 "sg_link_reserve: size=%d\n", size));
2006 	rem = size;
2007 
2008 	num = 1 << (PAGE_SHIFT + rsv_schp->page_order);
2009 	for (k = 0; k < rsv_schp->k_use_sg; k++) {
2010 		if (rem <= num) {
2011 			req_schp->k_use_sg = k + 1;
2012 			req_schp->sglist_len = rsv_schp->sglist_len;
2013 			req_schp->pages = rsv_schp->pages;
2014 
2015 			req_schp->bufflen = size;
2016 			req_schp->page_order = rsv_schp->page_order;
2017 			break;
2018 		} else
2019 			rem -= num;
2020 	}
2021 
2022 	if (k >= rsv_schp->k_use_sg)
2023 		SCSI_LOG_TIMEOUT(1, sg_printk(KERN_INFO, sfp->parentdp,
2024 				 "sg_link_reserve: BAD size\n"));
2025 }
2026 
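/* Detach a request from the reserve buffer and clear its data descriptor. */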
2027 static void
2028 sg_unlink_reserve(Sg_fd * sfp, Sg_request * srp)
2029 {
2030 	Sg_scatter_hold *req_schp = &srp->data;
2031 
2032 	SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, srp->parentfp->parentdp,
2033 				      "sg_unlink_reserve: req->k_use_sg=%d\n",
2034 				      (int) req_schp->k_use_sg));
2035 	req_schp->k_use_sg = 0;
2036 	req_schp->bufflen = 0;
2037 	req_schp->pages = NULL;
2038 	req_schp->page_order = 0;
2039 	req_schp->sglist_len = 0;
2040 	sfp->save_scat_len = 0;
2041 	srp->res_used = 0;
2042 }
2043 
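/*
 * Find a completed request (matching pack_id, or any if pack_id is -1)
 * that is not owned by a blocking SG_IO call and claim it for the
 * caller by setting done = 2.
 */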
2044 static Sg_request *
2045 sg_get_rq_mark(Sg_fd * sfp, int pack_id)
2046 {
2047 	Sg_request *resp;
2048 	unsigned long iflags;
2049 
2050 	write_lock_irqsave(&sfp->rq_list_lock, iflags);
2051 	for (resp = sfp->headrp; resp; resp = resp->nextrp) {
2052 		/* look for requests that are ready + not SG_IO owned */
2053 		if ((1 == resp->done) && (!resp->sg_io_owned) &&
2054 		    ((-1 == pack_id) || (resp->header.pack_id == pack_id))) {
2055 			resp->done = 2;	/* guard against other readers */
2056 			break;
2057 		}
2058 	}
2059 	write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
2060 	return resp;
2061 }
2062 
2063 /* always adds to end of list */
2064 static Sg_request *
2065 sg_add_request(Sg_fd * sfp)
2066 {
2067 	int k;
2068 	unsigned long iflags;
2069 	Sg_request *resp;
2070 	Sg_request *rp = sfp->req_arr;
2071 
2072 	write_lock_irqsave(&sfp->rq_list_lock, iflags);
2073 	resp = sfp->headrp;
2074 	if (!resp) {
2075 		memset(rp, 0, sizeof (Sg_request));
2076 		rp->parentfp = sfp;
2077 		resp = rp;
2078 		sfp->headrp = resp;
2079 	} else {
2080 		if (0 == sfp->cmd_q)
2081 			resp = NULL;	/* command queuing disallowed */
2082 		else {
2083 			for (k = 0; k < SG_MAX_QUEUE; ++k, ++rp) {
2084 				if (!rp->parentfp)
2085 					break;
2086 			}
2087 			if (k < SG_MAX_QUEUE) {
2088 				memset(rp, 0, sizeof (Sg_request));
2089 				rp->parentfp = sfp;
2090 				while (resp->nextrp)
2091 					resp = resp->nextrp;
2092 				resp->nextrp = rp;
2093 				resp = rp;
2094 			} else
2095 				resp = NULL;
2096 		}
2097 	}
2098 	if (resp) {
2099 		resp->nextrp = NULL;
2100 		resp->header.duration = jiffies_to_msecs(jiffies);
2101 	}
2102 	write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
2103 	return resp;
2104 }
2105 
2106 /* Return of 1 for found; 0 for not found */
2107 static int
2108 sg_remove_request(Sg_fd * sfp, Sg_request * srp)
2109 {
2110 	Sg_request *prev_rp;
2111 	Sg_request *rp;
2112 	unsigned long iflags;
2113 	int res = 0;
2114 
2115 	if ((!sfp) || (!srp) || (!sfp->headrp))
2116 		return res;
2117 	write_lock_irqsave(&sfp->rq_list_lock, iflags);
2118 	prev_rp = sfp->headrp;
2119 	if (srp == prev_rp) {
2120 		sfp->headrp = prev_rp->nextrp;
2121 		prev_rp->parentfp = NULL;
2122 		res = 1;
2123 	} else {
2124 		while ((rp = prev_rp->nextrp)) {
2125 			if (srp == rp) {
2126 				prev_rp->nextrp = rp->nextrp;
2127 				rp->parentfp = NULL;
2128 				res = 1;
2129 				break;
2130 			}
2131 			prev_rp = rp;
2132 		}
2133 	}
2134 	write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
2135 	return res;
2136 }
2137 
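/*
 * Allocate and initialise a new per-open file descriptor object, link
 * it to the device (failing with -ENODEV if the device is detaching),
 * build its reserve buffer and take device and module references.
 */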
2138 static Sg_fd *
2139 sg_add_sfp(Sg_device * sdp)
2140 {
2141 	Sg_fd *sfp;
2142 	unsigned long iflags;
2143 	int bufflen;
2144 
2145 	sfp = kzalloc(sizeof(*sfp), GFP_ATOMIC | __GFP_NOWARN);
2146 	if (!sfp)
2147 		return ERR_PTR(-ENOMEM);
2148 
2149 	init_waitqueue_head(&sfp->read_wait);
2150 	rwlock_init(&sfp->rq_list_lock);
2151 
2152 	kref_init(&sfp->f_ref);
2153 	sfp->timeout = SG_DEFAULT_TIMEOUT;
2154 	sfp->timeout_user = SG_DEFAULT_TIMEOUT_USER;
2155 	sfp->force_packid = SG_DEF_FORCE_PACK_ID;
2156 	sfp->low_dma = (SG_DEF_FORCE_LOW_DMA == 0) ?
2157 	    sdp->device->host->unchecked_isa_dma : 1;
2158 	sfp->cmd_q = SG_DEF_COMMAND_Q;
2159 	sfp->keep_orphan = SG_DEF_KEEP_ORPHAN;
2160 	sfp->parentdp = sdp;
2161 	write_lock_irqsave(&sdp->sfd_lock, iflags);
2162 	if (atomic_read(&sdp->detaching)) {
2163 		write_unlock_irqrestore(&sdp->sfd_lock, iflags);
2164 		return ERR_PTR(-ENODEV);
2165 	}
2166 	list_add_tail(&sfp->sfd_siblings, &sdp->sfds);
2167 	write_unlock_irqrestore(&sdp->sfd_lock, iflags);
2168 	SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp,
2169 				      "sg_add_sfp: sfp=0x%p\n", sfp));
2170 	if (unlikely(sg_big_buff != def_reserved_size))
2171 		sg_big_buff = def_reserved_size;
2172 
2173 	bufflen = min_t(int, sg_big_buff,
2174 			max_sectors_bytes(sdp->device->request_queue));
2175 	sg_build_reserve(sfp, bufflen);
2176 	SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp,
2177 				      "sg_add_sfp: bufflen=%d, k_use_sg=%d\n",
2178 				      sfp->reserve.bufflen,
2179 				      sfp->reserve.k_use_sg));
2180 
2181 	kref_get(&sdp->d_ref);
2182 	__module_get(THIS_MODULE);
2183 	return sfp;
2184 }
2185 
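/*
 * Final fd teardown, run from a workqueue so it may sleep: finish any
 * requests that were never read, free the reserve buffer and the fd
 * object, then drop the device and module references.
 */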
2186 static void
2187 sg_remove_sfp_usercontext(struct work_struct *work)
2188 {
2189 	struct sg_fd *sfp = container_of(work, struct sg_fd, ew.work);
2190 	struct sg_device *sdp = sfp->parentdp;
2191 
2192 	/* Cleanup any responses which were never read(). */
2193 	while (sfp->headrp)
2194 		sg_finish_rem_req(sfp->headrp);
2195 
2196 	if (sfp->reserve.bufflen > 0) {
2197 		SCSI_LOG_TIMEOUT(6, sg_printk(KERN_INFO, sdp,
2198 				"sg_remove_sfp:    bufflen=%d, k_use_sg=%d\n",
2199 				(int) sfp->reserve.bufflen,
2200 				(int) sfp->reserve.k_use_sg));
2201 		sg_remove_scat(sfp, &sfp->reserve);
2202 	}
2203 
2204 	SCSI_LOG_TIMEOUT(6, sg_printk(KERN_INFO, sdp,
2205 			"sg_remove_sfp: sfp=0x%p\n", sfp));
2206 	kfree(sfp);
2207 
2208 	scsi_device_put(sdp->device);
2209 	kref_put(&sdp->d_ref, sg_device_destroy);
2210 	module_put(THIS_MODULE);
2211 }
2212 
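/*
 * kref release for an sg file descriptor: unlink it from the device's
 * fd list and defer the heavyweight cleanup to process context.
 */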
2213 static void
2214 sg_remove_sfp(struct kref *kref)
2215 {
2216 	struct sg_fd *sfp = container_of(kref, struct sg_fd, f_ref);
2217 	struct sg_device *sdp = sfp->parentdp;
2218 	unsigned long iflags;
2219 
2220 	write_lock_irqsave(&sdp->sfd_lock, iflags);
2221 	list_del(&sfp->sfd_siblings);
2222 	write_unlock_irqrestore(&sdp->sfd_lock, iflags);
2223 
2224 	INIT_WORK(&sfp->ew.work, sg_remove_sfp_usercontext);
2225 	schedule_work(&sfp->ew.work);
2226 }
2227 
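/* Return 1 if any request on this fd currently uses the reserve buffer. */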
2228 static int
2229 sg_res_in_use(Sg_fd * sfp)
2230 {
2231 	const Sg_request *srp;
2232 	unsigned long iflags;
2233 
2234 	read_lock_irqsave(&sfp->rq_list_lock, iflags);
2235 	for (srp = sfp->headrp; srp; srp = srp->nextrp)
2236 		if (srp->res_used)
2237 			break;
2238 	read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
2239 	return srp ? 1 : 0;
2240 }
2241 
2242 #ifdef CONFIG_SCSI_PROC_FS
2243 static int
2244 sg_idr_max_id(int id, void *p, void *data)
2245 {
2246 	int *k = data;
2247 
2248 	if (*k < id)
2249 		*k = id;
2250 
2251 	return 0;
2252 }
2253 
2254 static int
2255 sg_last_dev(void)
2256 {
2257 	int k = -1;
2258 	unsigned long iflags;
2259 
2260 	read_lock_irqsave(&sg_index_lock, iflags);
2261 	idr_for_each(&sg_index_idr, sg_idr_max_id, &k);
2262 	read_unlock_irqrestore(&sg_index_lock, iflags);
2263 	return k + 1;		/* origin 1 */
2264 }
2265 #endif
2266 
2267 /* must be called with sg_index_lock held */
2268 static Sg_device *sg_lookup_dev(int dev)
2269 {
2270 	return idr_find(&sg_index_idr, dev);
2271 }
2272 
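/*
 * Look up an sg device by index and take a reference on it; returns
 * ERR_PTR(-ENXIO) if absent or ERR_PTR(-ENODEV) if the device is
 * being detached.
 */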
2273 static Sg_device *
2274 sg_get_dev(int dev)
2275 {
2276 	struct sg_device *sdp;
2277 	unsigned long flags;
2278 
2279 	read_lock_irqsave(&sg_index_lock, flags);
2280 	sdp = sg_lookup_dev(dev);
2281 	if (!sdp)
2282 		sdp = ERR_PTR(-ENXIO);
2283 	else if (atomic_read(&sdp->detaching)) {
2284 		/* If sdp->detaching, then the refcount may already be 0, in
2285 		 * which case it would be a bug to do kref_get().
2286 		 */
2287 		sdp = ERR_PTR(-ENODEV);
2288 	} else
2289 		kref_get(&sdp->d_ref);
2290 	read_unlock_irqrestore(&sg_index_lock, flags);
2291 
2292 	return sdp;
2293 }
2294 
2295 #ifdef CONFIG_SCSI_PROC_FS
2296 
2297 static struct proc_dir_entry *sg_proc_sgp = NULL;
2298 
2299 static char sg_proc_sg_dirname[] = "scsi/sg";
2300 
2301 static int sg_proc_seq_show_int(struct seq_file *s, void *v);
2302 
2303 static int sg_proc_single_open_adio(struct inode *inode, struct file *file);
2304 static ssize_t sg_proc_write_adio(struct file *filp, const char __user *buffer,
2305 			          size_t count, loff_t *off);
2306 static const struct file_operations adio_fops = {
2307 	.owner = THIS_MODULE,
2308 	.open = sg_proc_single_open_adio,
2309 	.read = seq_read,
2310 	.llseek = seq_lseek,
2311 	.write = sg_proc_write_adio,
2312 	.release = single_release,
2313 };
2314 
2315 static int sg_proc_single_open_dressz(struct inode *inode, struct file *file);
2316 static ssize_t sg_proc_write_dressz(struct file *filp,
2317 		const char __user *buffer, size_t count, loff_t *off);
2318 static const struct file_operations dressz_fops = {
2319 	.owner = THIS_MODULE,
2320 	.open = sg_proc_single_open_dressz,
2321 	.read = seq_read,
2322 	.llseek = seq_lseek,
2323 	.write = sg_proc_write_dressz,
2324 	.release = single_release,
2325 };
2326 
2327 static int sg_proc_seq_show_version(struct seq_file *s, void *v);
2328 static int sg_proc_single_open_version(struct inode *inode, struct file *file);
2329 static const struct file_operations version_fops = {
2330 	.owner = THIS_MODULE,
2331 	.open = sg_proc_single_open_version,
2332 	.read = seq_read,
2333 	.llseek = seq_lseek,
2334 	.release = single_release,
2335 };
2336 
2337 static int sg_proc_seq_show_devhdr(struct seq_file *s, void *v);
2338 static int sg_proc_single_open_devhdr(struct inode *inode, struct file *file);
2339 static const struct file_operations devhdr_fops = {
2340 	.owner = THIS_MODULE,
2341 	.open = sg_proc_single_open_devhdr,
2342 	.read = seq_read,
2343 	.llseek = seq_lseek,
2344 	.release = single_release,
2345 };
2346 
2347 static int sg_proc_seq_show_dev(struct seq_file *s, void *v);
2348 static int sg_proc_open_dev(struct inode *inode, struct file *file);
2349 static void * dev_seq_start(struct seq_file *s, loff_t *pos);
2350 static void * dev_seq_next(struct seq_file *s, void *v, loff_t *pos);
2351 static void dev_seq_stop(struct seq_file *s, void *v);
2352 static const struct file_operations dev_fops = {
2353 	.owner = THIS_MODULE,
2354 	.open = sg_proc_open_dev,
2355 	.read = seq_read,
2356 	.llseek = seq_lseek,
2357 	.release = seq_release,
2358 };
2359 static const struct seq_operations dev_seq_ops = {
2360 	.start = dev_seq_start,
2361 	.next  = dev_seq_next,
2362 	.stop  = dev_seq_stop,
2363 	.show  = sg_proc_seq_show_dev,
2364 };
2365 
2366 static int sg_proc_seq_show_devstrs(struct seq_file *s, void *v);
2367 static int sg_proc_open_devstrs(struct inode *inode, struct file *file);
2368 static const struct file_operations devstrs_fops = {
2369 	.owner = THIS_MODULE,
2370 	.open = sg_proc_open_devstrs,
2371 	.read = seq_read,
2372 	.llseek = seq_lseek,
2373 	.release = seq_release,
2374 };
2375 static const struct seq_operations devstrs_seq_ops = {
2376 	.start = dev_seq_start,
2377 	.next  = dev_seq_next,
2378 	.stop  = dev_seq_stop,
2379 	.show  = sg_proc_seq_show_devstrs,
2380 };
2381 
2382 static int sg_proc_seq_show_debug(struct seq_file *s, void *v);
2383 static int sg_proc_open_debug(struct inode *inode, struct file *file);
2384 static const struct file_operations debug_fops = {
2385 	.owner = THIS_MODULE,
2386 	.open = sg_proc_open_debug,
2387 	.read = seq_read,
2388 	.llseek = seq_lseek,
2389 	.release = seq_release,
2390 };
2391 static const struct seq_operations debug_seq_ops = {
2392 	.start = dev_seq_start,
2393 	.next  = dev_seq_next,
2394 	.stop  = dev_seq_stop,
2395 	.show  = sg_proc_seq_show_debug,
2396 };
2397 
2398 
2399 struct sg_proc_leaf {
2400 	const char * name;
2401 	const struct file_operations * fops;
2402 };
2403 
2404 static const struct sg_proc_leaf sg_proc_leaf_arr[] = {
2405 	{"allow_dio", &adio_fops},
2406 	{"debug", &debug_fops},
2407 	{"def_reserved_size", &dressz_fops},
2408 	{"device_hdr", &devhdr_fops},
2409 	{"devices", &dev_fops},
2410 	{"device_strs", &devstrs_fops},
2411 	{"version", &version_fops}
2412 };
2413 
2414 static int
2415 sg_proc_init(void)
2416 {
2417 	int num_leaves = ARRAY_SIZE(sg_proc_leaf_arr);
2418 	int k;
2419 
2420 	sg_proc_sgp = proc_mkdir(sg_proc_sg_dirname, NULL);
2421 	if (!sg_proc_sgp)
2422 		return 1;
2423 	for (k = 0; k < num_leaves; ++k) {
2424 		const struct sg_proc_leaf *leaf = &sg_proc_leaf_arr[k];
2425 		umode_t mask = leaf->fops->write ? S_IRUGO | S_IWUSR : S_IRUGO;
2426 		proc_create(leaf->name, mask, sg_proc_sgp, leaf->fops);
2427 	}
2428 	return 0;
2429 }
2430 
2431 static void
2432 sg_proc_cleanup(void)
2433 {
2434 	int k;
2435 	int num_leaves = ARRAY_SIZE(sg_proc_leaf_arr);
2436 
2437 	if (!sg_proc_sgp)
2438 		return;
2439 	for (k = 0; k < num_leaves; ++k)
2440 		remove_proc_entry(sg_proc_leaf_arr[k].name, sg_proc_sgp);
2441 	remove_proc_entry(sg_proc_sg_dirname, NULL);
2442 }
2443 
2444 
2445 static int sg_proc_seq_show_int(struct seq_file *s, void *v)
2446 {
2447 	seq_printf(s, "%d\n", *((int *)s->private));
2448 	return 0;
2449 }
2450 
2451 static int sg_proc_single_open_adio(struct inode *inode, struct file *file)
2452 {
2453 	return single_open(file, sg_proc_seq_show_int, &sg_allow_dio);
2454 }
2455 
2456 static ssize_t
2457 sg_proc_write_adio(struct file *filp, const char __user *buffer,
2458 		   size_t count, loff_t *off)
2459 {
2460 	int err;
2461 	unsigned long num;
2462 
2463 	if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
2464 		return -EACCES;
2465 	err = kstrtoul_from_user(buffer, count, 0, &num);
2466 	if (err)
2467 		return err;
2468 	sg_allow_dio = num ? 1 : 0;
2469 	return count;
2470 }
2471 
2472 static int sg_proc_single_open_dressz(struct inode *inode, struct file *file)
2473 {
2474 	return single_open(file, sg_proc_seq_show_int, &sg_big_buff);
2475 }
2476 
2477 static ssize_t
2478 sg_proc_write_dressz(struct file *filp, const char __user *buffer,
2479 		     size_t count, loff_t *off)
2480 {
2481 	int err;
2482 	unsigned long k = ULONG_MAX;
2483 
2484 	if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
2485 		return -EACCES;
2486 
2487 	err = kstrtoul_from_user(buffer, count, 0, &k);
2488 	if (err)
2489 		return err;
2490 	if (k <= 1048576) {	/* limit "big buff" to 1 MB */
2491 		sg_big_buff = k;
2492 		return count;
2493 	}
2494 	return -ERANGE;
2495 }
2496 
2497 static int sg_proc_seq_show_version(struct seq_file *s, void *v)
2498 {
2499 	seq_printf(s, "%d\t%s [%s]\n", sg_version_num, SG_VERSION_STR,
2500 		   sg_version_date);
2501 	return 0;
2502 }
2503 
2504 static int sg_proc_single_open_version(struct inode *inode, struct file *file)
2505 {
2506 	return single_open(file, sg_proc_seq_show_version, NULL);
2507 }
2508 
2509 static int sg_proc_seq_show_devhdr(struct seq_file *s, void *v)
2510 {
2511 	seq_puts(s, "host\tchan\tid\tlun\ttype\topens\tqdepth\tbusy\tonline\n");
2512 	return 0;
2513 }
2514 
2515 static int sg_proc_single_open_devhdr(struct inode *inode, struct file *file)
2516 {
2517 	return single_open(file, sg_proc_seq_show_devhdr, NULL);
2518 }
2519 
2520 struct sg_proc_deviter {
2521 	loff_t	index;
2522 	size_t	max;
2523 };
2524 
2525 static void * dev_seq_start(struct seq_file *s, loff_t *pos)
2526 {
2527 	struct sg_proc_deviter * it = kmalloc(sizeof(*it), GFP_KERNEL);
2528 
2529 	s->private = it;
2530 	if (!it)
2531 		return NULL;
2532 
2533 	it->index = *pos;
2534 	it->max = sg_last_dev();
2535 	if (it->index >= it->max)
2536 		return NULL;
2537 	return it;
2538 }
2539 
2540 static void * dev_seq_next(struct seq_file *s, void *v, loff_t *pos)
2541 {
2542 	struct sg_proc_deviter * it = s->private;
2543 
2544 	*pos = ++it->index;
2545 	return (it->index < it->max) ? it : NULL;
2546 }
2547 
2548 static void dev_seq_stop(struct seq_file *s, void *v)
2549 {
2550 	kfree(s->private);
2551 }
2552 
2553 static int sg_proc_open_dev(struct inode *inode, struct file *file)
2554 {
2555 	return seq_open(file, &dev_seq_ops);
2556 }
2557 
2558 static int sg_proc_seq_show_dev(struct seq_file *s, void *v)
2559 {
2560 	struct sg_proc_deviter * it = (struct sg_proc_deviter *) v;
2561 	Sg_device *sdp;
2562 	struct scsi_device *scsidp;
2563 	unsigned long iflags;
2564 
2565 	read_lock_irqsave(&sg_index_lock, iflags);
2566 	sdp = it ? sg_lookup_dev(it->index) : NULL;
2567 	if ((NULL == sdp) || (NULL == sdp->device) ||
2568 	    (atomic_read(&sdp->detaching)))
2569 		seq_puts(s, "-1\t-1\t-1\t-1\t-1\t-1\t-1\t-1\t-1\n");
2570 	else {
2571 		scsidp = sdp->device;
2572 		seq_printf(s, "%d\t%d\t%d\t%llu\t%d\t%d\t%d\t%d\t%d\n",
2573 			      scsidp->host->host_no, scsidp->channel,
2574 			      scsidp->id, scsidp->lun, (int) scsidp->type,
2575 			      1,
2576 			      (int) scsidp->queue_depth,
2577 			      (int) atomic_read(&scsidp->device_busy),
2578 			      (int) scsi_device_online(scsidp));
2579 	}
2580 	read_unlock_irqrestore(&sg_index_lock, iflags);
2581 	return 0;
2582 }
2583 
2584 static int sg_proc_open_devstrs(struct inode *inode, struct file *file)
2585 {
2586 	return seq_open(file, &devstrs_seq_ops);
2587 }
2588 
2589 static int sg_proc_seq_show_devstrs(struct seq_file *s, void *v)
2590 {
2591 	struct sg_proc_deviter * it = (struct sg_proc_deviter *) v;
2592 	Sg_device *sdp;
2593 	struct scsi_device *scsidp;
2594 	unsigned long iflags;
2595 
2596 	read_lock_irqsave(&sg_index_lock, iflags);
2597 	sdp = it ? sg_lookup_dev(it->index) : NULL;
2598 	scsidp = sdp ? sdp->device : NULL;
2599 	if (sdp && scsidp && (!atomic_read(&sdp->detaching)))
2600 		seq_printf(s, "%8.8s\t%16.16s\t%4.4s\n",
2601 			   scsidp->vendor, scsidp->model, scsidp->rev);
2602 	else
2603 		seq_puts(s, "<no active device>\n");
2604 	read_unlock_irqrestore(&sg_index_lock, iflags);
2605 	return 0;
2606 }
2607 
2608 /* must be called while holding sg_index_lock */
2609 static void sg_proc_debug_helper(struct seq_file *s, Sg_device * sdp)
2610 {
2611 	int k, m, new_interface, blen, usg;
2612 	Sg_request *srp;
2613 	Sg_fd *fp;
2614 	const sg_io_hdr_t *hp;
2615 	const char * cp;
2616 	unsigned int ms;
2617 
2618 	k = 0;
2619 	list_for_each_entry(fp, &sdp->sfds, sfd_siblings) {
2620 		k++;
2621 		read_lock(&fp->rq_list_lock); /* irqs already disabled */
2622 		seq_printf(s, "   FD(%d): timeout=%dms bufflen=%d "
2623 			   "(res)sgat=%d low_dma=%d\n", k,
2624 			   jiffies_to_msecs(fp->timeout),
2625 			   fp->reserve.bufflen,
2626 			   (int) fp->reserve.k_use_sg,
2627 			   (int) fp->low_dma);
2628 		seq_printf(s, "   cmd_q=%d f_packid=%d k_orphan=%d closed=0\n",
2629 			   (int) fp->cmd_q, (int) fp->force_packid,
2630 			   (int) fp->keep_orphan);
2631 		for (m = 0, srp = fp->headrp;
2632 				srp != NULL;
2633 				++m, srp = srp->nextrp) {
2634 			hp = &srp->header;
2635 			new_interface = (hp->interface_id == '\0') ? 0 : 1;
2636 			if (srp->res_used) {
2637 				if (new_interface &&
2638 				    (SG_FLAG_MMAP_IO & hp->flags))
2639 					cp = "     mmap>> ";
2640 				else
2641 					cp = "     rb>> ";
2642 			} else {
2643 				if (SG_INFO_DIRECT_IO_MASK & hp->info)
2644 					cp = "     dio>> ";
2645 				else
2646 					cp = "     ";
2647 			}
2648 			seq_puts(s, cp);
2649 			blen = srp->data.bufflen;
2650 			usg = srp->data.k_use_sg;
2651 			seq_puts(s, srp->done ?
2652 				 ((1 == srp->done) ?  "rcv:" : "fin:")
2653 				  : "act:");
2654 			seq_printf(s, " id=%d blen=%d",
2655 				   srp->header.pack_id, blen);
2656 			if (srp->done)
2657 				seq_printf(s, " dur=%d", hp->duration);
2658 			else {
2659 				ms = jiffies_to_msecs(jiffies);
2660 				seq_printf(s, " t_o/elap=%d/%d",
2661 					(new_interface ? hp->timeout :
2662 						  jiffies_to_msecs(fp->timeout)),
2663 					(ms > hp->duration ? ms - hp->duration : 0));
2664 			}
2665 			seq_printf(s, "ms sgat=%d op=0x%02x\n", usg,
2666 				   (int) srp->data.cmd_opcode);
2667 		}
2668 		if (0 == m)
2669 			seq_puts(s, "     No requests active\n");
2670 		read_unlock(&fp->rq_list_lock);
2671 	}
2672 }
2673 
2674 static int sg_proc_open_debug(struct inode *inode, struct file *file)
2675 {
2676 	return seq_open(file, &debug_seq_ops);
2677 }
2678 
2679 static int sg_proc_seq_show_debug(struct seq_file *s, void *v)
2680 {
2681 	struct sg_proc_deviter * it = (struct sg_proc_deviter *) v;
2682 	Sg_device *sdp;
2683 	unsigned long iflags;
2684 
2685 	if (it && (0 == it->index))
2686 		seq_printf(s, "max_active_device=%d  def_reserved_size=%d\n",
2687 			   (int)it->max, sg_big_buff);
2688 
2689 	read_lock_irqsave(&sg_index_lock, iflags);
2690 	sdp = it ? sg_lookup_dev(it->index) : NULL;
2691 	if (NULL == sdp)
2692 		goto skip;
2693 	read_lock(&sdp->sfd_lock);
2694 	if (!list_empty(&sdp->sfds)) {
2695 		seq_printf(s, " >>> device=%s ", sdp->disk->disk_name);
2696 		if (atomic_read(&sdp->detaching))
2697 			seq_puts(s, "detaching pending close ");
2698 		else if (sdp->device) {
2699 			struct scsi_device *scsidp = sdp->device;
2700 
2701 			seq_printf(s, "%d:%d:%d:%llu   em=%d",
2702 				   scsidp->host->host_no,
2703 				   scsidp->channel, scsidp->id,
2704 				   scsidp->lun,
2705 				   scsidp->host->hostt->emulated);
2706 		}
2707 		seq_printf(s, " sg_tablesize=%d excl=%d open_cnt=%d\n",
2708 			   sdp->sg_tablesize, sdp->exclude, sdp->open_cnt);
2709 		sg_proc_debug_helper(s, sdp);
2710 	}
2711 	read_unlock(&sdp->sfd_lock);
2712 skip:
2713 	read_unlock_irqrestore(&sg_index_lock, iflags);
2714 	return 0;
2715 }
2716 
2717 #endif				/* CONFIG_SCSI_PROC_FS */
2718 
2719 module_init(init_sg);
2720 module_exit(exit_sg);
2721