/*
 *  History:
 *  Started: Aug 9 by Lawrence Foard (entropy@world.std.com),
 *           to allow user process control of SCSI devices.
 *  Development Sponsored by Killy Corp. NY NY
 *
 * Original driver (sg.c):
 *        Copyright (C) 1992 Lawrence Foard
 * Version 2 and 3 extensions to driver:
 *        Copyright (C) 1998 - 2005 Douglas Gilbert
 *
 *  Modified  19-JAN-1998  Richard Gooch <rgooch@atnf.csiro.au>  Devfs support
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 */

static int sg_version_num = 30532;	/* 2 digits for each component */
#define SG_VERSION_STR "3.5.32"

/*
 *  D. P. Gilbert (dgilbert@interlog.com, dougg@triode.net.au), notes:
 *      - scsi logging is available via SCSI_LOG_TIMEOUT macros. First
 *        the kernel/module needs to be built with CONFIG_SCSI_LOGGING
 *        (otherwise the macros compile to empty statements).
 *
 */
#include <linux/config.h>
#include <linux/module.h>

#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/errno.h>
#include <linux/mtio.h>
#include <linux/ioctl.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/smp_lock.h>
#include <linux/moduleparam.h>
#include <linux/devfs_fs_kernel.h>
#include <linux/cdev.h>
#include <linux/seq_file.h>
#include <linux/blkdev.h>
#include <linux/delay.h>

#include "scsi.h"
#include <scsi/scsi_host.h>
#include <scsi/scsi_driver.h>
#include <scsi/scsi_ioctl.h>
#include <scsi/sg.h>

#include "scsi_logging.h"

#ifdef CONFIG_SCSI_PROC_FS
#include <linux/proc_fs.h>
static char *sg_version_date = "20050117";

static int sg_proc_init(void);
static void sg_proc_cleanup(void);
#endif

#ifndef LINUX_VERSION_CODE
#include <linux/version.h>
#endif				/* LINUX_VERSION_CODE */

#define SG_ALLOW_DIO_DEF 0
#define SG_ALLOW_DIO_CODE /* compile out by commenting this define */

#define SG_MAX_DEVS 32768

/*
 * Suppose you want to calculate the formula muldiv(x,m,d)=int(x * m / d).
 * When using 32 bit integers, x * m may overflow during the calculation.
 * Replacing muldiv(x) with muldiv(x)=((x % d) * m) / d + int(x / d) * m
 * calculates the same result but prevents the overflow when both m and d
 * are "small" numbers (like HZ and USER_HZ).
 * Of course an overflow is unavoidable if the result of muldiv doesn't fit
 * in 32 bits.
 */
#define MULDIV(X,MUL,DIV) ((((X % DIV) * MUL) / DIV) + ((X / DIV) * MUL))

#define SG_DEFAULT_TIMEOUT MULDIV(SG_DEFAULT_TIMEOUT_USER, HZ, USER_HZ)
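
/*
 * Illustrative sketch (not compiled into the driver): converting a 60
 * second timeout expressed in USER_HZ ticks into jiffies.  The HZ and
 * USER_HZ values below are assumptions chosen for the example.
 */
#if 0
static int muldiv_example(void)
{
	/* 60 seconds in USER_HZ (assume 100) ticks */
	int user_ticks = 60 * 100;

	/* with HZ assumed 1000: ((6000 % 100) * 1000) / 100 = 0 and
	   (6000 / 100) * 1000 = 60000, so the result is 60000 jiffies;
	   no intermediate product comes near overflowing 32 bits */
	return MULDIV(user_ticks, 1000, 100);
}
#endif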

int sg_big_buff = SG_DEF_RESERVED_SIZE;
/* N.B. This variable is readable and writeable via
   /proc/scsi/sg/def_reserved_size . Each time sg_open() is called a buffer
   of this size (or less if there is not enough memory) will be reserved
   for use by this file descriptor. [Deprecated usage: this variable is also
   readable via /proc/sys/kernel/sg-big-buff if the sg driver is built into
   the kernel (i.e. it is not a module).] */
static int def_reserved_size = -1;	/* picks up init parameter */
static int sg_allow_dio = SG_ALLOW_DIO_DEF;

#define SG_SECTOR_SZ 512
#define SG_SECTOR_MSK (SG_SECTOR_SZ - 1)

#define SG_DEV_ARR_LUMP 32	/* amount to over allocate sg_dev_arr by */

static int sg_add(struct class_device *);
static void sg_remove(struct class_device *);

static Scsi_Request *dummy_cmdp;	/* only used for sizeof */

static DEFINE_RWLOCK(sg_dev_arr_lock);	/* Also used to lock
					   file descriptor list for device */

static struct class_interface sg_interface = {
	.add		= sg_add,
	.remove		= sg_remove,
};

typedef struct sg_scatter_hold { /* holding area for scsi scatter gather info */
	unsigned short k_use_sg; /* Count of kernel scatter-gather pieces */
	unsigned short sglist_len; /* size of malloc'd scatter-gather list ++ */
	unsigned bufflen;	/* Size of (aggregate) data buffer */
	unsigned b_malloc_len;	/* actual len malloc'ed in buffer */
	void *buffer;		/* Data buffer or scatter list (k_use_sg>0) */
	char dio_in_use;	/* 0->indirect IO (or mmap), 1->dio */
	unsigned char cmd_opcode; /* first byte of command */
} Sg_scatter_hold;

struct sg_device;		/* forward declarations */
struct sg_fd;

typedef struct sg_request {	/* SG_MAX_QUEUE requests outstanding per file */
	Scsi_Request *my_cmdp;	/* != 0  when request with lower levels */
	struct sg_request *nextrp;	/* NULL -> tail request (slist) */
	struct sg_fd *parentfp;	/* NULL -> not in use */
	Sg_scatter_hold data;	/* hold buffer, perhaps scatter list */
	sg_io_hdr_t header;	/* scsi command+info, see <scsi/sg.h> */
	unsigned char sense_b[sizeof (dummy_cmdp->sr_sense_buffer)];
	char res_used;		/* 1 -> using reserve buffer, 0 -> not ... */
	char orphan;		/* 1 -> drop on sight, 0 -> normal */
	char sg_io_owned;	/* 1 -> packet belongs to SG_IO */
	volatile char done;	/* 0->before bh, 1->before read, 2->read */
} Sg_request;

typedef struct sg_fd {		/* holds the state of a file descriptor */
	struct sg_fd *nextfp;	/* NULL when last opened fd on this device */
	struct sg_device *parentdp;	/* owning device */
	wait_queue_head_t read_wait;	/* queue read until command done */
	rwlock_t rq_list_lock;	/* protect access to list in req_arr */
	int timeout;		/* defaults to SG_DEFAULT_TIMEOUT      */
	int timeout_user;	/* defaults to SG_DEFAULT_TIMEOUT_USER */
	Sg_scatter_hold reserve;	/* buffer held for this file descriptor */
	unsigned save_scat_len;	/* original length of trunc. scat. element */
	Sg_request *headrp;	/* head of request slist, NULL->empty */
	struct fasync_struct *async_qp;	/* used by asynchronous notification */
	Sg_request req_arr[SG_MAX_QUEUE];	/* used as singly-linked list */
	char low_dma;		/* as in parent but possibly overridden to 1 */
	char force_packid;	/* 1 -> pack_id input to read(), 0 -> ignored */
	volatile char closed;	/* 1 -> fd closed but request(s) outstanding */
	char cmd_q;		/* 1 -> allow command queuing, 0 -> don't */
	char next_cmd_len;	/* 0 -> automatic (def), >0 -> use on next write() */
	char keep_orphan;	/* 0 -> drop orphan (def), 1 -> keep for read() */
	char mmap_called;	/* 0 -> mmap() never called on this fd */
} Sg_fd;

typedef struct sg_device { /* holds the state of each scsi generic device */
	struct scsi_device *device;
	wait_queue_head_t o_excl_wait;	/* queue open() when O_EXCL in use */
	int sg_tablesize;	/* adapter's max scatter-gather table size */
	Sg_fd *headfp;		/* first open fd belonging to this device */
	volatile char detached;	/* 0->attached, 1->detached pending removal */
	volatile char exclude;	/* opened for exclusive access */
	char sgdebug;		/* 0->off, 1->sense, 9->dump dev, 10-> all devs */
	struct gendisk *disk;
	struct cdev * cdev;	/* char_dev [sysfs: /sys/cdev/major/sg<n>] */
} Sg_device;

static int sg_fasync(int fd, struct file *filp, int mode);
static void sg_cmd_done(Scsi_Cmnd * SCpnt);	/* tasklet or soft irq callback */
static int sg_start_req(Sg_request * srp);
static void sg_finish_rem_req(Sg_request * srp);
static int sg_build_indirect(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size);
static int sg_build_sgat(Sg_scatter_hold * schp, const Sg_fd * sfp,
			 int tablesize);
static ssize_t sg_new_read(Sg_fd * sfp, char __user *buf, size_t count,
			   Sg_request * srp);
static ssize_t sg_new_write(Sg_fd * sfp, const char __user *buf, size_t count,
			    int blocking, int read_only, Sg_request ** o_srp);
static int sg_common_write(Sg_fd * sfp, Sg_request * srp,
			   unsigned char *cmnd, int timeout, int blocking);
static int sg_u_iovec(sg_io_hdr_t * hp, int sg_num, int ind,
		      int wr_xf, int *countp, unsigned char __user **up);
static int sg_write_xfer(Sg_request * srp);
static int sg_read_xfer(Sg_request * srp);
static int sg_read_oxfer(Sg_request * srp, char __user *outp, int num_read_xfer);
static void sg_remove_scat(Sg_scatter_hold * schp);
static void sg_build_reserve(Sg_fd * sfp, int req_size);
static void sg_link_reserve(Sg_fd * sfp, Sg_request * srp, int size);
static void sg_unlink_reserve(Sg_fd * sfp, Sg_request * srp);
static char *sg_page_malloc(int rqSz, int lowDma, int *retSzp);
static void sg_page_free(char *buff, int size);
static Sg_fd *sg_add_sfp(Sg_device * sdp, int dev);
static int sg_remove_sfp(Sg_device * sdp, Sg_fd * sfp);
static void __sg_remove_sfp(Sg_device * sdp, Sg_fd * sfp);
static Sg_request *sg_get_rq_mark(Sg_fd * sfp, int pack_id);
static Sg_request *sg_add_request(Sg_fd * sfp);
static int sg_remove_request(Sg_fd * sfp, Sg_request * srp);
static int sg_res_in_use(Sg_fd * sfp);
static int sg_allow_access(unsigned char opcode, char dev_type);
static int sg_build_direct(Sg_request * srp, Sg_fd * sfp, int dxfer_len);
static Sg_device *sg_get_dev(int dev);
static inline unsigned char *sg_scatg2virt(const struct scatterlist *sclp);
#ifdef CONFIG_SCSI_PROC_FS
static int sg_last_dev(void);
#endif

static Sg_device **sg_dev_arr = NULL;
static int sg_dev_max;
static int sg_nr_dev;

#define SZ_SG_HEADER sizeof(struct sg_header)
#define SZ_SG_IO_HDR sizeof(sg_io_hdr_t)
#define SZ_SG_IOVEC sizeof(sg_iovec_t)
#define SZ_SG_REQ_INFO sizeof(sg_req_info_t)

static int
sg_open(struct inode *inode, struct file *filp)
{
	int dev = iminor(inode);
	int flags = filp->f_flags;
	Sg_device *sdp;
	Sg_fd *sfp;
	int res;
	int retval;

	nonseekable_open(inode, filp);
	SCSI_LOG_TIMEOUT(3, printk("sg_open: dev=%d, flags=0x%x\n", dev, flags));
	sdp = sg_get_dev(dev);
	if ((!sdp) || (!sdp->device))
		return -ENXIO;
	if (sdp->detached)
		return -ENODEV;

	/* This driver's module count bumped by fops_get in <linux/fs.h> */
	/* Prevent the device driver from vanishing while we sleep */
	retval = scsi_device_get(sdp->device);
	if (retval)
		return retval;

	if (!((flags & O_NONBLOCK) ||
	      scsi_block_when_processing_errors(sdp->device))) {
		retval = -ENXIO;
		/* we are in error recovery for this device */
		goto error_out;
	}

	if (flags & O_EXCL) {
		if (O_RDONLY == (flags & O_ACCMODE)) {
			retval = -EPERM; /* Can't lock it with read only access */
			goto error_out;
		}
		if (sdp->headfp && (flags & O_NONBLOCK)) {
			retval = -EBUSY;
			goto error_out;
		}
		res = 0;
		__wait_event_interruptible(sdp->o_excl_wait,
			((sdp->headfp || sdp->exclude) ? 0 : (sdp->exclude = 1)), res);
		if (res) {
			retval = res;	/* -ERESTARTSYS because signal hit process */
			goto error_out;
		}
	} else if (sdp->exclude) {	/* some other fd has an exclusive lock on dev */
		if (flags & O_NONBLOCK) {
			retval = -EBUSY;
			goto error_out;
		}
		res = 0;
		__wait_event_interruptible(sdp->o_excl_wait, (!sdp->exclude),
					   res);
		if (res) {
			retval = res;	/* -ERESTARTSYS because signal hit process */
			goto error_out;
		}
	}
	if (sdp->detached) {
		retval = -ENODEV;
		goto error_out;
	}
	if (!sdp->headfp) {	/* no existing opens on this device */
		sdp->sgdebug = 0;
		sdp->sg_tablesize = sdp->device->host->sg_tablesize;
	}
	if ((sfp = sg_add_sfp(sdp, dev)))
		filp->private_data = sfp;
	else {
		if (flags & O_EXCL)
			sdp->exclude = 0;	/* undo if error */
		retval = -ENOMEM;
		goto error_out;
	}
	return 0;

      error_out:
	scsi_device_put(sdp->device);
	return retval;
}
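
/*
 * Illustrative userspace sketch (not part of the driver): how the O_EXCL
 * and O_NONBLOCK semantics above look from an application.  The device
 * path is an assumption for the example.
 */
#if 0
#include <errno.h>
#include <fcntl.h>

static int open_sg_exclusive(void)
{
	/* O_EXCL with O_NONBLOCK fails with EBUSY instead of sleeping
	   on o_excl_wait when another fd already has the device open */
	int fd = open("/dev/sg0", O_RDWR | O_EXCL | O_NONBLOCK);

	if (fd < 0 && errno == EBUSY) {
		/* device busy: retry later or open without O_EXCL */
	}
	return fd;
}
#endif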

/* Following function was formerly called 'sg_close' */
static int
sg_release(struct inode *inode, struct file *filp)
{
	Sg_device *sdp;
	Sg_fd *sfp;

	if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
		return -ENXIO;
	SCSI_LOG_TIMEOUT(3, printk("sg_release: %s\n", sdp->disk->disk_name));
	sg_fasync(-1, filp, 0);	/* remove filp from async notification list */
	if (0 == sg_remove_sfp(sdp, sfp)) {	/* Returns 1 when sdp gone */
		if (!sdp->detached) {
			scsi_device_put(sdp->device);
		}
		sdp->exclude = 0;
		wake_up_interruptible(&sdp->o_excl_wait);
	}
	return 0;
}

static ssize_t
sg_read(struct file *filp, char __user *buf, size_t count, loff_t * ppos)
{
	int res;
	Sg_device *sdp;
	Sg_fd *sfp;
	Sg_request *srp;
	int req_pack_id = -1;
	struct sg_header old_hdr;
	sg_io_hdr_t new_hdr;
	sg_io_hdr_t *hp;

	if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
		return -ENXIO;
	SCSI_LOG_TIMEOUT(3, printk("sg_read: %s, count=%d\n",
				   sdp->disk->disk_name, (int) count));
	if (!access_ok(VERIFY_WRITE, buf, count))
		return -EFAULT;
	if (sfp->force_packid && (count >= SZ_SG_HEADER)) {
		if (__copy_from_user(&old_hdr, buf, SZ_SG_HEADER))
			return -EFAULT;
		if (old_hdr.reply_len < 0) {
			if (count >= SZ_SG_IO_HDR) {
				if (__copy_from_user
				    (&new_hdr, buf, SZ_SG_IO_HDR))
					return -EFAULT;
				req_pack_id = new_hdr.pack_id;
			}
		} else
			req_pack_id = old_hdr.pack_id;
	}
	srp = sg_get_rq_mark(sfp, req_pack_id);
	if (!srp) {		/* now wait on packet to arrive */
		if (sdp->detached)
			return -ENODEV;
		if (filp->f_flags & O_NONBLOCK)
			return -EAGAIN;
		while (1) {
			res = 0;	/* following is a macro that beats race condition */
			__wait_event_interruptible(sfp->read_wait,
				(sdp->detached || (srp = sg_get_rq_mark(sfp, req_pack_id))),
						   res);
			if (sdp->detached)
				return -ENODEV;
			if (0 == res)
				break;
			return res;	/* -ERESTARTSYS because signal hit process */
		}
	}
	if (srp->header.interface_id != '\0')
		return sg_new_read(sfp, buf, count, srp);

	hp = &srp->header;
	memset(&old_hdr, 0, SZ_SG_HEADER);
	old_hdr.reply_len = (int) hp->timeout;
	old_hdr.pack_len = old_hdr.reply_len; /* very old, strange behaviour */
	old_hdr.pack_id = hp->pack_id;
	old_hdr.twelve_byte =
	    ((srp->data.cmd_opcode >= 0xc0) && (12 == hp->cmd_len)) ? 1 : 0;
	old_hdr.target_status = hp->masked_status;
	old_hdr.host_status = hp->host_status;
	old_hdr.driver_status = hp->driver_status;
	if ((CHECK_CONDITION & hp->masked_status) ||
	    (DRIVER_SENSE & hp->driver_status))
		memcpy(old_hdr.sense_buffer, srp->sense_b,
		       sizeof (old_hdr.sense_buffer));
	switch (hp->host_status) {
	/* This setup of 'result' is for backward compatibility and is best
	   ignored by the user who should use target, host + driver status */
	case DID_OK:
	case DID_PASSTHROUGH:
	case DID_SOFT_ERROR:
		old_hdr.result = 0;
		break;
	case DID_NO_CONNECT:
	case DID_BUS_BUSY:
	case DID_TIME_OUT:
		old_hdr.result = EBUSY;
		break;
	case DID_BAD_TARGET:
	case DID_ABORT:
	case DID_PARITY:
	case DID_RESET:
	case DID_BAD_INTR:
		old_hdr.result = EIO;
		break;
	case DID_ERROR:
		old_hdr.result = (srp->sense_b[0] == 0 &&
				  hp->masked_status == GOOD) ? 0 : EIO;
		break;
	default:
		old_hdr.result = EIO;
		break;
	}

	/* Now copy the result back to the user buffer.  */
	if (count >= SZ_SG_HEADER) {
		if (__copy_to_user(buf, &old_hdr, SZ_SG_HEADER))
			return -EFAULT;
		buf += SZ_SG_HEADER;
		if (count > old_hdr.reply_len)
			count = old_hdr.reply_len;
		if (count > SZ_SG_HEADER) {
			if ((res =
			     sg_read_oxfer(srp, buf, count - SZ_SG_HEADER)))
				return -EFAULT;
		}
	} else
		count = (old_hdr.result == 0) ? 0 : -EIO;
	sg_finish_rem_req(srp);
	return count;
}
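
/*
 * Illustrative userspace sketch (not part of the driver): using
 * SG_SET_FORCE_PACK_ID so that a subsequent read() returns the reply
 * for one particular queued command rather than the oldest one.  The
 * pack_id value is an assumption for the example.
 */
#if 0
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <scsi/sg.h>

static int read_reply_for(int fd, int wanted_pack_id)
{
	sg_io_hdr_t hdr;
	int one = 1;

	if (ioctl(fd, SG_SET_FORCE_PACK_ID, &one) < 0)
		return -1;
	memset(&hdr, 0, sizeof(hdr));
	hdr.interface_id = 'S';
	/* dxfer_direction overlays the old header's reply_len; its
	   negative value is what makes sg_read() treat this as a v3
	   header and look at new_hdr.pack_id */
	hdr.dxfer_direction = SG_DXFER_NONE;
	hdr.pack_id = wanted_pack_id;
	return read(fd, &hdr, sizeof(hdr));
}
#endif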

static ssize_t
sg_new_read(Sg_fd * sfp, char __user *buf, size_t count, Sg_request * srp)
{
	sg_io_hdr_t *hp = &srp->header;
	int err = 0;
	int len;

	if (count < SZ_SG_IO_HDR) {
		err = -EINVAL;
		goto err_out;
	}
	hp->sb_len_wr = 0;
	if ((hp->mx_sb_len > 0) && hp->sbp) {
		if ((CHECK_CONDITION & hp->masked_status) ||
		    (DRIVER_SENSE & hp->driver_status)) {
			int sb_len = sizeof (dummy_cmdp->sr_sense_buffer);
			sb_len = (hp->mx_sb_len > sb_len) ? sb_len : hp->mx_sb_len;
			len = 8 + (int) srp->sense_b[7];	/* Additional sense length field */
			len = (len > sb_len) ? sb_len : len;
			if (copy_to_user(hp->sbp, srp->sense_b, len)) {
				err = -EFAULT;
				goto err_out;
			}
			hp->sb_len_wr = len;
		}
	}
	if (hp->masked_status || hp->host_status || hp->driver_status)
		hp->info |= SG_INFO_CHECK;
	if (copy_to_user(buf, hp, SZ_SG_IO_HDR)) {
		err = -EFAULT;
		goto err_out;
	}
	err = sg_read_xfer(srp);
      err_out:
	sg_finish_rem_req(srp);
	return (0 == err) ? count : err;
}

static ssize_t
sg_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos)
{
	int mxsize, cmd_size, k;
	int input_size, blocking;
	unsigned char opcode;
	Sg_device *sdp;
	Sg_fd *sfp;
	Sg_request *srp;
	struct sg_header old_hdr;
	sg_io_hdr_t *hp;
	unsigned char cmnd[sizeof (dummy_cmdp->sr_cmnd)];

	if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
		return -ENXIO;
	SCSI_LOG_TIMEOUT(3, printk("sg_write: %s, count=%d\n",
				   sdp->disk->disk_name, (int) count));
	if (sdp->detached)
		return -ENODEV;
	if (!((filp->f_flags & O_NONBLOCK) ||
	      scsi_block_when_processing_errors(sdp->device)))
		return -ENXIO;

	if (!access_ok(VERIFY_READ, buf, count))
		return -EFAULT;	/* protects following copy_from_user()s + get_user()s */
	if (count < SZ_SG_HEADER)
		return -EIO;
	if (__copy_from_user(&old_hdr, buf, SZ_SG_HEADER))
		return -EFAULT;
	blocking = !(filp->f_flags & O_NONBLOCK);
	if (old_hdr.reply_len < 0)
		return sg_new_write(sfp, buf, count, blocking, 0, NULL);
	if (count < (SZ_SG_HEADER + 6))
		return -EIO;	/* The minimum scsi command length is 6 bytes. */

	if (!(srp = sg_add_request(sfp))) {
		SCSI_LOG_TIMEOUT(1, printk("sg_write: queue full\n"));
		return -EDOM;
	}
	buf += SZ_SG_HEADER;
	__get_user(opcode, buf);
	if (sfp->next_cmd_len > 0) {
		if (sfp->next_cmd_len > MAX_COMMAND_SIZE) {
			SCSI_LOG_TIMEOUT(1, printk("sg_write: command length too long\n"));
			sfp->next_cmd_len = 0;
			sg_remove_request(sfp, srp);
			return -EIO;
		}
		cmd_size = sfp->next_cmd_len;
		sfp->next_cmd_len = 0;	/* reset so that only this write() is affected */
	} else {
		cmd_size = COMMAND_SIZE(opcode);	/* based on SCSI command group */
		if ((opcode >= 0xc0) && old_hdr.twelve_byte)
			cmd_size = 12;
	}
	SCSI_LOG_TIMEOUT(4, printk(
		"sg_write:   scsi opcode=0x%02x, cmd_size=%d\n", (int) opcode, cmd_size));
/* Determine buffer size.  */
	input_size = count - cmd_size;
	mxsize = (input_size > old_hdr.reply_len) ? input_size : old_hdr.reply_len;
	mxsize -= SZ_SG_HEADER;
	input_size -= SZ_SG_HEADER;
	if (input_size < 0) {
		sg_remove_request(sfp, srp);
		return -EIO;	/* User did not pass enough bytes for this command. */
	}
	hp = &srp->header;
	hp->interface_id = '\0';	/* indicator of old interface tunnelled */
	hp->cmd_len = (unsigned char) cmd_size;
	hp->iovec_count = 0;
	hp->mx_sb_len = 0;
	if (input_size > 0)
		hp->dxfer_direction = (old_hdr.reply_len > SZ_SG_HEADER) ?
		    SG_DXFER_TO_FROM_DEV : SG_DXFER_TO_DEV;
	else
		hp->dxfer_direction = (mxsize > 0) ? SG_DXFER_FROM_DEV : SG_DXFER_NONE;
	hp->dxfer_len = mxsize;
	hp->dxferp = (char __user *)buf + cmd_size;
	hp->sbp = NULL;
	hp->timeout = old_hdr.reply_len;	/* structure abuse ... */
	hp->flags = input_size;	/* structure abuse ... */
	hp->pack_id = old_hdr.pack_id;
	hp->usr_ptr = NULL;
	if (__copy_from_user(cmnd, buf, cmd_size))
		return -EFAULT;
	/*
	 * SG_DXFER_TO_FROM_DEV is functionally equivalent to SG_DXFER_FROM_DEV,
	 * but it is possible that the app intended SG_DXFER_TO_DEV, because there
	 * is a non-zero input_size, so emit a warning.
	 */
	if (hp->dxfer_direction == SG_DXFER_TO_FROM_DEV)
		if (printk_ratelimit())
			printk(KERN_WARNING
			       "sg_write: data in/out %d/%d bytes for SCSI command 0x%x--"
			       "guessing data in;\n" KERN_WARNING "   "
			       "program %s not setting count and/or reply_len properly\n",
			       old_hdr.reply_len - (int)SZ_SG_HEADER,
			       input_size, (unsigned int) cmnd[0],
			       current->comm);
	k = sg_common_write(sfp, srp, cmnd, sfp->timeout, blocking);
	return (k < 0) ? k : count;
}
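
/*
 * Illustrative userspace sketch (not part of the driver): the old
 * (sg v2) interface tunnelled through the code above.  The command is
 * packed after a struct sg_header in one write(); the reply, prefixed
 * by the same header, is collected with read().  Sizes and the device
 * path are assumptions for the example.
 */
#if 0
#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <scsi/sg.h>

static int old_interface_inquiry(void)
{
	unsigned char cdb[6] = { 0x12, 0, 0, 0, 96, 0 };	/* INQUIRY */
	unsigned char wbuf[sizeof(struct sg_header) + sizeof(cdb)];
	unsigned char rbuf[sizeof(struct sg_header) + 96];
	struct sg_header *hp = (struct sg_header *) wbuf;
	int fd = open("/dev/sg0", O_RDWR);

	if (fd < 0)
		return -1;
	memset(wbuf, 0, sizeof(wbuf));
	/* reply_len >= 0 is what routes this write() down the old path */
	hp->reply_len = sizeof(rbuf);
	memcpy(wbuf + sizeof(struct sg_header), cdb, sizeof(cdb));
	if (write(fd, wbuf, sizeof(wbuf)) < 0)
		return -1;
	return read(fd, rbuf, sizeof(rbuf));	/* header + INQUIRY data */
}
#endif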

static ssize_t
sg_new_write(Sg_fd * sfp, const char __user *buf, size_t count,
	     int blocking, int read_only, Sg_request ** o_srp)
{
	int k;
	Sg_request *srp;
	sg_io_hdr_t *hp;
	unsigned char cmnd[sizeof (dummy_cmdp->sr_cmnd)];
	int timeout;
	unsigned long ul_timeout;

	if (count < SZ_SG_IO_HDR)
		return -EINVAL;
	if (!access_ok(VERIFY_READ, buf, count))
		return -EFAULT; /* protects following copy_from_user()s + get_user()s */

	sfp->cmd_q = 1;	/* when sg_io_hdr seen, set command queuing on */
	if (!(srp = sg_add_request(sfp))) {
		SCSI_LOG_TIMEOUT(1, printk("sg_new_write: queue full\n"));
		return -EDOM;
	}
	hp = &srp->header;
	if (__copy_from_user(hp, buf, SZ_SG_IO_HDR)) {
		sg_remove_request(sfp, srp);
		return -EFAULT;
	}
	if (hp->interface_id != 'S') {
		sg_remove_request(sfp, srp);
		return -ENOSYS;
	}
	if (hp->flags & SG_FLAG_MMAP_IO) {
		if (hp->dxfer_len > sfp->reserve.bufflen) {
			sg_remove_request(sfp, srp);
			return -ENOMEM;	/* MMAP_IO size must fit in reserve buffer */
		}
		if (hp->flags & SG_FLAG_DIRECT_IO) {
			sg_remove_request(sfp, srp);
			return -EINVAL;	/* either MMAP_IO or DIRECT_IO (not both) */
		}
		if (sg_res_in_use(sfp)) {
			sg_remove_request(sfp, srp);
			return -EBUSY;	/* reserve buffer already being used */
		}
	}
	ul_timeout = msecs_to_jiffies(srp->header.timeout);
	timeout = (ul_timeout < INT_MAX) ? ul_timeout : INT_MAX;
	if ((!hp->cmdp) || (hp->cmd_len < 6) || (hp->cmd_len > sizeof (cmnd))) {
		sg_remove_request(sfp, srp);
		return -EMSGSIZE;
	}
	if (!access_ok(VERIFY_READ, hp->cmdp, hp->cmd_len)) {
		sg_remove_request(sfp, srp);
		return -EFAULT;	/* protects following copy_from_user()s + get_user()s */
	}
	if (__copy_from_user(cmnd, hp->cmdp, hp->cmd_len)) {
		sg_remove_request(sfp, srp);
		return -EFAULT;
	}
	if (read_only &&
	    (!sg_allow_access(cmnd[0], sfp->parentdp->device->type))) {
		sg_remove_request(sfp, srp);
		return -EPERM;
	}
	k = sg_common_write(sfp, srp, cmnd, timeout, blocking);
	if (k < 0)
		return k;
	if (o_srp)
		*o_srp = srp;
	return count;
}

static int
sg_common_write(Sg_fd * sfp, Sg_request * srp,
		unsigned char *cmnd, int timeout, int blocking)
{
	int k;
	Scsi_Request *SRpnt;
	Sg_device *sdp = sfp->parentdp;
	sg_io_hdr_t *hp = &srp->header;
	request_queue_t *q;

	srp->data.cmd_opcode = cmnd[0];	/* hold opcode of command */
	hp->status = 0;
	hp->masked_status = 0;
	hp->msg_status = 0;
	hp->info = 0;
	hp->host_status = 0;
	hp->driver_status = 0;
	hp->resid = 0;
	SCSI_LOG_TIMEOUT(4, printk("sg_common_write:  scsi opcode=0x%02x, cmd_size=%d\n",
			  (int) cmnd[0], (int) hp->cmd_len));

	if ((k = sg_start_req(srp))) {
		SCSI_LOG_TIMEOUT(1, printk("sg_write: start_req err=%d\n", k));
		sg_finish_rem_req(srp);
		return k;	/* probably out of space --> ENOMEM */
	}
	if ((k = sg_write_xfer(srp))) {
		SCSI_LOG_TIMEOUT(1, printk("sg_write: write_xfer, bad address\n"));
		sg_finish_rem_req(srp);
		return k;
	}
	if (sdp->detached) {
		sg_finish_rem_req(srp);
		return -ENODEV;
	}
	SRpnt = scsi_allocate_request(sdp->device, GFP_ATOMIC);
	if (SRpnt == NULL) {
		SCSI_LOG_TIMEOUT(1, printk("sg_write: no mem\n"));
		sg_finish_rem_req(srp);
		return -ENOMEM;
	}

	srp->my_cmdp = SRpnt;
	q = SRpnt->sr_device->request_queue;
	SRpnt->sr_request->rq_disk = sdp->disk;
	SRpnt->sr_sense_buffer[0] = 0;
	SRpnt->sr_cmd_len = hp->cmd_len;
	SRpnt->sr_use_sg = srp->data.k_use_sg;
	SRpnt->sr_sglist_len = srp->data.sglist_len;
	SRpnt->sr_bufflen = srp->data.bufflen;
	SRpnt->sr_underflow = 0;
	SRpnt->sr_buffer = srp->data.buffer;
	switch (hp->dxfer_direction) {
	case SG_DXFER_TO_FROM_DEV:
	case SG_DXFER_FROM_DEV:
		SRpnt->sr_data_direction = SCSI_DATA_READ;
		break;
	case SG_DXFER_TO_DEV:
		SRpnt->sr_data_direction = SCSI_DATA_WRITE;
		break;
	case SG_DXFER_UNKNOWN:
		SRpnt->sr_data_direction = SCSI_DATA_UNKNOWN;
		break;
	default:
		SRpnt->sr_data_direction = SCSI_DATA_NONE;
		break;
	}
	SRpnt->upper_private_data = srp;
	srp->data.k_use_sg = 0;
	srp->data.sglist_len = 0;
	srp->data.bufflen = 0;
	srp->data.buffer = NULL;
	hp->duration = jiffies;	/* unit jiffies now, millisecs after done */
/* Now send everything off to the mid-level. The next time we hear about this
   packet is when sg_cmd_done() is called (i.e. a callback). */
	scsi_do_req(SRpnt, (void *) cmnd,
		    (void *) SRpnt->sr_buffer, hp->dxfer_len,
		    sg_cmd_done, timeout, SG_DEFAULT_RETRIES);
	/* dxfer_len overwrites SRpnt->sr_bufflen, hence need for b_malloc_len */
	return 0;
}

static int
sg_srp_done(Sg_request *srp, Sg_fd *sfp)
{
	unsigned long iflags;
	int done;

	read_lock_irqsave(&sfp->rq_list_lock, iflags);
	done = srp->done;
	read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
	return done;
}

static int
sg_ioctl(struct inode *inode, struct file *filp,
	 unsigned int cmd_in, unsigned long arg)
{
	void __user *p = (void __user *)arg;
	int __user *ip = p;
	int result, val, read_only;
	Sg_device *sdp;
	Sg_fd *sfp;
	Sg_request *srp;
	unsigned long iflags;

	if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
		return -ENXIO;
	SCSI_LOG_TIMEOUT(3, printk("sg_ioctl: %s, cmd=0x%x\n",
				   sdp->disk->disk_name, (int) cmd_in));
	read_only = (O_RDWR != (filp->f_flags & O_ACCMODE));

	switch (cmd_in) {
	case SG_IO:
		{
			int blocking = 1;	/* ignore O_NONBLOCK flag */

			if (sdp->detached)
				return -ENODEV;
			if (!scsi_block_when_processing_errors(sdp->device))
				return -ENXIO;
			if (!access_ok(VERIFY_WRITE, p, SZ_SG_IO_HDR))
				return -EFAULT;
			result =
			    sg_new_write(sfp, p, SZ_SG_IO_HDR,
					 blocking, read_only, &srp);
			if (result < 0)
				return result;
			srp->sg_io_owned = 1;
			while (1) {
				result = 0;	/* following macro to beat race condition */
				__wait_event_interruptible(sfp->read_wait,
					(sdp->detached || sfp->closed || sg_srp_done(srp, sfp)),
							   result);
				if (sdp->detached)
					return -ENODEV;
				if (sfp->closed)
					return 0;	/* request packet dropped already */
				if (0 == result)
					break;
				srp->orphan = 1;
				return result;	/* -ERESTARTSYS because signal hit process */
			}
			write_lock_irqsave(&sfp->rq_list_lock, iflags);
			srp->done = 2;
			write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
			result = sg_new_read(sfp, p, SZ_SG_IO_HDR, srp);
			return (result < 0) ? result : 0;
		}
	case SG_SET_TIMEOUT:
		result = get_user(val, ip);
		if (result)
			return result;
		if (val < 0)
			return -EIO;
		if (val >= MULDIV (INT_MAX, USER_HZ, HZ))
		    val = MULDIV (INT_MAX, USER_HZ, HZ);
		sfp->timeout_user = val;
		sfp->timeout = MULDIV (val, HZ, USER_HZ);

		return 0;
	case SG_GET_TIMEOUT:	/* N.B. User receives timeout as return value */
				/* strange ..., for backward compatibility */
		return sfp->timeout_user;
	case SG_SET_FORCE_LOW_DMA:
		result = get_user(val, ip);
		if (result)
			return result;
		if (val) {
			sfp->low_dma = 1;
			if ((0 == sfp->low_dma) && (0 == sg_res_in_use(sfp))) {
				val = (int) sfp->reserve.bufflen;
				sg_remove_scat(&sfp->reserve);
				sg_build_reserve(sfp, val);
			}
		} else {
			if (sdp->detached)
				return -ENODEV;
			sfp->low_dma = sdp->device->host->unchecked_isa_dma;
		}
		return 0;
	case SG_GET_LOW_DMA:
		return put_user((int) sfp->low_dma, ip);
	case SG_GET_SCSI_ID:
		if (!access_ok(VERIFY_WRITE, p, sizeof (sg_scsi_id_t)))
			return -EFAULT;
		else {
			sg_scsi_id_t __user *sg_idp = p;

			if (sdp->detached)
				return -ENODEV;
			__put_user((int) sdp->device->host->host_no,
				   &sg_idp->host_no);
			__put_user((int) sdp->device->channel,
				   &sg_idp->channel);
			__put_user((int) sdp->device->id, &sg_idp->scsi_id);
			__put_user((int) sdp->device->lun, &sg_idp->lun);
			__put_user((int) sdp->device->type, &sg_idp->scsi_type);
			__put_user((short) sdp->device->host->cmd_per_lun,
				   &sg_idp->h_cmd_per_lun);
			__put_user((short) sdp->device->queue_depth,
				   &sg_idp->d_queue_depth);
			__put_user(0, &sg_idp->unused[0]);
			__put_user(0, &sg_idp->unused[1]);
			return 0;
		}
	case SG_SET_FORCE_PACK_ID:
		result = get_user(val, ip);
		if (result)
			return result;
		sfp->force_packid = val ? 1 : 0;
		return 0;
	case SG_GET_PACK_ID:
		if (!access_ok(VERIFY_WRITE, ip, sizeof (int)))
			return -EFAULT;
		read_lock_irqsave(&sfp->rq_list_lock, iflags);
		for (srp = sfp->headrp; srp; srp = srp->nextrp) {
			if ((1 == srp->done) && (!srp->sg_io_owned)) {
				read_unlock_irqrestore(&sfp->rq_list_lock,
						       iflags);
				__put_user(srp->header.pack_id, ip);
				return 0;
			}
		}
		read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
		__put_user(-1, ip);
		return 0;
	case SG_GET_NUM_WAITING:
		read_lock_irqsave(&sfp->rq_list_lock, iflags);
		for (val = 0, srp = sfp->headrp; srp; srp = srp->nextrp) {
			if ((1 == srp->done) && (!srp->sg_io_owned))
				++val;
		}
		read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
		return put_user(val, ip);
	case SG_GET_SG_TABLESIZE:
		return put_user(sdp->sg_tablesize, ip);
	case SG_SET_RESERVED_SIZE:
		result = get_user(val, ip);
		if (result)
			return result;
		if (val < 0)
			return -EINVAL;
		if (val != sfp->reserve.bufflen) {
			if (sg_res_in_use(sfp) || sfp->mmap_called)
				return -EBUSY;
			sg_remove_scat(&sfp->reserve);
			sg_build_reserve(sfp, val);
		}
		return 0;
	case SG_GET_RESERVED_SIZE:
		val = (int) sfp->reserve.bufflen;
		return put_user(val, ip);
	case SG_SET_COMMAND_Q:
		result = get_user(val, ip);
		if (result)
			return result;
		sfp->cmd_q = val ? 1 : 0;
		return 0;
	case SG_GET_COMMAND_Q:
		return put_user((int) sfp->cmd_q, ip);
	case SG_SET_KEEP_ORPHAN:
		result = get_user(val, ip);
		if (result)
			return result;
		sfp->keep_orphan = val;
		return 0;
	case SG_GET_KEEP_ORPHAN:
		return put_user((int) sfp->keep_orphan, ip);
	case SG_NEXT_CMD_LEN:
		result = get_user(val, ip);
		if (result)
			return result;
		sfp->next_cmd_len = (val > 0) ? val : 0;
		return 0;
	case SG_GET_VERSION_NUM:
		return put_user(sg_version_num, ip);
	case SG_GET_ACCESS_COUNT:
		/* faked - we don't have a real access count anymore */
		val = (sdp->device ? 1 : 0);
		return put_user(val, ip);
	case SG_GET_REQUEST_TABLE:
		if (!access_ok(VERIFY_WRITE, p, SZ_SG_REQ_INFO * SG_MAX_QUEUE))
			return -EFAULT;
		else {
			sg_req_info_t rinfo[SG_MAX_QUEUE];
			Sg_request *srp;
			read_lock_irqsave(&sfp->rq_list_lock, iflags);
			for (srp = sfp->headrp, val = 0; val < SG_MAX_QUEUE;
			     ++val, srp = srp ? srp->nextrp : srp) {
				memset(&rinfo[val], 0, SZ_SG_REQ_INFO);
				if (srp) {
					rinfo[val].req_state = srp->done + 1;
					rinfo[val].problem =
					    srp->header.masked_status &
					    srp->header.host_status &
					    srp->header.driver_status;
					rinfo[val].duration =
					    srp->done ? srp->header.duration :
					    jiffies_to_msecs(
						jiffies - srp->header.duration);
					rinfo[val].orphan = srp->orphan;
					rinfo[val].sg_io_owned = srp->sg_io_owned;
					rinfo[val].pack_id = srp->header.pack_id;
					rinfo[val].usr_ptr = srp->header.usr_ptr;
				}
			}
			read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
			return (__copy_to_user(p, rinfo,
			        SZ_SG_REQ_INFO * SG_MAX_QUEUE) ? -EFAULT : 0);
		}
	case SG_EMULATED_HOST:
		if (sdp->detached)
			return -ENODEV;
		return put_user(sdp->device->host->hostt->emulated, ip);
	case SG_SCSI_RESET:
		if (sdp->detached)
			return -ENODEV;
		if (filp->f_flags & O_NONBLOCK) {
			if (test_bit(SHOST_RECOVERY,
				     &sdp->device->host->shost_state))
				return -EBUSY;
		} else if (!scsi_block_when_processing_errors(sdp->device))
			return -EBUSY;
		result = get_user(val, ip);
		if (result)
			return result;
		if (SG_SCSI_RESET_NOTHING == val)
			return 0;
		switch (val) {
		case SG_SCSI_RESET_DEVICE:
			val = SCSI_TRY_RESET_DEVICE;
			break;
		case SG_SCSI_RESET_BUS:
			val = SCSI_TRY_RESET_BUS;
			break;
		case SG_SCSI_RESET_HOST:
			val = SCSI_TRY_RESET_HOST;
			break;
		default:
			return -EINVAL;
		}
		if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
			return -EACCES;
		return (scsi_reset_provider(sdp->device, val) ==
			SUCCESS) ? 0 : -EIO;
	case SCSI_IOCTL_SEND_COMMAND:
		if (sdp->detached)
			return -ENODEV;
		if (read_only) {
			unsigned char opcode = WRITE_6;
			Scsi_Ioctl_Command __user *siocp = p;

			if (copy_from_user(&opcode, siocp->data, 1))
				return -EFAULT;
			if (!sg_allow_access(opcode, sdp->device->type))
				return -EPERM;
		}
		return scsi_ioctl_send_command(sdp->device, p);
	case SG_SET_DEBUG:
		result = get_user(val, ip);
		if (result)
			return result;
		sdp->sgdebug = (char) val;
		return 0;
	case SCSI_IOCTL_GET_IDLUN:
	case SCSI_IOCTL_GET_BUS_NUMBER:
	case SCSI_IOCTL_PROBE_HOST:
	case SG_GET_TRANSFORM:
		if (sdp->detached)
			return -ENODEV;
		return scsi_ioctl(sdp->device, cmd_in, p);
	default:
		if (read_only)
			return -EPERM;	/* don't know so take safe approach */
		return scsi_ioctl(sdp->device, cmd_in, p);
	}
}
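
/*
 * Illustrative userspace sketch (not part of the driver): issuing a
 * synchronous INQUIRY through the SG_IO ioctl handled above.  The device
 * path, transfer sizes and timeout are assumptions for the example.
 */
#if 0
#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <scsi/sg.h>

static int sg_io_inquiry(void)
{
	unsigned char cdb[6] = { 0x12, 0, 0, 0, 96, 0 };	/* INQUIRY */
	unsigned char resp[96], sense[32];
	sg_io_hdr_t hdr;
	int fd = open("/dev/sg0", O_RDWR);

	if (fd < 0)
		return -1;
	memset(&hdr, 0, sizeof(hdr));
	hdr.interface_id = 'S';		/* required, see sg_new_write() */
	hdr.cmd_len = sizeof(cdb);
	hdr.cmdp = cdb;
	hdr.dxfer_direction = SG_DXFER_FROM_DEV;
	hdr.dxfer_len = sizeof(resp);
	hdr.dxferp = resp;
	hdr.mx_sb_len = sizeof(sense);
	hdr.sbp = sense;
	hdr.timeout = 20000;		/* milliseconds */
	return ioctl(fd, SG_IO, &hdr);	/* blocks until sg_cmd_done() fires */
}
#endif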

#ifdef CONFIG_COMPAT
static long sg_compat_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
{
	Sg_device *sdp;
	Sg_fd *sfp;
	struct scsi_device *sdev;

	if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
		return -ENXIO;

	sdev = sdp->device;
	if (sdev->host->hostt->compat_ioctl) {
		int ret;

		ret = sdev->host->hostt->compat_ioctl(sdev, cmd_in, (void __user *)arg);

		return ret;
	}

	return -ENOIOCTLCMD;
}
#endif

static unsigned int
sg_poll(struct file *filp, poll_table * wait)
{
	unsigned int res = 0;
	Sg_device *sdp;
	Sg_fd *sfp;
	Sg_request *srp;
	int count = 0;
	unsigned long iflags;

	if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp))
	    || sfp->closed)
		return POLLERR;
	poll_wait(filp, &sfp->read_wait, wait);
	read_lock_irqsave(&sfp->rq_list_lock, iflags);
	for (srp = sfp->headrp; srp; srp = srp->nextrp) {
		/* if any read waiting, flag it */
		if ((0 == res) && (1 == srp->done) && (!srp->sg_io_owned))
			res = POLLIN | POLLRDNORM;
		++count;
	}
	read_unlock_irqrestore(&sfp->rq_list_lock, iflags);

	if (sdp->detached)
		res |= POLLHUP;
	else if (!sfp->cmd_q) {
		if (0 == count)
			res |= POLLOUT | POLLWRNORM;
	} else if (count < SG_MAX_QUEUE)
		res |= POLLOUT | POLLWRNORM;
	SCSI_LOG_TIMEOUT(3, printk("sg_poll: %s, res=0x%x\n",
				   sdp->disk->disk_name, (int) res));
	return res;
}
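
/*
 * Illustrative userspace sketch (not part of the driver): waiting for a
 * queued command to complete with poll().  POLLIN maps to "a reply is
 * ready for read()" and POLLOUT to "another command may be queued", as
 * set up in sg_poll() above.  The timeout value is an assumption.
 */
#if 0
#include <poll.h>

static int wait_for_reply(int sg_fd)
{
	struct pollfd pfd = { .fd = sg_fd, .events = POLLIN };

	if (poll(&pfd, 1, 5000 /* ms */) <= 0)
		return -1;	/* timeout or error */
	if (pfd.revents & POLLHUP)
		return -1;	/* device was detached */
	return (pfd.revents & POLLIN) ? 0 : -1;	/* 0: reply ready */
}
#endif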

static int
sg_fasync(int fd, struct file *filp, int mode)
{
	int retval;
	Sg_device *sdp;
	Sg_fd *sfp;

	if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
		return -ENXIO;
	SCSI_LOG_TIMEOUT(3, printk("sg_fasync: %s, mode=%d\n",
				   sdp->disk->disk_name, mode));

	retval = fasync_helper(fd, filp, mode, &sfp->async_qp);
	return (retval < 0) ? retval : 0;
}
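
/*
 * Illustrative userspace sketch (not part of the driver): enabling
 * asynchronous notification.  Once FASYNC is set, sg_cmd_done() delivers
 * SIGPOLL/SIGIO via kill_fasync() whenever a reply becomes ready.
 */
#if 0
#include <fcntl.h>
#include <signal.h>
#include <unistd.h>

static void on_sigio(int sig)
{
	/* a reply is ready; read() it from the sg fd */
}

static int enable_async(int sg_fd)
{
	signal(SIGIO, on_sigio);
	if (fcntl(sg_fd, F_SETOWN, getpid()) < 0)
		return -1;
	/* turning on FASYNC ends up in sg_fasync() above */
	return fcntl(sg_fd, F_SETFL, fcntl(sg_fd, F_GETFL) | FASYNC);
}
#endif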

static inline unsigned char *
sg_scatg2virt(const struct scatterlist *sclp)
{
	return (sclp && sclp->page) ?
	    (unsigned char *) page_address(sclp->page) + sclp->offset : NULL;
}

/* When startFinish==1, increments the page counts of all pages other than
   the first page of each scatter gather element obtained from
   __get_free_pages().  When startFinish==0, decrements them instead. */
static void
sg_rb_correct4mmap(Sg_scatter_hold * rsv_schp, int startFinish)
{
	void *page_ptr;
	struct page *page;
	int k, m;

	SCSI_LOG_TIMEOUT(3, printk("sg_rb_correct4mmap: startFinish=%d, scatg=%d\n",
				   startFinish, rsv_schp->k_use_sg));
	/* N.B. correction _not_ applied to base page of each allocation */
	if (rsv_schp->k_use_sg) {	/* reserve buffer is a scatter gather list */
		struct scatterlist *sclp = rsv_schp->buffer;

		for (k = 0; k < rsv_schp->k_use_sg; ++k, ++sclp) {
			for (m = PAGE_SIZE; m < sclp->length; m += PAGE_SIZE) {
				page_ptr = sg_scatg2virt(sclp) + m;
				page = virt_to_page(page_ptr);
				if (startFinish)
					get_page(page);
				else {
					if (page_count(page) > 0)
						__put_page(page);
				}
			}
		}
	} else {		/* reserve buffer is just a single allocation */
		for (m = PAGE_SIZE; m < rsv_schp->bufflen; m += PAGE_SIZE) {
			page_ptr = (unsigned char *) rsv_schp->buffer + m;
			page = virt_to_page(page_ptr);
			if (startFinish)
				get_page(page);
			else {
				if (page_count(page) > 0)
					__put_page(page);
			}
		}
	}
}

static struct page *
sg_vma_nopage(struct vm_area_struct *vma, unsigned long addr, int *type)
{
	Sg_fd *sfp;
	struct page *page = NOPAGE_SIGBUS;
	void *page_ptr = NULL;
	unsigned long offset;
	Sg_scatter_hold *rsv_schp;

	if ((NULL == vma) || (!(sfp = (Sg_fd *) vma->vm_private_data)))
		return page;
	rsv_schp = &sfp->reserve;
	offset = addr - vma->vm_start;
	if (offset >= rsv_schp->bufflen)
		return page;
	SCSI_LOG_TIMEOUT(3, printk("sg_vma_nopage: offset=%lu, scatg=%d\n",
				   offset, rsv_schp->k_use_sg));
	if (rsv_schp->k_use_sg) {	/* reserve buffer is a scatter gather list */
		int k;
		unsigned long sa = vma->vm_start;
		unsigned long len;
		struct scatterlist *sclp = rsv_schp->buffer;

		for (k = 0; (k < rsv_schp->k_use_sg) && (sa < vma->vm_end);
		     ++k, ++sclp) {
			len = vma->vm_end - sa;
			len = (len < sclp->length) ? len : sclp->length;
			if (offset < len) {
				page_ptr = sg_scatg2virt(sclp) + offset;
				page = virt_to_page(page_ptr);
				get_page(page);	/* increment page count */
				break;
			}
			sa += len;
			offset -= len;
		}
	} else {		/* reserve buffer is just a single allocation */
		page_ptr = (unsigned char *) rsv_schp->buffer + offset;
		page = virt_to_page(page_ptr);
		get_page(page);	/* increment page count */
	}
	if (type)
		*type = VM_FAULT_MINOR;
	return page;
}

static struct vm_operations_struct sg_mmap_vm_ops = {
	.nopage = sg_vma_nopage,
};

static int
sg_mmap(struct file *filp, struct vm_area_struct *vma)
{
	Sg_fd *sfp;
	unsigned long req_sz = vma->vm_end - vma->vm_start;
	Sg_scatter_hold *rsv_schp;

	if ((!filp) || (!vma) || (!(sfp = (Sg_fd *) filp->private_data)))
		return -ENXIO;
	SCSI_LOG_TIMEOUT(3, printk("sg_mmap starting, vm_start=%p, len=%d\n",
				   (void *) vma->vm_start, (int) req_sz));
	if (vma->vm_pgoff)
		return -EINVAL;	/* want no offset */
	rsv_schp = &sfp->reserve;
	if (req_sz > rsv_schp->bufflen)
		return -ENOMEM;	/* cannot map more than reserved buffer */

	if (rsv_schp->k_use_sg) { /* reserve buffer is a scatter gather list */
		int k;
		unsigned long sa = vma->vm_start;
		unsigned long len;
		struct scatterlist *sclp = rsv_schp->buffer;

		for (k = 0; (k < rsv_schp->k_use_sg) && (sa < vma->vm_end);
		     ++k, ++sclp) {
			if (0 != sclp->offset)
				return -EFAULT;	/* non page aligned memory ?? */
			len = vma->vm_end - sa;
			len = (len < sclp->length) ? len : sclp->length;
			sa += len;
		}
	} else {	/* reserve buffer is just a single allocation */
		if ((unsigned long) rsv_schp->buffer & (PAGE_SIZE - 1))
			return -EFAULT;	/* non page aligned memory ?? */
	}
	if (0 == sfp->mmap_called) {
		sg_rb_correct4mmap(rsv_schp, 1);	/* do only once per fd lifetime */
		sfp->mmap_called = 1;
	}
	vma->vm_flags |= (VM_RESERVED | VM_IO);
	vma->vm_private_data = sfp;
	vma->vm_ops = &sg_mmap_vm_ops;
	return 0;
}
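
/*
 * Illustrative userspace sketch (not part of the driver): mmap()ing the
 * per-fd reserve buffer and issuing SG_IO with SG_FLAG_MMAP_IO so the
 * data lands directly in the mapping.  The transfer length is an
 * assumption and must not exceed the reserved buffer size checked above.
 */
#if 0
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <scsi/sg.h>

static void *mmap_io_read(int sg_fd, unsigned char *cdb, int cdb_len,
			  size_t xfer_len)
{
	sg_io_hdr_t hdr;
	void *buf = mmap(NULL, xfer_len, PROT_READ | PROT_WRITE,
			 MAP_SHARED, sg_fd, 0);

	if (buf == MAP_FAILED)
		return NULL;
	memset(&hdr, 0, sizeof(hdr));
	hdr.interface_id = 'S';
	hdr.cmd_len = cdb_len;
	hdr.cmdp = cdb;
	hdr.dxfer_direction = SG_DXFER_FROM_DEV;
	hdr.dxfer_len = xfer_len;	/* dxferp stays NULL for mmap IO */
	hdr.flags = SG_FLAG_MMAP_IO;
	hdr.timeout = 20000;		/* ms */
	if (ioctl(sg_fd, SG_IO, &hdr) < 0)
		return NULL;
	return buf;		/* data is in the mapped reserve buffer */
}
#endif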

/* This function is a "bottom half" handler that is called by the
 * mid level when a command is completed (or has failed). */
static void
sg_cmd_done(Scsi_Cmnd * SCpnt)
{
	Scsi_Request *SRpnt = NULL;
	Sg_device *sdp = NULL;
	Sg_fd *sfp;
	Sg_request *srp = NULL;
	unsigned long iflags;

	if (SCpnt && (SRpnt = SCpnt->sc_request))
		srp = (Sg_request *) SRpnt->upper_private_data;
	if (NULL == srp) {
		printk(KERN_ERR "sg_cmd_done: NULL request\n");
		if (SRpnt)
			scsi_release_request(SRpnt);
		return;
	}
	sfp = srp->parentfp;
	if (sfp)
		sdp = sfp->parentdp;
	if ((NULL == sdp) || sdp->detached) {
		printk(KERN_INFO "sg_cmd_done: device detached\n");
		scsi_release_request(SRpnt);
		return;
	}

	/* First transfer ownership of data buffers to sg_device object. */
	srp->data.k_use_sg = SRpnt->sr_use_sg;
	srp->data.sglist_len = SRpnt->sr_sglist_len;
	srp->data.bufflen = SRpnt->sr_bufflen;
	srp->data.buffer = SRpnt->sr_buffer;
	/* now clear out request structure */
	SRpnt->sr_use_sg = 0;
	SRpnt->sr_sglist_len = 0;
	SRpnt->sr_bufflen = 0;
	SRpnt->sr_buffer = NULL;
	SRpnt->sr_underflow = 0;
	SRpnt->sr_request->rq_disk = NULL; /* "sg" _disowns_ request blk */

	srp->my_cmdp = NULL;

	SCSI_LOG_TIMEOUT(4, printk("sg_cmd_done: %s, pack_id=%d, res=0x%x\n",
		sdp->disk->disk_name, srp->header.pack_id, (int) SRpnt->sr_result));
	srp->header.resid = SCpnt->resid;
	/* N.B. unit of duration changes here from jiffies to millisecs */
	srp->header.duration =
	    jiffies_to_msecs(jiffies - srp->header.duration);
	if (0 != SRpnt->sr_result) {
		struct scsi_sense_hdr sshdr;

		memcpy(srp->sense_b, SRpnt->sr_sense_buffer,
		       sizeof (srp->sense_b));
		srp->header.status = 0xff & SRpnt->sr_result;
		srp->header.masked_status = status_byte(SRpnt->sr_result);
		srp->header.msg_status = msg_byte(SRpnt->sr_result);
		srp->header.host_status = host_byte(SRpnt->sr_result);
		srp->header.driver_status = driver_byte(SRpnt->sr_result);
		if ((sdp->sgdebug > 0) &&
		    ((CHECK_CONDITION == srp->header.masked_status) ||
		     (COMMAND_TERMINATED == srp->header.masked_status)))
			print_req_sense("sg_cmd_done", SRpnt);

		/* Following if statement is a patch supplied by Eric Youngdale */
		if (driver_byte(SRpnt->sr_result) != 0
		    && scsi_command_normalize_sense(SCpnt, &sshdr)
		    && !scsi_sense_is_deferred(&sshdr)
		    && sshdr.sense_key == UNIT_ATTENTION
		    && sdp->device->removable) {
			/* Detected possible disc change. Set the bit - this */
			/* may be used if there are filesystems using this device */
			sdp->device->changed = 1;
		}
	}
	/* Rely on write phase to clean out srp status values, so no "else" */

	scsi_release_request(SRpnt);
	SRpnt = NULL;
	if (sfp->closed) {	/* whoops this fd already released, cleanup */
		SCSI_LOG_TIMEOUT(1, printk("sg_cmd_done: already closed, freeing ...\n"));
		sg_finish_rem_req(srp);
		srp = NULL;
		if (NULL == sfp->headrp) {
			SCSI_LOG_TIMEOUT(1, printk("sg...bh: already closed, final cleanup\n"));
			if (0 == sg_remove_sfp(sdp, sfp)) {	/* device still present */
				scsi_device_put(sdp->device);
			}
			sfp = NULL;
		}
	} else if (srp && srp->orphan) {
		if (sfp->keep_orphan)
			srp->sg_io_owned = 0;
		else {
			sg_finish_rem_req(srp);
			srp = NULL;
		}
	}
	if (sfp && srp) {
		/* Now wake up any sg_read() that is waiting for this packet. */
		kill_fasync(&sfp->async_qp, SIGPOLL, POLL_IN);
		write_lock_irqsave(&sfp->rq_list_lock, iflags);
		srp->done = 1;
		wake_up_interruptible(&sfp->read_wait);
		write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
	}
}

static struct file_operations sg_fops = {
	.owner = THIS_MODULE,
	.read = sg_read,
	.write = sg_write,
	.poll = sg_poll,
	.ioctl = sg_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = sg_compat_ioctl,
#endif
	.open = sg_open,
	.mmap = sg_mmap,
	.release = sg_release,
	.fasync = sg_fasync,
};

static struct class_simple * sg_sysfs_class;

static int sg_sysfs_valid = 0;

static int sg_alloc(struct gendisk *disk, struct scsi_device *scsidp)
{
	Sg_device *sdp;
	unsigned long iflags;
	void *old_sg_dev_arr = NULL;
	int k, error;

	sdp = kmalloc(sizeof(Sg_device), GFP_KERNEL);
	if (!sdp) {
		printk(KERN_WARNING "kmalloc Sg_device failure\n");
		return -ENOMEM;
	}

	write_lock_irqsave(&sg_dev_arr_lock, iflags);
	if (unlikely(sg_nr_dev >= sg_dev_max)) {	/* try to resize */
		Sg_device **tmp_da;
		int tmp_dev_max = sg_nr_dev + SG_DEV_ARR_LUMP;
		write_unlock_irqrestore(&sg_dev_arr_lock, iflags);

		tmp_da = kmalloc(tmp_dev_max * sizeof(Sg_device *), GFP_KERNEL);
		if (unlikely(!tmp_da))
			goto expand_failed;

		write_lock_irqsave(&sg_dev_arr_lock, iflags);
		memset(tmp_da, 0, tmp_dev_max * sizeof(Sg_device *));
		memcpy(tmp_da, sg_dev_arr, sg_dev_max * sizeof(Sg_device *));
		old_sg_dev_arr = sg_dev_arr;
		sg_dev_arr = tmp_da;
		sg_dev_max = tmp_dev_max;
	}

	for (k = 0; k < sg_dev_max; k++)
		if (!sg_dev_arr[k])
			break;
	if (unlikely(k >= SG_MAX_DEVS))
		goto overflow;

	memset(sdp, 0, sizeof(*sdp));
	SCSI_LOG_TIMEOUT(3, printk("sg_alloc: dev=%d\n", k));
	sprintf(disk->disk_name, "sg%d", k);
	disk->first_minor = k;
	sdp->disk = disk;
	sdp->device = scsidp;
	init_waitqueue_head(&sdp->o_excl_wait);
	sdp->sg_tablesize = scsidp->host ? scsidp->host->sg_tablesize : 0;

	sg_nr_dev++;
	sg_dev_arr[k] = sdp;
	write_unlock_irqrestore(&sg_dev_arr_lock, iflags);
	error = k;

 out:
	if (error < 0)
		kfree(sdp);
	kfree(old_sg_dev_arr);
	return error;

 expand_failed:
	printk(KERN_WARNING "sg_alloc: device array cannot be resized\n");
	error = -ENOMEM;
	goto out;

 overflow:
	write_unlock_irqrestore(&sg_dev_arr_lock, iflags);
	printk(KERN_WARNING
	       "Unable to attach sg device <%d, %d, %d, %d> type=%d, minor "
	       "number exceeds %d\n", scsidp->host->host_no, scsidp->channel,
	       scsidp->id, scsidp->lun, scsidp->type, SG_MAX_DEVS - 1);
	error = -ENODEV;
	goto out;
}

static int
sg_add(struct class_device *cl_dev)
{
	struct scsi_device *scsidp = to_scsi_device(cl_dev->dev);
	struct gendisk *disk;
	Sg_device *sdp = NULL;
	struct cdev * cdev = NULL;
	int error, k;

	disk = alloc_disk(1);
	if (!disk) {
		printk(KERN_WARNING "alloc_disk failed\n");
		return -ENOMEM;
	}
	disk->major = SCSI_GENERIC_MAJOR;

	error = -ENOMEM;
	cdev = cdev_alloc();
	if (!cdev) {
		printk(KERN_WARNING "cdev_alloc failed\n");
		goto out;
	}
	cdev->owner = THIS_MODULE;
	cdev->ops = &sg_fops;

	error = sg_alloc(disk, scsidp);
	if (error < 0) {
		printk(KERN_WARNING "sg_alloc failed\n");
		goto out;
	}
	k = error;
	sdp = sg_dev_arr[k];

	devfs_mk_cdev(MKDEV(SCSI_GENERIC_MAJOR, k),
			S_IFCHR | S_IRUSR | S_IWUSR | S_IRGRP,
			"%s/generic", scsidp->devfs_name);
	error = cdev_add(cdev, MKDEV(SCSI_GENERIC_MAJOR, k), 1);
	if (error) {
		devfs_remove("%s/generic", scsidp->devfs_name);
		goto out;
	}
	sdp->cdev = cdev;
	if (sg_sysfs_valid) {
		struct class_device * sg_class_member;

		sg_class_member = class_simple_device_add(sg_sysfs_class,
				MKDEV(SCSI_GENERIC_MAJOR, k),
				cl_dev->dev, "%s",
				disk->disk_name);
		if (IS_ERR(sg_class_member))
			printk(KERN_WARNING "sg_add: "
				"class_simple_device_add failed\n");
		class_set_devdata(sg_class_member, sdp);
		error = sysfs_create_link(&scsidp->sdev_gendev.kobj,
					  &sg_class_member->kobj, "generic");
		if (error)
			printk(KERN_ERR "sg_add: unable to make symlink "
					"'generic' back to sg%d\n", k);
	} else
		printk(KERN_WARNING "sg_add: sg_sys INvalid\n");

	printk(KERN_NOTICE
	       "Attached scsi generic sg%d at scsi%d, channel"
	       " %d, id %d, lun %d,  type %d\n", k,
	       scsidp->host->host_no, scsidp->channel, scsidp->id,
	       scsidp->lun, scsidp->type);

	return 0;

out:
	put_disk(disk);
	if (cdev)
		cdev_del(cdev);
	return error;
}

static void
sg_remove(struct class_device *cl_dev)
{
	struct scsi_device *scsidp = to_scsi_device(cl_dev->dev);
	Sg_device *sdp = NULL;
	unsigned long iflags;
	Sg_fd *sfp;
	Sg_fd *tsfp;
	Sg_request *srp;
	Sg_request *tsrp;
	int k, delay;

	if (NULL == sg_dev_arr)
		return;
	delay = 0;
	write_lock_irqsave(&sg_dev_arr_lock, iflags);
	for (k = 0; k < sg_dev_max; k++) {
		sdp = sg_dev_arr[k];
		if ((NULL == sdp) || (sdp->device != scsidp))
			continue;	/* dirty but lowers nesting */
		if (sdp->headfp) {
			sdp->detached = 1;
			for (sfp = sdp->headfp; sfp; sfp = tsfp) {
				tsfp = sfp->nextfp;
				for (srp = sfp->headrp; srp; srp = tsrp) {
					tsrp = srp->nextrp;
					if (sfp->closed || (0 == sg_srp_done(srp, sfp)))
						sg_finish_rem_req(srp);
				}
				if (sfp->closed) {
					scsi_device_put(sdp->device);
					__sg_remove_sfp(sdp, sfp);
				} else {
					delay = 1;
					wake_up_interruptible(&sfp->read_wait);
					kill_fasync(&sfp->async_qp, SIGPOLL,
						    POLL_HUP);
				}
			}
			SCSI_LOG_TIMEOUT(3, printk("sg_detach: dev=%d, dirty\n", k));
			if (NULL == sdp->headfp) {
				sg_dev_arr[k] = NULL;
			}
		} else {	/* nothing active, simple case */
			SCSI_LOG_TIMEOUT(3, printk("sg_detach: dev=%d\n", k));
			sg_dev_arr[k] = NULL;
		}
		sg_nr_dev--;
		break;
	}
	write_unlock_irqrestore(&sg_dev_arr_lock, iflags);

	if (sdp) {
		sysfs_remove_link(&scsidp->sdev_gendev.kobj, "generic");
		class_simple_device_remove(MKDEV(SCSI_GENERIC_MAJOR, k));
		cdev_del(sdp->cdev);
		sdp->cdev = NULL;
		devfs_remove("%s/generic", scsidp->devfs_name);
		put_disk(sdp->disk);
		sdp->disk = NULL;
		if (NULL == sdp->headfp)
			kfree((char *) sdp);
	}

	if (delay)
		msleep(10);	/* dirty detach so delay device destruction */
}

/* Set 'perm' (4th argument) to 0 to disable module_param's definition
 * of sysfs parameters (which module_param doesn't yet support).
 * Sysfs parameters defined explicitly below.
 */
module_param_named(def_reserved_size, def_reserved_size, int, S_IRUGO);
module_param_named(allow_dio, sg_allow_dio, int, S_IRUGO | S_IWUSR);

MODULE_AUTHOR("Douglas Gilbert");
MODULE_DESCRIPTION("SCSI generic (sg) driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(SG_VERSION_STR);

MODULE_PARM_DESC(def_reserved_size, "size of buffer reserved for each fd");
MODULE_PARM_DESC(allow_dio, "allow direct I/O (default: 0 (disallow))");

static int __init
init_sg(void)
{
	int rc;

	if (def_reserved_size >= 0)
		sg_big_buff = def_reserved_size;

	rc = register_chrdev_region(MKDEV(SCSI_GENERIC_MAJOR, 0),
				    SG_MAX_DEVS, "sg");
	if (rc)
		return rc;
	sg_sysfs_class = class_simple_create(THIS_MODULE, "scsi_generic");
	if (IS_ERR(sg_sysfs_class)) {
		rc = PTR_ERR(sg_sysfs_class);
		goto err_out;
	}
	sg_sysfs_valid = 1;
	rc = scsi_register_interface(&sg_interface);
	if (0 == rc) {
#ifdef CONFIG_SCSI_PROC_FS
		sg_proc_init();
#endif				/* CONFIG_SCSI_PROC_FS */
		return 0;
	}
	class_simple_destroy(sg_sysfs_class);
err_out:
	unregister_chrdev_region(MKDEV(SCSI_GENERIC_MAJOR, 0), SG_MAX_DEVS);
	return rc;
}

static void __exit
exit_sg(void)
{
#ifdef CONFIG_SCSI_PROC_FS
	sg_proc_cleanup();
#endif				/* CONFIG_SCSI_PROC_FS */
	scsi_unregister_interface(&sg_interface);
	class_simple_destroy(sg_sysfs_class);
	sg_sysfs_valid = 0;
	unregister_chrdev_region(MKDEV(SCSI_GENERIC_MAJOR, 0),
				 SG_MAX_DEVS);
	if (sg_dev_arr != NULL) {
		kfree((char *) sg_dev_arr);
		sg_dev_arr = NULL;
	}
	sg_dev_max = 0;
}

static int
sg_start_req(Sg_request * srp)
{
	int res;
	Sg_fd *sfp = srp->parentfp;
	sg_io_hdr_t *hp = &srp->header;
	int dxfer_len = (int) hp->dxfer_len;
	int dxfer_dir = hp->dxfer_direction;
	Sg_scatter_hold *req_schp = &srp->data;
	Sg_scatter_hold *rsv_schp = &sfp->reserve;

	SCSI_LOG_TIMEOUT(4, printk("sg_start_req: dxfer_len=%d\n", dxfer_len));
	if ((dxfer_len <= 0) || (dxfer_dir == SG_DXFER_NONE))
		return 0;
	if (sg_allow_dio && (hp->flags & SG_FLAG_DIRECT_IO) &&
	    (dxfer_dir != SG_DXFER_UNKNOWN) && (0 == hp->iovec_count) &&
	    (!sfp->parentdp->device->host->unchecked_isa_dma)) {
		res = sg_build_direct(srp, sfp, dxfer_len);
		if (res <= 0)	/* -ve -> error, 0 -> done, 1 -> try indirect */
			return res;
	}
	if ((!sg_res_in_use(sfp)) && (dxfer_len <= rsv_schp->bufflen))
		sg_link_reserve(sfp, srp, dxfer_len);
	else {
		res = sg_build_indirect(req_schp, sfp, dxfer_len);
		if (res) {
			sg_remove_scat(req_schp);
			return res;
		}
	}
	return 0;
}

static void
sg_finish_rem_req(Sg_request * srp)
{
	Sg_fd *sfp = srp->parentfp;
	Sg_scatter_hold *req_schp = &srp->data;

	SCSI_LOG_TIMEOUT(4, printk("sg_finish_rem_req: res_used=%d\n", (int) srp->res_used));
	if (srp->res_used)
		sg_unlink_reserve(sfp, srp);
	else
		sg_remove_scat(req_schp);
	sg_remove_request(sfp, srp);
}

static int
sg_build_sgat(Sg_scatter_hold * schp, const Sg_fd * sfp, int tablesize)
{
	int ret_sz;
	int elem_sz = sizeof (struct scatterlist);
	int sg_bufflen = tablesize * elem_sz;
	int mx_sc_elems = tablesize;

	schp->buffer = sg_page_malloc(sg_bufflen, sfp->low_dma, &ret_sz);
	if (!schp->buffer)
		return -ENOMEM;
	else if (ret_sz != sg_bufflen) {
		sg_bufflen = ret_sz;
		mx_sc_elems = sg_bufflen / elem_sz;
	}
	schp->sglist_len = sg_bufflen;
	memset(schp->buffer, 0, sg_bufflen);
	return mx_sc_elems;	/* number of scat_gath elements allocated */
}

#ifdef SG_ALLOW_DIO_CODE
/* vvvvvvvv  following code borrowed from st driver's direct IO vvvvvvvvv */
1728 	/* hopefully this generic code will be moved to a library */
1729 
1730 /* Pin down user pages and put them into a scatter/gather list.
1731    Returns <= 0 if:
1732    - mapping of all pages was not successful, or
1733    - any page is above max_pfn
1734    (i.e., the call either succeeds completely or fails) */
1735 static int
1736 st_map_user_pages(struct scatterlist *sgl, const unsigned int max_pages,
1737 	          unsigned long uaddr, size_t count, int rw,
1738 	          unsigned long max_pfn)
1739 {
1740 	int res, i, j;
1741 	unsigned int nr_pages;
1742 	struct page **pages;
1743 
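	/*
	 * Worked example (assuming 4 KiB pages): uaddr = 0x1234 and
	 * count = 8192 give (0x234 + 8192 + 4095) >> 12 = 3, i.e. the
	 * transfer touches the pages at 0x1000, 0x2000 and 0x3000.
	 */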
1744 	nr_pages = ((uaddr & ~PAGE_MASK) + count + ~PAGE_MASK) >> PAGE_SHIFT;
1745 
1746 	/* reject an address range that wraps around (overflow) */
1747 	if ((uaddr + count) < uaddr)
1748 		return -EINVAL;
1749 
1750 	/* Too big */
1751 	if (nr_pages > max_pages)
1752 		return -ENOMEM;
1753 
1754 	/* zero-length transfer: nothing to map */
1755 	if (count == 0)
1756 		return 0;
1757 
1758 	if ((pages = kmalloc(max_pages * sizeof(*pages), GFP_ATOMIC)) == NULL)
1759 		return -ENOMEM;
1760 
1761 	/* Try to fault in all of the necessary pages */
1762 	down_read(&current->mm->mmap_sem);
1763 	/* rw==READ means read from drive, write into memory area */
1764 	res = get_user_pages(
1765 		current,
1766 		current->mm,
1767 		uaddr,
1768 		nr_pages,
1769 		rw == READ,
1770 		0, /* don't force */
1771 		pages,
1772 		NULL);
1773 	up_read(&current->mm->mmap_sem);
1774 
1775 	/* Errors and no page mapped should return here */
1776 	if (res < nr_pages)
1777 		goto out_unmap;
1778 
1779 	for (i = 0; i < nr_pages; i++) {
1780 		/* FIXME: flush is superfluous for rw==READ,
1781 		 * probably the wrong function for rw==WRITE
1782 		 */
1783 		flush_dcache_page(pages[i]);
1784 		if (page_to_pfn(pages[i]) > max_pfn)
1785 			goto out_unlock;
1786 		/* ?? Is locking needed? I don't think so */
1787 		/* if (TestSetPageLocked(pages[i]))
1788 		   goto out_unlock; */
1789 	}
1790 
1791 	/* Populate the scatter/gather list */
1792 	sgl[0].page = pages[0];
1793 	sgl[0].offset = uaddr & ~PAGE_MASK;
1794 	if (nr_pages > 1) {
1795 		sgl[0].length = PAGE_SIZE - sgl[0].offset;
1796 		count -= sgl[0].length;
1797 		for (i = 1; i < nr_pages; i++) {
1798 			sgl[i].offset = 0;
1799 			sgl[i].page = pages[i];
1800 			sgl[i].length = count < PAGE_SIZE ? count : PAGE_SIZE;
1801 			count -= PAGE_SIZE;
1802 		}
1803 	}
1804 	else {
1805 		sgl[0].length = count;
1806 	}
1807 
1808 	kfree(pages);
1809 	return nr_pages;
1810 
1811  out_unlock:
1812 	/* for (j=0; j < i; j++)
1813 	   unlock_page(pages[j]); */
1814 	res = 0;
1815  out_unmap:
1816 	if (res > 0)
1817 		for (j = 0; j < res; j++)
1818 			page_cache_release(pages[j]);
1819 	kfree(pages);
1820 	return res;
1821 }
1822 
1823 
1824 /* And unmap them... */
1825 static int
1826 st_unmap_user_pages(struct scatterlist *sgl, const unsigned int nr_pages,
1827 		    int dirtied)
1828 {
1829 	int i;
1830 
1831 	for (i = 0; i < nr_pages; i++) {
1832 		if (dirtied && !PageReserved(sgl[i].page))
1833 			SetPageDirty(sgl[i].page);
1834 		/* unlock_page(sgl[i].page); */
1835 		/* FIXME: cache flush missing for rw==READ
1836 		 * FIXME: call the correct reference counting function
1837 		 */
1838 		page_cache_release(sgl[i].page);
1839 	}
1840 
1841 	return 0;
1842 }
1843 
1844 /* ^^^^^^^^  above code borrowed from st driver's direct IO ^^^^^^^^^ */
1845 #endif
1846 
1847 
1848 /* Returns: -ve -> error, 0 -> done, 1 -> try indirect */
1849 static int
1850 sg_build_direct(Sg_request * srp, Sg_fd * sfp, int dxfer_len)
1851 {
1852 #ifdef SG_ALLOW_DIO_CODE
1853 	sg_io_hdr_t *hp = &srp->header;
1854 	Sg_scatter_hold *schp = &srp->data;
1855 	int sg_tablesize = sfp->parentdp->sg_tablesize;
1856 	struct scatterlist *sgl;
1857 	int mx_sc_elems, res;
1858 	struct scsi_device *sdev = sfp->parentdp->device;
1859 
1860 	if (((unsigned long)hp->dxferp &
1861 			queue_dma_alignment(sdev->request_queue)) != 0)
1862 		return 1;
1863 	mx_sc_elems = sg_build_sgat(schp, sfp, sg_tablesize);
1864 	if (mx_sc_elems <= 0)
1865 		return 1;
1867 	sgl = (struct scatterlist *)schp->buffer;
1868 	res = st_map_user_pages(sgl, mx_sc_elems, (unsigned long)hp->dxferp, dxfer_len,
1869 				(SG_DXFER_TO_DEV == hp->dxfer_direction) ? 1 : 0, ULONG_MAX);
1870 	if (res <= 0)
1871 		return 1;
1872 	schp->k_use_sg = res;
1873 	schp->dio_in_use = 1;
1874 	hp->info |= SG_INFO_DIRECT_IO;
1875 	return 0;
1876 #else
1877 	return 1;
1878 #endif
1879 }
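
/*
 * Minimal user-space sketch (illustrative only, not part of the driver)
 * of a request that can take the direct I/O path above: SG_FLAG_DIRECT_IO
 * is set, no iovec is used, and the buffer is page aligned so the
 * queue_dma_alignment() test passes.  It also needs allow_dio to be
 * enabled (see sg_start_req()).  The device node /dev/sg0 is an assumed
 * example name.
 */
#if 0
#include <fcntl.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <scsi/sg.h>

int main(void)
{
	unsigned char cdb[6] = { 0x12, 0, 0, 0, 96, 0 };	/* INQUIRY */
	unsigned char sense[32];
	void *buf;
	sg_io_hdr_t hdr;
	int fd = open("/dev/sg0", O_RDWR);	/* assumed device node */

	if (fd < 0 || posix_memalign(&buf, 4096, 4096))
		return 1;
	memset(&hdr, 0, sizeof(hdr));
	hdr.interface_id = 'S';
	hdr.cmd_len = sizeof(cdb);
	hdr.cmdp = cdb;
	hdr.dxfer_direction = SG_DXFER_FROM_DEV;
	hdr.dxfer_len = 96;
	hdr.dxferp = buf;
	hdr.mx_sb_len = sizeof(sense);
	hdr.sbp = sense;
	hdr.timeout = 5000;		/* milliseconds */
	hdr.flags = SG_FLAG_DIRECT_IO;
	if (ioctl(fd, SG_IO, &hdr) < 0)
		return 1;
	/* hdr.info & SG_INFO_DIRECT_IO reports whether dio was used */
	free(buf);
	close(fd);
	return 0;
}
#endif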
1880 
1881 static int
1882 sg_build_indirect(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size)
1883 {
1884 	int ret_sz;
1885 	int blk_size = buff_size;
1886 	unsigned char *p = NULL;
1887 
1888 	if ((blk_size < 0) || (!sfp))
1889 		return -EFAULT;
1890 	if (0 == blk_size)
1891 		++blk_size;	/* avoid a zero-sized allocation */
1892 /* round request up to next highest SG_SECTOR_SZ byte boundary */
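/* e.g. assuming SG_SECTOR_SZ is 512: a 600 byte request rounds up to
   (600 + 511) & ~511 == 1024 */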
1893 	blk_size = (blk_size + SG_SECTOR_MSK) & (~SG_SECTOR_MSK);
1894 	SCSI_LOG_TIMEOUT(4, printk("sg_build_indirect: buff_size=%d, blk_size=%d\n",
1895 				   buff_size, blk_size));
1896 	if (blk_size <= SG_SCATTER_SZ) {
1897 		p = sg_page_malloc(blk_size, sfp->low_dma, &ret_sz);
1898 		if (!p)
1899 			return -ENOMEM;
1900 		if (blk_size == ret_sz) {	/* got it on the first attempt */
1901 			schp->k_use_sg = 0;
1902 			schp->buffer = p;
1903 			schp->bufflen = blk_size;
1904 			schp->b_malloc_len = blk_size;
1905 			return 0;
1906 		}
1907 	} else {
1908 		p = sg_page_malloc(SG_SCATTER_SZ, sfp->low_dma, &ret_sz);
1909 		if (!p)
1910 			return -ENOMEM;
1911 	}
1912 /* Want some local declarations, so start new block ... */
1913 	{			/* let's try to build a scatter/gather list */
1914 		struct scatterlist *sclp;
1915 		int k, rem_sz, num;
1916 		int mx_sc_elems;
1917 		int sg_tablesize = sfp->parentdp->sg_tablesize;
1918 		int first = 1;
1919 
1920 		/* N.B. ret_sz carried into this block ... */
1921 		mx_sc_elems = sg_build_sgat(schp, sfp, sg_tablesize);
1922 		if (mx_sc_elems < 0)
1923 			return mx_sc_elems;	/* most likely -ENOMEM */
1924 
1925 		for (k = 0, sclp = schp->buffer, rem_sz = blk_size;
1926 		     (rem_sz > 0) && (k < mx_sc_elems);
1927 		     ++k, rem_sz -= ret_sz, ++sclp) {
1928 			if (first)
1929 				first = 0;
1930 			else {
1931 				num = (rem_sz > SG_SCATTER_SZ) ?
1932 				      SG_SCATTER_SZ : rem_sz;
1934 				p = sg_page_malloc(num, sfp->low_dma, &ret_sz);
1935 				if (!p)
1936 					break;
1937 			}
1938 			sclp->page = virt_to_page(p);
1939 			sclp->offset = offset_in_page(p);
1940 			sclp->length = ret_sz;
1941 
1942 			SCSI_LOG_TIMEOUT(5, printk("sg_build_indirect: k=%d, a=0x%p, len=%d\n",
1943 					  k, sg_scatg2virt(sclp), ret_sz));
1944 		}		/* end of for loop */
1945 		schp->k_use_sg = k;
1946 		SCSI_LOG_TIMEOUT(5, printk("sg_build_indirect: k_use_sg=%d, rem_sz=%d\n", k, rem_sz));
1947 		schp->bufflen = blk_size;
1948 		if (rem_sz > 0)	/* must have failed */
1949 			return -ENOMEM;
1950 	}
1951 	return 0;
1952 }
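
/*
 * Illustrative sizing (assuming SG_SCATTER_SZ is 32 KiB and every
 * allocation succeeds at full size): a 70 KiB request is built above as
 * scatterlist elements of 32 KiB + 32 KiB + 6 KiB, giving k_use_sg == 3.
 */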
1953 
1954 static int
1955 sg_write_xfer(Sg_request * srp)
1956 {
1957 	sg_io_hdr_t *hp = &srp->header;
1958 	Sg_scatter_hold *schp = &srp->data;
1959 	int num_xfer = 0;
1960 	int j, k, onum, usglen, ksglen, res;
1961 	int iovec_count = (int) hp->iovec_count;
1962 	int dxfer_dir = hp->dxfer_direction;
1963 	unsigned char *p;
1964 	unsigned char __user *up;
1965 	int new_interface = ('\0' == hp->interface_id) ? 0 : 1;
1966 
1967 	if ((SG_DXFER_UNKNOWN == dxfer_dir) || (SG_DXFER_TO_DEV == dxfer_dir) ||
1968 	    (SG_DXFER_TO_FROM_DEV == dxfer_dir)) {
1969 		num_xfer = (int) (new_interface ? hp->dxfer_len : hp->flags);
1970 		if (schp->bufflen < num_xfer)
1971 			num_xfer = schp->bufflen;
1972 	}
1973 	if ((num_xfer <= 0) || (schp->dio_in_use) ||
1974 	    (new_interface
1975 	     && ((SG_FLAG_NO_DXFER | SG_FLAG_MMAP_IO) & hp->flags)))
1976 		return 0;
1977 
1978 	SCSI_LOG_TIMEOUT(4, printk("sg_write_xfer: num_xfer=%d, iovec_count=%d, k_use_sg=%d\n",
1979 			  num_xfer, iovec_count, schp->k_use_sg));
1980 	if (iovec_count) {
1981 		onum = iovec_count;
1982 		if (!access_ok(VERIFY_READ, hp->dxferp, SZ_SG_IOVEC * onum))
1983 			return -EFAULT;
1984 	} else
1985 		onum = 1;
1986 
1987 	if (0 == schp->k_use_sg) {	/* kernel has single buffer */
1988 		for (j = 0, p = schp->buffer; j < onum; ++j) {
1989 			res = sg_u_iovec(hp, iovec_count, j, 1, &usglen, &up);
1990 			if (res)
1991 				return res;
1992 			usglen = (num_xfer > usglen) ? usglen : num_xfer;
1993 			if (__copy_from_user(p, up, usglen))
1994 				return -EFAULT;
1995 			p += usglen;
1996 			num_xfer -= usglen;
1997 			if (num_xfer <= 0)
1998 				return 0;
1999 		}
2000 	} else {		/* kernel using scatter gather list */
2001 		struct scatterlist *sclp = (struct scatterlist *) schp->buffer;
2002 
2003 		ksglen = (int) sclp->length;
2004 		p = sg_scatg2virt(sclp);
2005 		for (j = 0, k = 0; j < onum; ++j) {
2006 			res = sg_u_iovec(hp, iovec_count, j, 1, &usglen, &up);
2007 			if (res)
2008 				return res;
2009 
2010 			for (; p; ++sclp, ksglen = (int) sclp->length,
2011 				  p = sg_scatg2virt(sclp)) {
2012 				if (usglen <= 0)
2013 					break;
2014 				if (ksglen > usglen) {
2015 					if (usglen >= num_xfer) {
2016 						if (__copy_from_user
2017 						    (p, up, num_xfer))
2018 							return -EFAULT;
2019 						return 0;
2020 					}
2021 					if (__copy_from_user(p, up, usglen))
2022 						return -EFAULT;
2023 					p += usglen;
2024 					ksglen -= usglen;
2025 					break;
2026 				} else {
2027 					if (ksglen >= num_xfer) {
2028 						if (__copy_from_user
2029 						    (p, up, num_xfer))
2030 							return -EFAULT;
2031 						return 0;
2032 					}
2033 					if (__copy_from_user(p, up, ksglen))
2034 						return -EFAULT;
2035 					up += ksglen;
2036 					usglen -= ksglen;
2037 				}
2038 				++k;
2039 				if (k >= schp->k_use_sg)
2040 					return 0;
2041 			}
2042 		}
2043 	}
2044 	return 0;
2045 }
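
/*
 * Example (illustrative) of the tandem walk above: with two user iovecs
 * of 1 KiB and 3 KiB and kernel sg elements of 2 KiB each (dxfer_len
 * 4 KiB), the nested loops copy 1 KiB (first iovec) into element 0,
 * then 1 KiB completing element 0 and 2 KiB filling element 1 from the
 * second iovec.
 */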
2046 
2047 static int
2048 sg_u_iovec(sg_io_hdr_t * hp, int sg_num, int ind,
2049 	   int wr_xf, int *countp, unsigned char __user **up)
2050 {
2051 	int num_xfer = (int) hp->dxfer_len;
2052 	unsigned char __user *p = hp->dxferp;
2053 	int count;
2054 
2055 	if (0 == sg_num) {
2056 		if (wr_xf && ('\0' == hp->interface_id))
2057 			count = (int) hp->flags;	/* holds "old" input_size */
2058 		else
2059 			count = num_xfer;
2060 	} else {
2061 		sg_iovec_t iovec;
2062 		if (__copy_from_user(&iovec, p + ind*SZ_SG_IOVEC, SZ_SG_IOVEC))
2063 			return -EFAULT;
2064 		p = iovec.iov_base;
2065 		count = (int) iovec.iov_len;
2066 	}
2067 	if (!access_ok(wr_xf ? VERIFY_READ : VERIFY_WRITE, p, count))
2068 		return -EFAULT;
2069 	if (up)
2070 		*up = p;
2071 	if (countp)
2072 		*countp = count;
2073 	return 0;
2074 }
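
/*
 * Example (illustrative): with sg_num == 2 and sg_iovec entries of
 * {base0, 1024} and {base1, 3072}, calls with ind == 0 and ind == 1
 * return (base0, 1024) then (base1, 3072), each validated with
 * access_ok(); with sg_num == 0 the single (dxferp, dxfer_len) pair
 * is returned instead.
 */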
2075 
2076 static void
2077 sg_remove_scat(Sg_scatter_hold * schp)
2078 {
2079 	SCSI_LOG_TIMEOUT(4, printk("sg_remove_scat: k_use_sg=%d\n", schp->k_use_sg));
2080 	if (schp->buffer && (schp->sglist_len > 0)) {
2081 		struct scatterlist *sclp = (struct scatterlist *) schp->buffer;
2082 
2083 		if (schp->dio_in_use) {
2084 #ifdef SG_ALLOW_DIO_CODE
2085 			st_unmap_user_pages(sclp, schp->k_use_sg, TRUE);
2086 #endif
2087 		} else {
2088 			int k;
2089 
2090 			for (k = 0; (k < schp->k_use_sg) && sg_scatg2virt(sclp);
2091 			     ++k, ++sclp) {
2092 				SCSI_LOG_TIMEOUT(5, printk(
2093 				    "sg_remove_scat: k=%d, a=0x%p, len=%d\n",
2094 				    k, sg_scatg2virt(sclp), sclp->length));
2095 				sg_page_free(sg_scatg2virt(sclp), sclp->length);
2096 				sclp->page = NULL;
2097 				sclp->offset = 0;
2098 				sclp->length = 0;
2099 			}
2100 		}
2101 		sg_page_free(schp->buffer, schp->sglist_len);
2102 	} else if (schp->buffer)
2103 		sg_page_free(schp->buffer, schp->b_malloc_len);
2104 	memset(schp, 0, sizeof (*schp));
2105 }
2106 
2107 static int
2108 sg_read_xfer(Sg_request * srp)
2109 {
2110 	sg_io_hdr_t *hp = &srp->header;
2111 	Sg_scatter_hold *schp = &srp->data;
2112 	int num_xfer = 0;
2113 	int j, k, onum, usglen, ksglen, res;
2114 	int iovec_count = (int) hp->iovec_count;
2115 	int dxfer_dir = hp->dxfer_direction;
2116 	unsigned char *p;
2117 	unsigned char __user *up;
2118 	int new_interface = ('\0' == hp->interface_id) ? 0 : 1;
2119 
2120 	if ((SG_DXFER_UNKNOWN == dxfer_dir) || (SG_DXFER_FROM_DEV == dxfer_dir)
2121 	    || (SG_DXFER_TO_FROM_DEV == dxfer_dir)) {
2122 		num_xfer = hp->dxfer_len;
2123 		if (schp->bufflen < num_xfer)
2124 			num_xfer = schp->bufflen;
2125 	}
2126 	if ((num_xfer <= 0) || (schp->dio_in_use) ||
2127 	    (new_interface
2128 	     && ((SG_FLAG_NO_DXFER | SG_FLAG_MMAP_IO) & hp->flags)))
2129 		return 0;
2130 
2131 	SCSI_LOG_TIMEOUT(4, printk("sg_read_xfer: num_xfer=%d, iovec_count=%d, k_use_sg=%d\n",
2132 			  num_xfer, iovec_count, schp->k_use_sg));
2133 	if (iovec_count) {
2134 		onum = iovec_count;
2135 		if (!access_ok(VERIFY_READ, hp->dxferp, SZ_SG_IOVEC * onum))
2136 			return -EFAULT;
2137 	} else
2138 		onum = 1;
2139 
2140 	if (0 == schp->k_use_sg) {	/* kernel has single buffer */
2141 		for (j = 0, p = schp->buffer; j < onum; ++j) {
2142 			res = sg_u_iovec(hp, iovec_count, j, 0, &usglen, &up);
2143 			if (res)
2144 				return res;
2145 			usglen = (num_xfer > usglen) ? usglen : num_xfer;
2146 			if (__copy_to_user(up, p, usglen))
2147 				return -EFAULT;
2148 			p += usglen;
2149 			num_xfer -= usglen;
2150 			if (num_xfer <= 0)
2151 				return 0;
2152 		}
2153 	} else {		/* kernel using scatter gather list */
2154 		struct scatterlist *sclp = (struct scatterlist *) schp->buffer;
2155 
2156 		ksglen = (int) sclp->length;
2157 		p = sg_scatg2virt(sclp);
2158 		for (j = 0, k = 0; j < onum; ++j) {
2159 			res = sg_u_iovec(hp, iovec_count, j, 0, &usglen, &up);
2160 			if (res)
2161 				return res;
2162 
2163 			for (; p; ++sclp, ksglen = (int) sclp->length,
2164 				  p = sg_scatg2virt(sclp)) {
2165 				if (usglen <= 0)
2166 					break;
2167 				if (ksglen > usglen) {
2168 					if (usglen >= num_xfer) {
2169 						if (__copy_to_user
2170 						    (up, p, num_xfer))
2171 							return -EFAULT;
2172 						return 0;
2173 					}
2174 					if (__copy_to_user(up, p, usglen))
2175 						return -EFAULT;
2176 					p += usglen;
2177 					ksglen -= usglen;
2178 					break;
2179 				} else {
2180 					if (ksglen >= num_xfer) {
2181 						if (__copy_to_user
2182 						    (up, p, num_xfer))
2183 							return -EFAULT;
2184 						return 0;
2185 					}
2186 					if (__copy_to_user(up, p, ksglen))
2187 						return -EFAULT;
2188 					up += ksglen;
2189 					usglen -= ksglen;
2190 				}
2191 				++k;
2192 				if (k >= schp->k_use_sg)
2193 					return 0;
2194 			}
2195 		}
2196 	}
2197 	return 0;
2198 }
2199 
2200 static int
2201 sg_read_oxfer(Sg_request * srp, char __user *outp, int num_read_xfer)
2202 {
2203 	Sg_scatter_hold *schp = &srp->data;
2204 
2205 	SCSI_LOG_TIMEOUT(4, printk("sg_read_oxfer: num_read_xfer=%d\n",
2206 				   num_read_xfer));
2207 	if ((!outp) || (num_read_xfer <= 0))
2208 		return 0;
2209 	if (schp->k_use_sg > 0) {
2210 		int k, num;
2211 		struct scatterlist *sclp = (struct scatterlist *) schp->buffer;
2212 
2213 		for (k = 0; (k < schp->k_use_sg) && sg_scatg2virt(sclp);
2214 		     ++k, ++sclp) {
2215 			num = (int) sclp->length;
2216 			if (num > num_read_xfer) {
2217 				if (__copy_to_user
2218 				    (outp, sg_scatg2virt(sclp), num_read_xfer))
2219 					return -EFAULT;
2220 				break;
2221 			} else {
2222 				if (__copy_to_user
2223 				    (outp, sg_scatg2virt(sclp), num))
2224 					return -EFAULT;
2225 				num_read_xfer -= num;
2226 				if (num_read_xfer <= 0)
2227 					break;
2228 				outp += num;
2229 			}
2230 		}
2231 	} else {
2232 		if (__copy_to_user(outp, schp->buffer, num_read_xfer))
2233 			return -EFAULT;
2234 	}
2235 	return 0;
2236 }
2237 
2238 static void
2239 sg_build_reserve(Sg_fd * sfp, int req_size)
2240 {
2241 	Sg_scatter_hold *schp = &sfp->reserve;
2242 
2243 	SCSI_LOG_TIMEOUT(4, printk("sg_build_reserve: req_size=%d\n", req_size));
2244 	do {
2245 		if (req_size < PAGE_SIZE)
2246 			req_size = PAGE_SIZE;
2247 		if (0 == sg_build_indirect(schp, sfp, req_size))
2248 			return;
2249 		else
2250 			sg_remove_scat(schp);
2251 		req_size >>= 1;	/* divide by 2 */
2252 	} while (req_size > (PAGE_SIZE / 2));
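
/*
 * Example (assuming 4 KiB pages): a 256 KiB reservation request that
 * cannot be satisfied is retried at 128 KiB, 64 KiB, ... down to
 * PAGE_SIZE, after which sg_build_reserve() gives up.
 */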
2253 }
2254 
2255 static void
2256 sg_link_reserve(Sg_fd * sfp, Sg_request * srp, int size)
2257 {
2258 	Sg_scatter_hold *req_schp = &srp->data;
2259 	Sg_scatter_hold *rsv_schp = &sfp->reserve;
2260 
2261 	srp->res_used = 1;
2262 	SCSI_LOG_TIMEOUT(4, printk("sg_link_reserve: size=%d\n", size));
2263 	size = (size + 1) & (~1);	/* round to even for aha1542 */
2264 	if (rsv_schp->k_use_sg > 0) {
2265 		int k, num;
2266 		int rem = size;
2267 		struct scatterlist *sclp =
2268 		    (struct scatterlist *) rsv_schp->buffer;
2269 
2270 		for (k = 0; k < rsv_schp->k_use_sg; ++k, ++sclp) {
2271 			num = (int) sclp->length;
2272 			if (rem <= num) {
2273 				if (0 == k) {
2274 					req_schp->k_use_sg = 0;
2275 					req_schp->buffer = sg_scatg2virt(sclp);
2276 				} else {
2277 					sfp->save_scat_len = num;
2278 					sclp->length = (unsigned) rem;
2279 					req_schp->k_use_sg = k + 1;
2280 					req_schp->sglist_len =
2281 					    rsv_schp->sglist_len;
2282 					req_schp->buffer = rsv_schp->buffer;
2283 				}
2284 				req_schp->bufflen = size;
2285 				req_schp->b_malloc_len = rsv_schp->b_malloc_len;
2286 				break;
2287 			} else
2288 				rem -= num;
2289 		}
2290 		if (k >= rsv_schp->k_use_sg)
2291 			SCSI_LOG_TIMEOUT(1, printk("sg_link_reserve: BAD size\n"));
2292 	} else {
2293 		req_schp->k_use_sg = 0;
2294 		req_schp->bufflen = size;
2295 		req_schp->buffer = rsv_schp->buffer;
2296 		req_schp->b_malloc_len = rsv_schp->b_malloc_len;
2297 	}
2298 }
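
/*
 * Example (illustrative): with a reserve list of 32 KiB elements and
 * size == 40 KiB, the loop above stops in the second element, records
 * its original length in sfp->save_scat_len, trims that element to
 * 8 KiB and hands the request the first two elements (k_use_sg == 2).
 */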
2299 
2300 static void
2301 sg_unlink_reserve(Sg_fd * sfp, Sg_request * srp)
2302 {
2303 	Sg_scatter_hold *req_schp = &srp->data;
2304 	Sg_scatter_hold *rsv_schp = &sfp->reserve;
2305 
2306 	SCSI_LOG_TIMEOUT(4, printk("sg_unlink_reserve: req->k_use_sg=%d\n",
2307 				   (int) req_schp->k_use_sg));
2308 	if ((rsv_schp->k_use_sg > 0) && (req_schp->k_use_sg > 0)) {
2309 		struct scatterlist *sclp =
2310 		    (struct scatterlist *) rsv_schp->buffer;
2311 
2312 		if (sfp->save_scat_len > 0)
2313 			(sclp + (req_schp->k_use_sg - 1))->length =
2314 			    (unsigned) sfp->save_scat_len;
2315 		else
2316 			SCSI_LOG_TIMEOUT(1, printk("sg_unlink_reserve: BAD save_scat_len\n"));
2317 	}
2318 	req_schp->k_use_sg = 0;
2319 	req_schp->bufflen = 0;
2320 	req_schp->buffer = NULL;
2321 	req_schp->sglist_len = 0;
2322 	sfp->save_scat_len = 0;
2323 	srp->res_used = 0;
2324 }
2325 
2326 static Sg_request *
2327 sg_get_rq_mark(Sg_fd * sfp, int pack_id)
2328 {
2329 	Sg_request *resp;
2330 	unsigned long iflags;
2331 
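	/*
	 * srp->done encodes request state: 0 = command still active,
	 * 1 = response ready to be read, 2 = response claimed by a
	 * reader (sg_proc_debug_helper() prints these as "act:"/"prior:",
	 * "rcv:" and "fin:" respectively).
	 */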
2332 	write_lock_irqsave(&sfp->rq_list_lock, iflags);
2333 	for (resp = sfp->headrp; resp; resp = resp->nextrp) {
2334 		/* look for requests that are ready + not SG_IO owned */
2335 		if ((1 == resp->done) && (!resp->sg_io_owned) &&
2336 		    ((-1 == pack_id) || (resp->header.pack_id == pack_id))) {
2337 			resp->done = 2;	/* guard against other readers */
2338 			break;
2339 		}
2340 	}
2341 	write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
2342 	return resp;
2343 }
2344 
2345 #ifdef CONFIG_SCSI_PROC_FS
2346 static Sg_request *
2347 sg_get_nth_request(Sg_fd * sfp, int nth)
2348 {
2349 	Sg_request *resp;
2350 	unsigned long iflags;
2351 	int k;
2352 
2353 	read_lock_irqsave(&sfp->rq_list_lock, iflags);
2354 	for (k = 0, resp = sfp->headrp; resp && (k < nth);
2355 	     ++k, resp = resp->nextrp) ;
2356 	read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
2357 	return resp;
2358 }
2359 #endif
2360 
2361 /* always adds to end of list */
2362 static Sg_request *
2363 sg_add_request(Sg_fd * sfp)
2364 {
2365 	int k;
2366 	unsigned long iflags;
2367 	Sg_request *resp;
2368 	Sg_request *rp = sfp->req_arr;
2369 
2370 	write_lock_irqsave(&sfp->rq_list_lock, iflags);
2371 	resp = sfp->headrp;
2372 	if (!resp) {
2373 		memset(rp, 0, sizeof (Sg_request));
2374 		rp->parentfp = sfp;
2375 		resp = rp;
2376 		sfp->headrp = resp;
2377 	} else {
2378 		if (0 == sfp->cmd_q)
2379 			resp = NULL;	/* command queuing disallowed */
2380 		else {
2381 			for (k = 0; k < SG_MAX_QUEUE; ++k, ++rp) {
2382 				if (!rp->parentfp)
2383 					break;
2384 			}
2385 			if (k < SG_MAX_QUEUE) {
2386 				memset(rp, 0, sizeof (Sg_request));
2387 				rp->parentfp = sfp;
2388 				while (resp->nextrp)
2389 					resp = resp->nextrp;
2390 				resp->nextrp = rp;
2391 				resp = rp;
2392 			} else
2393 				resp = NULL;
2394 		}
2395 	}
2396 	if (resp) {
2397 		resp->nextrp = NULL;
2398 		resp->header.duration = jiffies;
2399 		resp->my_cmdp = NULL;
2400 	}
2401 	write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
2402 	return resp;
2403 }
2404 
2405 /* Return of 1 for found; 0 for not found */
2406 static int
2407 sg_remove_request(Sg_fd * sfp, Sg_request * srp)
2408 {
2409 	Sg_request *prev_rp;
2410 	Sg_request *rp;
2411 	unsigned long iflags;
2412 	int res = 0;
2413 
2414 	if ((!sfp) || (!srp) || (!sfp->headrp))
2415 		return res;
2416 	write_lock_irqsave(&sfp->rq_list_lock, iflags);
2417 	prev_rp = sfp->headrp;
2418 	if (srp == prev_rp) {
2419 		sfp->headrp = prev_rp->nextrp;
2420 		prev_rp->parentfp = NULL;
2421 		res = 1;
2422 	} else {
2423 		while ((rp = prev_rp->nextrp)) {
2424 			if (srp == rp) {
2425 				prev_rp->nextrp = rp->nextrp;
2426 				rp->parentfp = NULL;
2427 				res = 1;
2428 				break;
2429 			}
2430 			prev_rp = rp;
2431 		}
2432 	}
2433 	write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
2434 	return res;
2435 }
2436 
2437 #ifdef CONFIG_SCSI_PROC_FS
2438 static Sg_fd *
2439 sg_get_nth_sfp(Sg_device * sdp, int nth)
2440 {
2441 	Sg_fd *resp;
2442 	unsigned long iflags;
2443 	int k;
2444 
2445 	read_lock_irqsave(&sg_dev_arr_lock, iflags);
2446 	for (k = 0, resp = sdp->headfp; resp && (k < nth);
2447 	     ++k, resp = resp->nextfp) ;
2448 	read_unlock_irqrestore(&sg_dev_arr_lock, iflags);
2449 	return resp;
2450 }
2451 #endif
2452 
2453 static Sg_fd *
2454 sg_add_sfp(Sg_device * sdp, int dev)
2455 {
2456 	Sg_fd *sfp;
2457 	unsigned long iflags;
2458 
2459 	sfp = (Sg_fd *) sg_page_malloc(sizeof (Sg_fd), 0, NULL);
2460 	if (!sfp)
2461 		return NULL;
2462 	memset(sfp, 0, sizeof (Sg_fd));
2463 	init_waitqueue_head(&sfp->read_wait);
2464 	rwlock_init(&sfp->rq_list_lock);
2465 
2466 	sfp->timeout = SG_DEFAULT_TIMEOUT;
2467 	sfp->timeout_user = SG_DEFAULT_TIMEOUT_USER;
2468 	sfp->force_packid = SG_DEF_FORCE_PACK_ID;
2469 	sfp->low_dma = (SG_DEF_FORCE_LOW_DMA == 0) ?
2470 	    sdp->device->host->unchecked_isa_dma : 1;
2471 	sfp->cmd_q = SG_DEF_COMMAND_Q;
2472 	sfp->keep_orphan = SG_DEF_KEEP_ORPHAN;
2473 	sfp->parentdp = sdp;
2474 	write_lock_irqsave(&sg_dev_arr_lock, iflags);
2475 	if (!sdp->headfp)
2476 		sdp->headfp = sfp;
2477 	else {			/* add to tail of existing list */
2478 		Sg_fd *pfp = sdp->headfp;
2479 		while (pfp->nextfp)
2480 			pfp = pfp->nextfp;
2481 		pfp->nextfp = sfp;
2482 	}
2483 	write_unlock_irqrestore(&sg_dev_arr_lock, iflags);
2484 	SCSI_LOG_TIMEOUT(3, printk("sg_add_sfp: sfp=0x%p\n", sfp));
2485 	sg_build_reserve(sfp, sg_big_buff);
2486 	SCSI_LOG_TIMEOUT(3, printk("sg_add_sfp:   bufflen=%d, k_use_sg=%d\n",
2487 			   sfp->reserve.bufflen, sfp->reserve.k_use_sg));
2488 	return sfp;
2489 }
2490 
2491 static void
2492 __sg_remove_sfp(Sg_device * sdp, Sg_fd * sfp)
2493 {
2494 	Sg_fd *fp;
2495 	Sg_fd *prev_fp;
2496 
2497 	prev_fp = sdp->headfp;
2498 	if (sfp == prev_fp)
2499 		sdp->headfp = prev_fp->nextfp;
2500 	else {
2501 		while ((fp = prev_fp->nextfp)) {
2502 			if (sfp == fp) {
2503 				prev_fp->nextfp = fp->nextfp;
2504 				break;
2505 			}
2506 			prev_fp = fp;
2507 		}
2508 	}
2509 	if (sfp->reserve.bufflen > 0) {
2510 		SCSI_LOG_TIMEOUT(6,
2511 			printk("__sg_remove_sfp:    bufflen=%d, k_use_sg=%d\n",
2512 			(int) sfp->reserve.bufflen, (int) sfp->reserve.k_use_sg));
2513 		if (sfp->mmap_called)
2514 			sg_rb_correct4mmap(&sfp->reserve, 0);	/* undo correction */
2515 		sg_remove_scat(&sfp->reserve);
2516 	}
2517 	sfp->parentdp = NULL;
2518 	SCSI_LOG_TIMEOUT(6, printk("__sg_remove_sfp:    sfp=0x%p\n", sfp));
2519 	sg_page_free((char *) sfp, sizeof (Sg_fd));
2520 }
2521 
2522 /* Returns 0 in normal case, 1 when detached and sdp object removed */
2523 static int
2524 sg_remove_sfp(Sg_device * sdp, Sg_fd * sfp)
2525 {
2526 	Sg_request *srp;
2527 	Sg_request *tsrp;
2528 	int dirty = 0;
2529 	int res = 0;
2530 
2531 	for (srp = sfp->headrp; srp; srp = tsrp) {
2532 		tsrp = srp->nextrp;
2533 		if (sg_srp_done(srp, sfp))
2534 			sg_finish_rem_req(srp);
2535 		else
2536 			++dirty;
2537 	}
2538 	if (0 == dirty) {
2539 		unsigned long iflags;
2540 
2541 		write_lock_irqsave(&sg_dev_arr_lock, iflags);
2542 		__sg_remove_sfp(sdp, sfp);
2543 		if (sdp->detached && (NULL == sdp->headfp)) {
2544 			int k, maxd;
2545 
2546 			maxd = sg_dev_max;
2547 			for (k = 0; k < maxd; ++k) {
2548 				if (sdp == sg_dev_arr[k])
2549 					break;
2550 			}
2551 			if (k < maxd)
2552 				sg_dev_arr[k] = NULL;
2553 			kfree((char *) sdp);
2554 			res = 1;
2555 		}
2556 		write_unlock_irqrestore(&sg_dev_arr_lock, iflags);
2557 	} else {
2558 		/* Take a reference to inhibit unloading of sg and the
2559 		 * associated adapter driver; only bump the access count
2560 		 * if we actually succeed in pinning the host module */
2561 		scsi_device_get(sdp->device);	/* XXX: retval ignored? */
2562 		sfp->closed = 1;	/* flag dirty state on this fd */
2563 		SCSI_LOG_TIMEOUT(1, printk("sg_remove_sfp: worrisome, %d writes pending\n",
2564 				  dirty));
2565 	}
2566 	return res;
2567 }
2568 
2569 static int
2570 sg_res_in_use(Sg_fd * sfp)
2571 {
2572 	const Sg_request *srp;
2573 	unsigned long iflags;
2574 
2575 	read_lock_irqsave(&sfp->rq_list_lock, iflags);
2576 	for (srp = sfp->headrp; srp; srp = srp->nextrp)
2577 		if (srp->res_used)
2578 			break;
2579 	read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
2580 	return srp ? 1 : 0;
2581 }
2582 
2583 /* If retSzp==NULL want exact size or fail */
2584 static char *
2585 sg_page_malloc(int rqSz, int lowDma, int *retSzp)
2586 {
2587 	char *resp = NULL;
2588 	int page_mask;
2589 	int order, a_size;
2590 	int resSz = rqSz;
2591 
2592 	if (rqSz <= 0)
2593 		return resp;
2594 
2595 	if (lowDma)
2596 		page_mask = GFP_ATOMIC | GFP_DMA | __GFP_NOWARN;
2597 	else
2598 		page_mask = GFP_ATOMIC | __GFP_NOWARN;
2599 
2600 	for (order = 0, a_size = PAGE_SIZE; a_size < rqSz;
2601 	     order++, a_size <<= 1) ;
2602 	resp = (char *) __get_free_pages(page_mask, order);
2603 	while ((!resp) && order && retSzp) {
2604 		--order;
2605 		a_size >>= 1;	/* divide by 2, until PAGE_SIZE */
2606 		resp = (char *) __get_free_pages(page_mask, order);	/* try half */
2607 		resSz = a_size;
2608 	}
2609 	if (resp) {
2610 		if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
2611 			memset(resp, 0, resSz);
2612 		if (retSzp)
2613 			*retSzp = resSz;
2614 	}
2615 	return resp;
2616 }
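
/*
 * Worked example (assuming 4 KiB pages): rqSz = 70000 needs order 5,
 * since a_size doubles 4096 -> 8192 -> ... -> 131072, the first size
 * >= rqSz, so up to 128 KiB is requested; if that fails and
 * retSzp != NULL the order is halved until a smaller allocation
 * succeeds, with the actual size returned through *retSzp.
 */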
2617 
2618 static void
2619 sg_page_free(char *buff, int size)
2620 {
2621 	int order, a_size;
2622 
2623 	if (!buff)
2624 		return;
2625 	for (order = 0, a_size = PAGE_SIZE; a_size < size;
2626 	     order++, a_size <<= 1) ;
2627 	free_pages((unsigned long) buff, order);
2628 }
2629 
2630 #ifndef MAINTENANCE_IN_CMD
2631 #define MAINTENANCE_IN_CMD 0xa3
2632 #endif
2633 
2634 static unsigned char allow_ops[] = { TEST_UNIT_READY, REQUEST_SENSE,
2635 	INQUIRY, READ_CAPACITY, READ_BUFFER, READ_6, READ_10, READ_12,
2636 	READ_16, MODE_SENSE, MODE_SENSE_10, LOG_SENSE, REPORT_LUNS,
2637 	SERVICE_ACTION_IN, RECEIVE_DIAGNOSTIC, READ_LONG, MAINTENANCE_IN_CMD
2638 };
2639 
2640 static int
2641 sg_allow_access(unsigned char opcode, char dev_type)
2642 {
2643 	int k;
2644 
2645 	if (TYPE_SCANNER == dev_type)	/* TYPE_ROM may be a burner */
2646 		return 1;
2647 	for (k = 0; k < sizeof (allow_ops); ++k) {
2648 		if (opcode == allow_ops[k])
2649 			return 1;
2650 	}
2651 	return 0;
2652 }
2653 
2654 #ifdef CONFIG_SCSI_PROC_FS
2655 static int
2656 sg_last_dev(void)
2657 {
2658 	int k;
2659 	unsigned long iflags;
2660 
2661 	read_lock_irqsave(&sg_dev_arr_lock, iflags);
2662 	for (k = sg_dev_max - 1; k >= 0; --k)
2663 		if (sg_dev_arr[k] && sg_dev_arr[k]->device)
2664 			break;
2665 	read_unlock_irqrestore(&sg_dev_arr_lock, iflags);
2666 	return k + 1;		/* origin 1 */
2667 }
2668 #endif
2669 
2670 static Sg_device *
2671 sg_get_dev(int dev)
2672 {
2673 	Sg_device *sdp = NULL;
2674 	unsigned long iflags;
2675 
2676 	if (sg_dev_arr && (dev >= 0)) {
2677 		read_lock_irqsave(&sg_dev_arr_lock, iflags);
2678 		if (dev < sg_dev_max)
2679 			sdp = sg_dev_arr[dev];
2680 		read_unlock_irqrestore(&sg_dev_arr_lock, iflags);
2681 	}
2682 	return sdp;
2683 }
2684 
2685 #ifdef CONFIG_SCSI_PROC_FS
2686 
2687 static struct proc_dir_entry *sg_proc_sgp = NULL;
2688 
2689 static char sg_proc_sg_dirname[] = "scsi/sg";
2690 
2691 static int sg_proc_seq_show_int(struct seq_file *s, void *v);
2692 
2693 static int sg_proc_single_open_adio(struct inode *inode, struct file *file);
2694 static ssize_t sg_proc_write_adio(struct file *filp, const char __user *buffer,
2695 			          size_t count, loff_t *off);
2696 static struct file_operations adio_fops = {
2697 	/* .owner, .read and .llseek added in sg_proc_init() */
2698 	.open = sg_proc_single_open_adio,
2699 	.write = sg_proc_write_adio,
2700 	.release = single_release,
2701 };
2702 
2703 static int sg_proc_single_open_dressz(struct inode *inode, struct file *file);
2704 static ssize_t sg_proc_write_dressz(struct file *filp,
2705 		const char __user *buffer, size_t count, loff_t *off);
2706 static struct file_operations dressz_fops = {
2707 	.open = sg_proc_single_open_dressz,
2708 	.write = sg_proc_write_dressz,
2709 	.release = single_release,
2710 };
2711 
2712 static int sg_proc_seq_show_version(struct seq_file *s, void *v);
2713 static int sg_proc_single_open_version(struct inode *inode, struct file *file);
2714 static struct file_operations version_fops = {
2715 	.open = sg_proc_single_open_version,
2716 	.release = single_release,
2717 };
2718 
2719 static int sg_proc_seq_show_devhdr(struct seq_file *s, void *v);
2720 static int sg_proc_single_open_devhdr(struct inode *inode, struct file *file);
2721 static struct file_operations devhdr_fops = {
2722 	.open = sg_proc_single_open_devhdr,
2723 	.release = single_release,
2724 };
2725 
2726 static int sg_proc_seq_show_dev(struct seq_file *s, void *v);
2727 static int sg_proc_open_dev(struct inode *inode, struct file *file);
2728 static void * dev_seq_start(struct seq_file *s, loff_t *pos);
2729 static void * dev_seq_next(struct seq_file *s, void *v, loff_t *pos);
2730 static void dev_seq_stop(struct seq_file *s, void *v);
2731 static struct file_operations dev_fops = {
2732 	.open = sg_proc_open_dev,
2733 	.release = seq_release,
2734 };
2735 static struct seq_operations dev_seq_ops = {
2736 	.start = dev_seq_start,
2737 	.next  = dev_seq_next,
2738 	.stop  = dev_seq_stop,
2739 	.show  = sg_proc_seq_show_dev,
2740 };
2741 
2742 static int sg_proc_seq_show_devstrs(struct seq_file *s, void *v);
2743 static int sg_proc_open_devstrs(struct inode *inode, struct file *file);
2744 static struct file_operations devstrs_fops = {
2745 	.open = sg_proc_open_devstrs,
2746 	.release = seq_release,
2747 };
2748 static struct seq_operations devstrs_seq_ops = {
2749 	.start = dev_seq_start,
2750 	.next  = dev_seq_next,
2751 	.stop  = dev_seq_stop,
2752 	.show  = sg_proc_seq_show_devstrs,
2753 };
2754 
2755 static int sg_proc_seq_show_debug(struct seq_file *s, void *v);
2756 static int sg_proc_open_debug(struct inode *inode, struct file *file);
2757 static struct file_operations debug_fops = {
2758 	.open = sg_proc_open_debug,
2759 	.release = seq_release,
2760 };
2761 static struct seq_operations debug_seq_ops = {
2762 	.start = dev_seq_start,
2763 	.next  = dev_seq_next,
2764 	.stop  = dev_seq_stop,
2765 	.show  = sg_proc_seq_show_debug,
2766 };
2767 
2768 
2769 struct sg_proc_leaf {
2770 	const char * name;
2771 	struct file_operations * fops;
2772 };
2773 
2774 static struct sg_proc_leaf sg_proc_leaf_arr[] = {
2775 	{"allow_dio", &adio_fops},
2776 	{"debug", &debug_fops},
2777 	{"def_reserved_size", &dressz_fops},
2778 	{"device_hdr", &devhdr_fops},
2779 	{"devices", &dev_fops},
2780 	{"device_strs", &devstrs_fops},
2781 	{"version", &version_fops}
2782 };
2783 
2784 static int
2785 sg_proc_init(void)
2786 {
2787 	int k, mask;
2788 	int num_leaves =
2789 	    sizeof (sg_proc_leaf_arr) / sizeof (sg_proc_leaf_arr[0]);
2790 	struct proc_dir_entry *pdep;
2791 	struct sg_proc_leaf * leaf;
2792 
2793 	sg_proc_sgp = create_proc_entry(sg_proc_sg_dirname,
2794 					S_IFDIR | S_IRUGO | S_IXUGO, NULL);
2795 	if (!sg_proc_sgp)
2796 		return 1;
2797 	for (k = 0; k < num_leaves; ++k) {
2798 		leaf = &sg_proc_leaf_arr[k];
2799 		mask = leaf->fops->write ? S_IRUGO | S_IWUSR : S_IRUGO;
2800 		pdep = create_proc_entry(leaf->name, mask, sg_proc_sgp);
2801 		if (pdep) {
2802 			leaf->fops->owner = THIS_MODULE;
2803 			leaf->fops->read = seq_read;
2804 			leaf->fops->llseek = seq_lseek;
2805 			pdep->proc_fops = leaf->fops;
2806 		}
2807 	}
2808 	return 0;
2809 }
2810 
2811 static void
2812 sg_proc_cleanup(void)
2813 {
2814 	int k;
2815 	int num_leaves =
2816 	    sizeof (sg_proc_leaf_arr) / sizeof (sg_proc_leaf_arr[0]);
2817 
2818 	if (!sg_proc_sgp)
2819 		return;
2820 	for (k = 0; k < num_leaves; ++k)
2821 		remove_proc_entry(sg_proc_leaf_arr[k].name, sg_proc_sgp);
2822 	remove_proc_entry(sg_proc_sg_dirname, NULL);
2823 }
2824 
2825 
2826 static int sg_proc_seq_show_int(struct seq_file *s, void *v)
2827 {
2828 	seq_printf(s, "%d\n", *((int *)s->private));
2829 	return 0;
2830 }
2831 
2832 static int sg_proc_single_open_adio(struct inode *inode, struct file *file)
2833 {
2834 	return single_open(file, sg_proc_seq_show_int, &sg_allow_dio);
2835 }
2836 
2837 static ssize_t
2838 sg_proc_write_adio(struct file *filp, const char __user *buffer,
2839 		   size_t count, loff_t *off)
2840 {
2841 	int num;
2842 	char buff[11];
2843 
2844 	if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
2845 		return -EACCES;
2846 	num = (count < 10) ? count : 10;
2847 	if (copy_from_user(buff, buffer, num))
2848 		return -EFAULT;
2849 	buff[num] = '\0';
2850 	sg_allow_dio = simple_strtoul(buff, NULL, 10) ? 1 : 0;
2851 	return count;
2852 }
2853 
2854 static int sg_proc_single_open_dressz(struct inode *inode, struct file *file)
2855 {
2856 	return single_open(file, sg_proc_seq_show_int, &sg_big_buff);
2857 }
2858 
2859 static ssize_t
2860 sg_proc_write_dressz(struct file *filp, const char __user *buffer,
2861 		     size_t count, loff_t *off)
2862 {
2863 	int num;
2864 	unsigned long k = ULONG_MAX;
2865 	char buff[11];
2866 
2867 	if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
2868 		return -EACCES;
2869 	num = (count < 10) ? count : 10;
2870 	if (copy_from_user(buff, buffer, num))
2871 		return -EFAULT;
2872 	buff[num] = '\0';
2873 	k = simple_strtoul(buff, NULL, 10);
2874 	if (k <= 1048576) {	/* limit "big buff" to 1 MB */
2875 		sg_big_buff = k;
2876 		return count;
2877 	}
2878 	return -ERANGE;
2879 }
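
/*
 * Illustrative shell usage (not part of the driver), assuming procfs is
 * mounted at /proc:
 *
 *     echo 131072 > /proc/scsi/sg/def_reserved_size
 *     cat /proc/scsi/sg/def_reserved_size
 *
 * Values above 1048576 are rejected with -ERANGE by the handler above.
 */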
2880 
2881 static int sg_proc_seq_show_version(struct seq_file *s, void *v)
2882 {
2883 	seq_printf(s, "%d\t%s [%s]\n", sg_version_num, SG_VERSION_STR,
2884 		   sg_version_date);
2885 	return 0;
2886 }
2887 
2888 static int sg_proc_single_open_version(struct inode *inode, struct file *file)
2889 {
2890 	return single_open(file, sg_proc_seq_show_version, NULL);
2891 }
2892 
2893 static int sg_proc_seq_show_devhdr(struct seq_file *s, void *v)
2894 {
2895 	seq_printf(s, "host\tchan\tid\tlun\ttype\topens\tqdepth\tbusy\t"
2896 		   "online\n");
2897 	return 0;
2898 }
2899 
2900 static int sg_proc_single_open_devhdr(struct inode *inode, struct file *file)
2901 {
2902 	return single_open(file, sg_proc_seq_show_devhdr, NULL);
2903 }
2904 
2905 struct sg_proc_deviter {
2906 	loff_t	index;
2907 	size_t	max;
2908 };
2909 
2910 static void * dev_seq_start(struct seq_file *s, loff_t *pos)
2911 {
2912 	struct sg_proc_deviter * it = kmalloc(sizeof(*it), GFP_KERNEL);
2913 
2914 	if (!it)
2915 		return NULL;
2916 	if (NULL == sg_dev_arr)
2917 		goto err1;
2918 	it->index = *pos;
2919 	it->max = sg_last_dev();
2920 	if (it->index >= it->max)
2921 		goto err1;
2922 	return it;
2923 err1:
2924 	kfree(it);
2925 	return NULL;
2926 }
2927 
2928 static void * dev_seq_next(struct seq_file *s, void *v, loff_t *pos)
2929 {
2930 	struct sg_proc_deviter * it = (struct sg_proc_deviter *) v;
2931 
2932 	*pos = ++it->index;
2933 	return (it->index < it->max) ? it : NULL;
2934 }
2935 
2936 static void dev_seq_stop(struct seq_file *s, void *v)
2937 {
2938 	kfree(v);
2939 }
2940 
2941 static int sg_proc_open_dev(struct inode *inode, struct file *file)
2942 {
2943 	return seq_open(file, &dev_seq_ops);
2944 }
2945 
2946 static int sg_proc_seq_show_dev(struct seq_file *s, void *v)
2947 {
2948 	struct sg_proc_deviter * it = (struct sg_proc_deviter *) v;
2949 	Sg_device *sdp;
2950 	struct scsi_device *scsidp;
2951 
2952 	sdp = it ? sg_get_dev(it->index) : NULL;
2953 	if (sdp && (scsidp = sdp->device) && (!sdp->detached))
2954 		seq_printf(s, "%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\n",
2955 			      scsidp->host->host_no, scsidp->channel,
2956 			      scsidp->id, scsidp->lun, (int) scsidp->type,
2957 			      1,
2958 			      (int) scsidp->queue_depth,
2959 			      (int) scsidp->device_busy,
2960 			      (int) scsi_device_online(scsidp));
2961 	else
2962 		seq_printf(s, "-1\t-1\t-1\t-1\t-1\t-1\t-1\t-1\t-1\n");
2963 	return 0;
2964 }
2965 
2966 static int sg_proc_open_devstrs(struct inode *inode, struct file *file)
2967 {
2968 	return seq_open(file, &devstrs_seq_ops);
2969 }
2970 
2971 static int sg_proc_seq_show_devstrs(struct seq_file *s, void *v)
2972 {
2973 	struct sg_proc_deviter * it = (struct sg_proc_deviter *) v;
2974 	Sg_device *sdp;
2975 	struct scsi_device *scsidp;
2976 
2977 	sdp = it ? sg_get_dev(it->index) : NULL;
2978 	if (sdp && (scsidp = sdp->device) && (!sdp->detached))
2979 		seq_printf(s, "%8.8s\t%16.16s\t%4.4s\n",
2980 			   scsidp->vendor, scsidp->model, scsidp->rev);
2981 	else
2982 		seq_printf(s, "<no active device>\n");
2983 	return 0;
2984 }
2985 
2986 static void sg_proc_debug_helper(struct seq_file *s, Sg_device * sdp)
2987 {
2988 	int k, m, new_interface, blen, usg;
2989 	Sg_request *srp;
2990 	Sg_fd *fp;
2991 	const sg_io_hdr_t *hp;
2992 	const char * cp;
2993 
2994 	for (k = 0; (fp = sg_get_nth_sfp(sdp, k)); ++k) {
2995 		seq_printf(s, "   FD(%d): timeout=%dms bufflen=%d "
2996 			   "(res)sgat=%d low_dma=%d\n", k + 1,
2997 			   jiffies_to_msecs(fp->timeout),
2998 			   fp->reserve.bufflen,
2999 			   (int) fp->reserve.k_use_sg,
3000 			   (int) fp->low_dma);
3001 		seq_printf(s, "   cmd_q=%d f_packid=%d k_orphan=%d closed=%d\n",
3002 			   (int) fp->cmd_q, (int) fp->force_packid,
3003 			   (int) fp->keep_orphan, (int) fp->closed);
3004 		for (m = 0; (srp = sg_get_nth_request(fp, m)); ++m) {
3005 			hp = &srp->header;
3006 			new_interface = (hp->interface_id == '\0') ? 0 : 1;
3007 			if (srp->res_used) {
3008 				if (new_interface &&
3009 				    (SG_FLAG_MMAP_IO & hp->flags))
3010 					cp = "     mmap>> ";
3011 				else
3012 					cp = "     rb>> ";
3013 			} else {
3014 				if (SG_INFO_DIRECT_IO_MASK & hp->info)
3015 					cp = "     dio>> ";
3016 				else
3017 					cp = "     ";
3018 			}
3019 			seq_puts(s, cp);
3020 			blen = srp->my_cmdp ?
3021 				srp->my_cmdp->sr_bufflen : srp->data.bufflen;
3022 			usg = srp->my_cmdp ?
3023 				srp->my_cmdp->sr_use_sg : srp->data.k_use_sg;
3024 			seq_puts(s, srp->done ?
3025 				 ((1 == srp->done) ? "rcv:" : "fin:")
3026 				 : (srp->my_cmdp ? "act:" : "prior:"));
3027 			seq_printf(s, " id=%d blen=%d",
3028 				   srp->header.pack_id, blen);
3029 			if (srp->done)
3030 				seq_printf(s, " dur=%d", hp->duration);
3031 			else
3032 				seq_printf(s, " t_o/elap=%d/%d",
3033 				  new_interface ? hp->timeout : jiffies_to_msecs(fp->timeout),
3034 				  jiffies_to_msecs(hp->duration ? (jiffies - hp->duration) : 0));
3035 			seq_printf(s, "ms sgat=%d op=0x%02x\n", usg,
3036 				   (int) srp->data.cmd_opcode);
3037 		}
3038 		if (0 == m)
3039 			seq_printf(s, "     No requests active\n");
3040 	}
3041 }
3042 
3043 static int sg_proc_open_debug(struct inode *inode, struct file *file)
3044 {
3045 	return seq_open(file, &debug_seq_ops);
3046 }
3047 
3048 static int sg_proc_seq_show_debug(struct seq_file *s, void *v)
3049 {
3050 	struct sg_proc_deviter * it = (struct sg_proc_deviter *) v;
3051 	Sg_device *sdp;
3052 
3053 	if (it && (0 == it->index)) {
3054 		seq_printf(s, "dev_max(currently)=%d max_active_device=%d "
3055 			   "(origin 1)\n", sg_dev_max, (int)it->max);
3056 		seq_printf(s, " def_reserved_size=%d\n", sg_big_buff);
3057 	}
3058 	sdp = it ? sg_get_dev(it->index) : NULL;
3059 	if (sdp) {
3060 		struct scsi_device *scsidp = sdp->device;
3061 
3062 		if (NULL == scsidp) {
3063 			seq_printf(s, "device %d detached ??\n",
3064 				   (int)it->index);
3065 			return 0;
3066 		}
3067 
3068 		if (sg_get_nth_sfp(sdp, 0)) {
3069 			seq_printf(s, " >>> device=%s ",
3070 				sdp->disk->disk_name);
3071 			if (sdp->detached)
3072 				seq_printf(s, "detached pending close ");
3073 			else
3074 				seq_printf(s,
3075 				     "scsi%d chan=%d id=%d lun=%d   em=%d",
3076 				     scsidp->host->host_no,
3077 				     scsidp->channel, scsidp->id,
3078 				     scsidp->lun,
3079 				     scsidp->host->hostt->emulated);
3080 			seq_printf(s, " sg_tablesize=%d excl=%d\n",
3081 				   sdp->sg_tablesize, sdp->exclude);
3082 		}
3083 		sg_proc_debug_helper(s, sdp);
3084 	}
3085 	return 0;
3086 }
3087 
3088 #endif				/* CONFIG_SCSI_PROC_FS */
3089 
3090 module_init(init_sg);
3091 module_exit(exit_sg);
3092 MODULE_ALIAS_CHARDEV_MAJOR(SCSI_GENERIC_MAJOR);
3093