/*
 *  History:
 *  Started: Aug 9 by Lawrence Foard (entropy@world.std.com),
 *           to allow user process control of SCSI devices.
 *  Development Sponsored by Killy Corp. NY NY
 *
 * Original driver (sg.c):
 *        Copyright (C) 1992 Lawrence Foard
 * Version 2 and 3 extensions to driver:
 *        Copyright (C) 1998 - 2005 Douglas Gilbert
 *
 *  Modified  19-JAN-1998  Richard Gooch <rgooch@atnf.csiro.au>  Devfs support
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 */

static int sg_version_num = 30534;	/* 2 digits for each component */
#define SG_VERSION_STR "3.5.34"

/*
 *  D. P. Gilbert (dgilbert@interlog.com, dougg@triode.net.au), notes:
 *      - scsi logging is available via SCSI_LOG_TIMEOUT macros.  First
 *        the kernel/module needs to be built with CONFIG_SCSI_LOGGING
 *        (otherwise the macros compile to empty statements).
 *
 */
#include <linux/module.h>

#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/errno.h>
#include <linux/mtio.h>
#include <linux/ioctl.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/moduleparam.h>
#include <linux/cdev.h>
#include <linux/seq_file.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/scatterlist.h>

#include "scsi.h"
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_driver.h>
#include <scsi/scsi_ioctl.h>
#include <scsi/sg.h>

#include "scsi_logging.h"

#ifdef CONFIG_SCSI_PROC_FS
#include <linux/proc_fs.h>
static char *sg_version_date = "20061027";

static int sg_proc_init(void);
static void sg_proc_cleanup(void);
#endif

#define SG_ALLOW_DIO_DEF 0
#define SG_ALLOW_DIO_CODE /* compile out by commenting this define */

#define SG_MAX_DEVS 32768

/*
 * Suppose you want to calculate the formula muldiv(x,m,d)=int(x * m / d)
 * Then when using 32 bit integers x * m may overflow during the calculation.
 * Replacing muldiv(x) by muldiv(x)=((x % d) * m) / d + int(x / d) * m
 * calculates the same, but prevents the overflow when both m and d
 * are "small" numbers (like HZ and USER_HZ).
 * Of course an overflow is unavoidable if the result of muldiv doesn't fit
 * in 32 bits.
 */
#define MULDIV(X,MUL,DIV) ((((X % DIV) * MUL) / DIV) + ((X / DIV) * MUL))

#define SG_DEFAULT_TIMEOUT MULDIV(SG_DEFAULT_TIMEOUT_USER, HZ, USER_HZ)
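/*
 * Worked example of the overflow avoidance above (the values are
 * illustrative, not taken from any particular configuration): with
 * MUL = HZ = 1000 and DIV = USER_HZ = 100, a naive X * MUL for
 * X = 5,000,000 gives 5,000,000,000, which overflows a 32 bit int.
 * MULDIV instead computes
 *	((5,000,000 % 100) * 1000) / 100 + (5,000,000 / 100) * 1000
 *	= (0 * 1000) / 100 + 50,000 * 1000
 *	= 50,000,000
 * whose intermediate products stay close to the final result instead
 * of reaching X * MUL.
 */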
int sg_big_buff = SG_DEF_RESERVED_SIZE;
/* N.B. This variable is readable and writeable via
   /proc/scsi/sg/def_reserved_size . Each time sg_open() is called a buffer
   of this size (or less if there is not enough memory) will be reserved
   for use by this file descriptor.  [Deprecated usage: this variable is also
   readable via /proc/sys/kernel/sg-big-buff if the sg driver is built into
   the kernel (i.e. it is not a module).] */
static int def_reserved_size = -1;	/* picks up init parameter */
static int sg_allow_dio = SG_ALLOW_DIO_DEF;

static int scatter_elem_sz = SG_SCATTER_SZ;
static int scatter_elem_sz_prev = SG_SCATTER_SZ;

#define SG_SECTOR_SZ 512
#define SG_SECTOR_MSK (SG_SECTOR_SZ - 1)

#define SG_DEV_ARR_LUMP 32	/* amount to over allocate sg_dev_arr by */

static int sg_add(struct class_device *, struct class_interface *);
static void sg_remove(struct class_device *, struct class_interface *);

static DEFINE_RWLOCK(sg_dev_arr_lock);	/* Also used to lock
					   file descriptor list for device */

static struct class_interface sg_interface = {
	.add	= sg_add,
	.remove	= sg_remove,
};

typedef struct sg_scatter_hold { /* holding area for scsi scatter gather info */
	unsigned short k_use_sg; /* Count of kernel scatter-gather pieces */
	unsigned short sglist_len; /* size of malloc'd scatter-gather list ++ */
	unsigned bufflen;	/* Size of (aggregate) data buffer */
	unsigned b_malloc_len;	/* actual len malloc'ed in buffer */
	struct scatterlist *buffer;	/* scatter list */
	char dio_in_use;	/* 0->indirect IO (or mmap), 1->dio */
	unsigned char cmd_opcode; /* first byte of command */
} Sg_scatter_hold;

struct sg_device;		/* forward declarations */
struct sg_fd;

typedef struct sg_request {	/* SG_MAX_QUEUE requests outstanding per file */
	struct sg_request *nextrp;	/* NULL -> tail request (slist) */
	struct sg_fd *parentfp;	/* NULL -> not in use */
	Sg_scatter_hold data;	/* hold buffer, perhaps scatter list */
	sg_io_hdr_t header;	/* scsi command+info, see <scsi/sg.h> */
	unsigned char sense_b[SCSI_SENSE_BUFFERSIZE];
	char res_used;		/* 1 -> using reserve buffer, 0 -> not ... */
	char orphan;		/* 1 -> drop on sight, 0 -> normal */
	char sg_io_owned;	/* 1 -> packet belongs to SG_IO */
	volatile char done;	/* 0->before bh, 1->before read, 2->read */
} Sg_request;

typedef struct sg_fd {		/* holds the state of a file descriptor */
	struct sg_fd *nextfp;	/* NULL when last opened fd on this device */
	struct sg_device *parentdp;	/* owning device */
	wait_queue_head_t read_wait;	/* queue read until command done */
	rwlock_t rq_list_lock;	/* protect access to list in req_arr */
	int timeout;		/* defaults to SG_DEFAULT_TIMEOUT */
	int timeout_user;	/* defaults to SG_DEFAULT_TIMEOUT_USER */
	Sg_scatter_hold reserve;	/* buffer held for this file descriptor */
	unsigned save_scat_len;	/* original length of trunc. scat. element */
	Sg_request *headrp;	/* head of request slist, NULL->empty */
	struct fasync_struct *async_qp;	/* used by asynchronous notification */
	Sg_request req_arr[SG_MAX_QUEUE];	/* used as singly-linked list */
	char low_dma;		/* as in parent but possibly overridden to 1 */
	char force_packid;	/* 1 -> pack_id input to read(), 0 -> ignored */
	volatile char closed;	/* 1 -> fd closed but request(s) outstanding */
	char cmd_q;		/* 1 -> allow command queuing, 0 -> don't */
	char next_cmd_len;	/* 0 -> automatic (def), >0 -> use on next write() */
	char keep_orphan;	/* 0 -> drop orphan (def), 1 -> keep for read() */
	char mmap_called;	/* 0 -> mmap() never called on this fd */
} Sg_fd;

typedef struct sg_device { /* holds the state of each scsi generic device */
	struct scsi_device *device;
	wait_queue_head_t o_excl_wait;	/* queue open() when O_EXCL in use */
	int sg_tablesize;	/* adapter's max scatter-gather table size */
	Sg_fd *headfp;		/* first open fd belonging to this device */
	volatile char detached;	/* 0->attached, 1->detached pending removal */
	volatile char exclude;	/* opened for exclusive access */
	char sgdebug;		/* 0->off, 1->sense, 9->dump dev, 10-> all devs */
	struct gendisk *disk;
	struct cdev * cdev;	/* char_dev [sysfs: /sys/cdev/major/sg<n>] */
} Sg_device;

static int sg_fasync(int fd, struct file *filp, int mode);
/* tasklet or soft irq callback */
static void sg_cmd_done(void *data, char *sense, int result, int resid);
static int sg_start_req(Sg_request * srp);
static void sg_finish_rem_req(Sg_request * srp);
static int sg_build_indirect(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size);
static int sg_build_sgat(Sg_scatter_hold * schp, const Sg_fd * sfp,
			 int tablesize);
static ssize_t sg_new_read(Sg_fd * sfp, char __user *buf, size_t count,
			   Sg_request * srp);
static ssize_t sg_new_write(Sg_fd * sfp, const char __user *buf, size_t count,
			    int blocking, int read_only, Sg_request ** o_srp);
static int sg_common_write(Sg_fd * sfp, Sg_request * srp,
			   unsigned char *cmnd, int timeout, int blocking);
static int sg_u_iovec(sg_io_hdr_t * hp, int sg_num, int ind,
		      int wr_xf, int *countp, unsigned char __user **up);
static int sg_write_xfer(Sg_request * srp);
static int sg_read_xfer(Sg_request * srp);
static int sg_read_oxfer(Sg_request * srp, char __user *outp, int num_read_xfer);
static void sg_remove_scat(Sg_scatter_hold * schp);
static void sg_build_reserve(Sg_fd * sfp, int req_size);
static void sg_link_reserve(Sg_fd * sfp, Sg_request * srp, int size);
static void sg_unlink_reserve(Sg_fd * sfp, Sg_request * srp);
static struct page *sg_page_malloc(int rqSz, int lowDma, int *retSzp);
static void sg_page_free(struct page *page, int size);
static Sg_fd *sg_add_sfp(Sg_device * sdp, int dev);
static int sg_remove_sfp(Sg_device * sdp, Sg_fd * sfp);
static void __sg_remove_sfp(Sg_device * sdp, Sg_fd * sfp);
static Sg_request *sg_get_rq_mark(Sg_fd * sfp, int pack_id);
static Sg_request *sg_add_request(Sg_fd * sfp);
static int sg_remove_request(Sg_fd * sfp, Sg_request * srp);
static int sg_res_in_use(Sg_fd * sfp);
static int sg_allow_access(unsigned char opcode, char dev_type);
static int sg_build_direct(Sg_request * srp, Sg_fd * sfp, int dxfer_len);
static Sg_device *sg_get_dev(int dev);
#ifdef CONFIG_SCSI_PROC_FS
static int sg_last_dev(void);
#endif
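/*
 * Orientation note: the three structures above form a two-level
 * singly-linked hierarchy, with the fd list guarded by sg_dev_arr_lock
 * and each request list guarded by its fd's rq_list_lock:
 *
 *	Sg_device (one per SCSI device)
 *	  headfp -> Sg_fd --nextfp--> Sg_fd --> ...    (one per open())
 *	              headrp -> Sg_request --nextrp--> ...
 *	              (at most SG_MAX_QUEUE, drawn from req_arr)
 */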
static Sg_device **sg_dev_arr = NULL;
static int sg_dev_max;
static int sg_nr_dev;

#define SZ_SG_HEADER sizeof(struct sg_header)
#define SZ_SG_IO_HDR sizeof(sg_io_hdr_t)
#define SZ_SG_IOVEC sizeof(sg_iovec_t)
#define SZ_SG_REQ_INFO sizeof(sg_req_info_t)

static int
sg_open(struct inode *inode, struct file *filp)
{
	int dev = iminor(inode);
	int flags = filp->f_flags;
	struct request_queue *q;
	Sg_device *sdp;
	Sg_fd *sfp;
	int res;
	int retval;

	nonseekable_open(inode, filp);
	SCSI_LOG_TIMEOUT(3, printk("sg_open: dev=%d, flags=0x%x\n", dev, flags));
	sdp = sg_get_dev(dev);
	if ((!sdp) || (!sdp->device))
		return -ENXIO;
	if (sdp->detached)
		return -ENODEV;

	/* This driver's module count bumped by fops_get in <linux/fs.h> */
	/* Prevent the device driver from vanishing while we sleep */
	retval = scsi_device_get(sdp->device);
	if (retval)
		return retval;

	if (!((flags & O_NONBLOCK) ||
	      scsi_block_when_processing_errors(sdp->device))) {
		retval = -ENXIO;
		/* we are in error recovery for this device */
		goto error_out;
	}

	if (flags & O_EXCL) {
		if (O_RDONLY == (flags & O_ACCMODE)) {
			retval = -EPERM; /* Can't lock it with read only access */
			goto error_out;
		}
		if (sdp->headfp && (flags & O_NONBLOCK)) {
			retval = -EBUSY;
			goto error_out;
		}
		res = 0;
		__wait_event_interruptible(sdp->o_excl_wait,
			((sdp->headfp || sdp->exclude) ? 0 : (sdp->exclude = 1)), res);
		if (res) {
			retval = res;	/* -ERESTARTSYS because signal hit process */
			goto error_out;
		}
	} else if (sdp->exclude) {	/* some other fd has an exclusive lock on dev */
		if (flags & O_NONBLOCK) {
			retval = -EBUSY;
			goto error_out;
		}
		res = 0;
		__wait_event_interruptible(sdp->o_excl_wait, (!sdp->exclude),
					   res);
		if (res) {
			retval = res;	/* -ERESTARTSYS because signal hit process */
			goto error_out;
		}
	}
	if (sdp->detached) {
		retval = -ENODEV;
		goto error_out;
	}
	if (!sdp->headfp) {	/* no existing opens on this device */
		sdp->sgdebug = 0;
		q = sdp->device->request_queue;
		sdp->sg_tablesize = min(q->max_hw_segments,
					q->max_phys_segments);
	}
	if ((sfp = sg_add_sfp(sdp, dev)))
		filp->private_data = sfp;
	else {
		if (flags & O_EXCL)
			sdp->exclude = 0;	/* undo if error */
		retval = -ENOMEM;
		goto error_out;
	}
	return 0;

error_out:
	scsi_device_put(sdp->device);
	return retval;
}

/* Following function was formerly called 'sg_close' */
static int
sg_release(struct inode *inode, struct file *filp)
{
	Sg_device *sdp;
	Sg_fd *sfp;

	if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
		return -ENXIO;
	SCSI_LOG_TIMEOUT(3, printk("sg_release: %s\n", sdp->disk->disk_name));
	sg_fasync(-1, filp, 0);	/* remove filp from async notification list */
	if (0 == sg_remove_sfp(sdp, sfp)) {	/* Returns 1 when sdp gone */
		if (!sdp->detached) {
			scsi_device_put(sdp->device);
		}
		sdp->exclude = 0;
		wake_up_interruptible(&sdp->o_excl_wait);
	}
	return 0;
}
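/*
 * For illustration, a minimal user-space sketch of the exclusive-access
 * path through sg_open() above (the /dev/sg0 node name and the error
 * handling are assumptions, not part of this driver):
 *
 *	int fd = open("/dev/sg0", O_RDWR | O_EXCL | O_NONBLOCK);
 *	if (fd < 0 && errno == EBUSY) {
 *		// some other fd is already open: with O_NONBLOCK set,
 *		// sg_open() returns -EBUSY instead of sleeping on
 *		// the o_excl_wait queue
 *	}
 */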
static ssize_t
sg_read(struct file *filp, char __user *buf, size_t count, loff_t * ppos)
{
	Sg_device *sdp;
	Sg_fd *sfp;
	Sg_request *srp;
	int req_pack_id = -1;
	sg_io_hdr_t *hp;
	struct sg_header *old_hdr = NULL;
	int retval = 0;

	if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
		return -ENXIO;
	SCSI_LOG_TIMEOUT(3, printk("sg_read: %s, count=%d\n",
				   sdp->disk->disk_name, (int) count));

	if (!access_ok(VERIFY_WRITE, buf, count))
		return -EFAULT;
	if (sfp->force_packid && (count >= SZ_SG_HEADER)) {
		old_hdr = kmalloc(SZ_SG_HEADER, GFP_KERNEL);
		if (!old_hdr)
			return -ENOMEM;
		if (__copy_from_user(old_hdr, buf, SZ_SG_HEADER)) {
			retval = -EFAULT;
			goto free_old_hdr;
		}
		if (old_hdr->reply_len < 0) {
			if (count >= SZ_SG_IO_HDR) {
				sg_io_hdr_t *new_hdr;
				new_hdr = kmalloc(SZ_SG_IO_HDR, GFP_KERNEL);
				if (!new_hdr) {
					retval = -ENOMEM;
					goto free_old_hdr;
				}
				retval = __copy_from_user
				    (new_hdr, buf, SZ_SG_IO_HDR);
				req_pack_id = new_hdr->pack_id;
				kfree(new_hdr);
				if (retval) {
					retval = -EFAULT;
					goto free_old_hdr;
				}
			}
		} else
			req_pack_id = old_hdr->pack_id;
	}
	srp = sg_get_rq_mark(sfp, req_pack_id);
	if (!srp) {		/* now wait on packet to arrive */
		if (sdp->detached) {
			retval = -ENODEV;
			goto free_old_hdr;
		}
		if (filp->f_flags & O_NONBLOCK) {
			retval = -EAGAIN;
			goto free_old_hdr;
		}
		while (1) {
			retval = 0;	/* following macro beats race condition */
			__wait_event_interruptible(sfp->read_wait,
				(sdp->detached ||
				(srp = sg_get_rq_mark(sfp, req_pack_id))),
				retval);
			if (sdp->detached) {
				retval = -ENODEV;
				goto free_old_hdr;
			}
			if (0 == retval)
				break;

			/* -ERESTARTSYS as signal hit process */
			goto free_old_hdr;
		}
	}
	if (srp->header.interface_id != '\0') {
		retval = sg_new_read(sfp, buf, count, srp);
		goto free_old_hdr;
	}

	hp = &srp->header;
	if (old_hdr == NULL) {
		old_hdr = kmalloc(SZ_SG_HEADER, GFP_KERNEL);
		if (! old_hdr) {
			retval = -ENOMEM;
			goto free_old_hdr;
		}
	}
	memset(old_hdr, 0, SZ_SG_HEADER);
	old_hdr->reply_len = (int) hp->timeout;
	old_hdr->pack_len = old_hdr->reply_len; /* old, strange behaviour */
	old_hdr->pack_id = hp->pack_id;
	old_hdr->twelve_byte =
	    ((srp->data.cmd_opcode >= 0xc0) && (12 == hp->cmd_len)) ? 1 : 0;
	old_hdr->target_status = hp->masked_status;
	old_hdr->host_status = hp->host_status;
	old_hdr->driver_status = hp->driver_status;
	if ((CHECK_CONDITION & hp->masked_status) ||
	    (DRIVER_SENSE & hp->driver_status))
		memcpy(old_hdr->sense_buffer, srp->sense_b,
		       sizeof (old_hdr->sense_buffer));
	switch (hp->host_status) {
	/* This setup of 'result' is for backward compatibility and is best
	   ignored by the user who should use target, host + driver status */
	case DID_OK:
	case DID_PASSTHROUGH:
	case DID_SOFT_ERROR:
		old_hdr->result = 0;
		break;
	case DID_NO_CONNECT:
	case DID_BUS_BUSY:
	case DID_TIME_OUT:
		old_hdr->result = EBUSY;
		break;
	case DID_BAD_TARGET:
	case DID_ABORT:
	case DID_PARITY:
	case DID_RESET:
	case DID_BAD_INTR:
		old_hdr->result = EIO;
		break;
	case DID_ERROR:
		old_hdr->result = (srp->sense_b[0] == 0 &&
				   hp->masked_status == GOOD) ? 0 : EIO;
		break;
	default:
		old_hdr->result = EIO;
		break;
	}

	/* Now copy the result back to the user buffer.  */
	if (count >= SZ_SG_HEADER) {
		if (__copy_to_user(buf, old_hdr, SZ_SG_HEADER)) {
			retval = -EFAULT;
			goto free_old_hdr;
		}
		buf += SZ_SG_HEADER;
		if (count > old_hdr->reply_len)
			count = old_hdr->reply_len;
		if (count > SZ_SG_HEADER) {
			if (sg_read_oxfer(srp, buf, count - SZ_SG_HEADER)) {
				retval = -EFAULT;
				goto free_old_hdr;
			}
		}
	} else
		count = (old_hdr->result == 0) ? 0 : -EIO;
	sg_finish_rem_req(srp);
	retval = count;
free_old_hdr:
	kfree(old_hdr);
	return retval;
}

static ssize_t
sg_new_read(Sg_fd * sfp, char __user *buf, size_t count, Sg_request * srp)
{
	sg_io_hdr_t *hp = &srp->header;
	int err = 0;
	int len;

	if (count < SZ_SG_IO_HDR) {
		err = -EINVAL;
		goto err_out;
	}
	hp->sb_len_wr = 0;
	if ((hp->mx_sb_len > 0) && hp->sbp) {
		if ((CHECK_CONDITION & hp->masked_status) ||
		    (DRIVER_SENSE & hp->driver_status)) {
			int sb_len = SCSI_SENSE_BUFFERSIZE;
			sb_len = (hp->mx_sb_len > sb_len) ? sb_len : hp->mx_sb_len;
			len = 8 + (int) srp->sense_b[7];	/* Additional sense length field */
			len = (len > sb_len) ? sb_len : len;
			if (copy_to_user(hp->sbp, srp->sense_b, len)) {
				err = -EFAULT;
				goto err_out;
			}
			hp->sb_len_wr = len;
		}
	}
	if (hp->masked_status || hp->host_status || hp->driver_status)
		hp->info |= SG_INFO_CHECK;
	if (copy_to_user(buf, hp, SZ_SG_IO_HDR)) {
		err = -EFAULT;
		goto err_out;
	}
	err = sg_read_xfer(srp);
err_out:
	sg_finish_rem_req(srp);
	return (0 == err) ? count : err;
}

static ssize_t
sg_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos)
{
	int mxsize, cmd_size, k;
	int input_size, blocking;
	unsigned char opcode;
	Sg_device *sdp;
	Sg_fd *sfp;
	Sg_request *srp;
	struct sg_header old_hdr;
	sg_io_hdr_t *hp;
	unsigned char cmnd[MAX_COMMAND_SIZE];

	if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
		return -ENXIO;
	SCSI_LOG_TIMEOUT(3, printk("sg_write: %s, count=%d\n",
				   sdp->disk->disk_name, (int) count));
	if (sdp->detached)
		return -ENODEV;
	if (!((filp->f_flags & O_NONBLOCK) ||
	      scsi_block_when_processing_errors(sdp->device)))
		return -ENXIO;

	if (!access_ok(VERIFY_READ, buf, count))
		return -EFAULT;	/* protects following copy_from_user()s + get_user()s */
	if (count < SZ_SG_HEADER)
		return -EIO;
	if (__copy_from_user(&old_hdr, buf, SZ_SG_HEADER))
		return -EFAULT;
	blocking = !(filp->f_flags & O_NONBLOCK);
	if (old_hdr.reply_len < 0)
		return sg_new_write(sfp, buf, count, blocking, 0, NULL);
	if (count < (SZ_SG_HEADER + 6))
		return -EIO;	/* The minimum scsi command length is 6 bytes. */

	if (!(srp = sg_add_request(sfp))) {
		SCSI_LOG_TIMEOUT(1, printk("sg_write: queue full\n"));
		return -EDOM;
	}
	buf += SZ_SG_HEADER;
	__get_user(opcode, buf);
	if (sfp->next_cmd_len > 0) {
		if (sfp->next_cmd_len > MAX_COMMAND_SIZE) {
			SCSI_LOG_TIMEOUT(1, printk("sg_write: command length too long\n"));
			sfp->next_cmd_len = 0;
			sg_remove_request(sfp, srp);
			return -EIO;
		}
		cmd_size = sfp->next_cmd_len;
		sfp->next_cmd_len = 0;	/* reset so only this write() affected */
	} else {
		cmd_size = COMMAND_SIZE(opcode);	/* based on SCSI command group */
		if ((opcode >= 0xc0) && old_hdr.twelve_byte)
			cmd_size = 12;
	}
	SCSI_LOG_TIMEOUT(4, printk(
		"sg_write:   scsi opcode=0x%02x, cmd_size=%d\n", (int) opcode, cmd_size));
	/* Determine buffer size.  */
	input_size = count - cmd_size;
	mxsize = (input_size > old_hdr.reply_len) ? input_size : old_hdr.reply_len;
	mxsize -= SZ_SG_HEADER;
	input_size -= SZ_SG_HEADER;
	if (input_size < 0) {
		sg_remove_request(sfp, srp);
		return -EIO;	/* User did not pass enough bytes for this command. */
	}
	hp = &srp->header;
	hp->interface_id = '\0';	/* indicator of old interface tunnelled */
	hp->cmd_len = (unsigned char) cmd_size;
	hp->iovec_count = 0;
	hp->mx_sb_len = 0;
	if (input_size > 0)
		hp->dxfer_direction = (old_hdr.reply_len > SZ_SG_HEADER) ?
		    SG_DXFER_TO_FROM_DEV : SG_DXFER_TO_DEV;
	else
		hp->dxfer_direction = (mxsize > 0) ? SG_DXFER_FROM_DEV : SG_DXFER_NONE;
	hp->dxfer_len = mxsize;
	hp->dxferp = (char __user *)buf + cmd_size;
	hp->sbp = NULL;
	hp->timeout = old_hdr.reply_len;	/* structure abuse ... */
	hp->flags = input_size;	/* structure abuse ... */
	hp->pack_id = old_hdr.pack_id;
	hp->usr_ptr = NULL;
	if (__copy_from_user(cmnd, buf, cmd_size))
		return -EFAULT;
	/*
	 * SG_DXFER_TO_FROM_DEV is functionally equivalent to SG_DXFER_FROM_DEV,
	 * but it is possible that the app intended SG_DXFER_TO_DEV, because there
	 * is a non-zero input_size, so emit a warning.
	 */
	if (hp->dxfer_direction == SG_DXFER_TO_FROM_DEV)
		if (printk_ratelimit())
			printk(KERN_WARNING
			       "sg_write: data in/out %d/%d bytes for SCSI command 0x%x--"
			       "guessing data in;\n" KERN_WARNING "   "
			       "program %s not setting count and/or reply_len properly\n",
			       old_hdr.reply_len - (int)SZ_SG_HEADER,
			       input_size, (unsigned int) cmnd[0],
			       current->comm);
	k = sg_common_write(sfp, srp, cmnd, sfp->timeout, blocking);
	return (k < 0) ? k : count;
}
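/*
 * Sketch of the old (struct sg_header) protocol that sg_write()/sg_read()
 * implement above, from a user process's point of view.  The INQUIRY
 * command and the sizes are illustrative assumptions only:
 *
 *	struct sg_header hdr = { .reply_len = sizeof(hdr) + 96,
 *				 .pack_id = 1 };
 *	unsigned char inq[6] = { 0x12, 0, 0, 0, 96, 0 };
 *	unsigned char wbuf[sizeof(hdr) + sizeof(inq)];
 *	unsigned char rbuf[sizeof(hdr) + 96];
 *
 *	memcpy(wbuf, &hdr, sizeof(hdr));
 *	memcpy(wbuf + sizeof(hdr), inq, sizeof(inq));
 *	write(fd, wbuf, sizeof(wbuf));	// header + command, no data out
 *	read(fd, rbuf, sizeof(rbuf));	// sg_header back, then data in
 *
 * Note how reply_len doubles as the requested transfer size; the v3
 * interface handled below replaces this "structure abuse" with explicit
 * fields.
 */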
static ssize_t
sg_new_write(Sg_fd * sfp, const char __user *buf, size_t count,
	     int blocking, int read_only, Sg_request ** o_srp)
{
	int k;
	Sg_request *srp;
	sg_io_hdr_t *hp;
	unsigned char cmnd[MAX_COMMAND_SIZE];
	int timeout;
	unsigned long ul_timeout;

	if (count < SZ_SG_IO_HDR)
		return -EINVAL;
	if (!access_ok(VERIFY_READ, buf, count))
		return -EFAULT; /* protects following copy_from_user()s + get_user()s */

	sfp->cmd_q = 1;	/* when sg_io_hdr seen, set command queuing on */
	if (!(srp = sg_add_request(sfp))) {
		SCSI_LOG_TIMEOUT(1, printk("sg_new_write: queue full\n"));
		return -EDOM;
	}
	hp = &srp->header;
	if (__copy_from_user(hp, buf, SZ_SG_IO_HDR)) {
		sg_remove_request(sfp, srp);
		return -EFAULT;
	}
	if (hp->interface_id != 'S') {
		sg_remove_request(sfp, srp);
		return -ENOSYS;
	}
	if (hp->flags & SG_FLAG_MMAP_IO) {
		if (hp->dxfer_len > sfp->reserve.bufflen) {
			sg_remove_request(sfp, srp);
			return -ENOMEM;	/* MMAP_IO size must fit in reserve buffer */
		}
		if (hp->flags & SG_FLAG_DIRECT_IO) {
			sg_remove_request(sfp, srp);
			return -EINVAL;	/* either MMAP_IO or DIRECT_IO (not both) */
		}
		if (sg_res_in_use(sfp)) {
			sg_remove_request(sfp, srp);
			return -EBUSY;	/* reserve buffer already being used */
		}
	}
	ul_timeout = msecs_to_jiffies(srp->header.timeout);
	timeout = (ul_timeout < INT_MAX) ? ul_timeout : INT_MAX;
	if ((!hp->cmdp) || (hp->cmd_len < 6) || (hp->cmd_len > sizeof (cmnd))) {
		sg_remove_request(sfp, srp);
		return -EMSGSIZE;
	}
	if (!access_ok(VERIFY_READ, hp->cmdp, hp->cmd_len)) {
		sg_remove_request(sfp, srp);
		return -EFAULT;	/* protects following copy_from_user()s + get_user()s */
	}
	if (__copy_from_user(cmnd, hp->cmdp, hp->cmd_len)) {
		sg_remove_request(sfp, srp);
		return -EFAULT;
	}
	if (read_only &&
	    (!sg_allow_access(cmnd[0], sfp->parentdp->device->type))) {
		sg_remove_request(sfp, srp);
		return -EPERM;
	}
	k = sg_common_write(sfp, srp, cmnd, timeout, blocking);
	if (k < 0)
		return k;
	if (o_srp)
		*o_srp = srp;
	return count;
}
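/*
 * The corresponding v3 usage that sg_new_write() parses, again as a
 * user-space sketch (the command, sizes and buffers are made-up examples):
 *
 *	sg_io_hdr_t io = { 0 };
 *	unsigned char inq[6] = { 0x12, 0, 0, 0, 96, 0 };
 *	unsigned char data[96], sense[32];
 *
 *	io.interface_id = 'S';		// mandatory, else -ENOSYS above
 *	io.cmdp = inq;  io.cmd_len = sizeof(inq);
 *	io.dxferp = data;  io.dxfer_len = sizeof(data);
 *	io.dxfer_direction = SG_DXFER_FROM_DEV;
 *	io.sbp = sense;  io.mx_sb_len = sizeof(sense);
 *	io.timeout = 20000;		// milliseconds, see msecs_to_jiffies()
 *	ioctl(fd, SG_IO, &io);		// or write(fd, &io, sizeof(io))
 */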
static int
sg_common_write(Sg_fd * sfp, Sg_request * srp,
		unsigned char *cmnd, int timeout, int blocking)
{
	int k, data_dir;
	Sg_device *sdp = sfp->parentdp;
	sg_io_hdr_t *hp = &srp->header;

	srp->data.cmd_opcode = cmnd[0];	/* hold opcode of command */
	hp->status = 0;
	hp->masked_status = 0;
	hp->msg_status = 0;
	hp->info = 0;
	hp->host_status = 0;
	hp->driver_status = 0;
	hp->resid = 0;
	SCSI_LOG_TIMEOUT(4, printk("sg_common_write:  scsi opcode=0x%02x, cmd_size=%d\n",
			  (int) cmnd[0], (int) hp->cmd_len));

	if ((k = sg_start_req(srp))) {
		SCSI_LOG_TIMEOUT(1, printk("sg_common_write: start_req err=%d\n", k));
		sg_finish_rem_req(srp);
		return k;	/* probably out of space --> ENOMEM */
	}
	if ((k = sg_write_xfer(srp))) {
		SCSI_LOG_TIMEOUT(1, printk("sg_common_write: write_xfer, bad address\n"));
		sg_finish_rem_req(srp);
		return k;
	}
	if (sdp->detached) {
		sg_finish_rem_req(srp);
		return -ENODEV;
	}

	switch (hp->dxfer_direction) {
	case SG_DXFER_TO_FROM_DEV:
	case SG_DXFER_FROM_DEV:
		data_dir = DMA_FROM_DEVICE;
		break;
	case SG_DXFER_TO_DEV:
		data_dir = DMA_TO_DEVICE;
		break;
	case SG_DXFER_UNKNOWN:
		data_dir = DMA_BIDIRECTIONAL;
		break;
	default:
		data_dir = DMA_NONE;
		break;
	}
	hp->duration = jiffies_to_msecs(jiffies);
	/* Now send everything off to the mid-level.  The next time we hear
	   about this packet is when sg_cmd_done() is called (i.e. a callback). */
	if (scsi_execute_async(sdp->device, cmnd, hp->cmd_len, data_dir, srp->data.buffer,
				hp->dxfer_len, srp->data.k_use_sg, timeout,
				SG_DEFAULT_RETRIES, srp, sg_cmd_done,
				GFP_ATOMIC)) {
		SCSI_LOG_TIMEOUT(1, printk("sg_common_write: scsi_execute_async failed\n"));
		/*
		 * most likely out of mem, but could also be a bad map
		 */
		sg_finish_rem_req(srp);
		return -ENOMEM;
	} else
		return 0;
}

static int
sg_srp_done(Sg_request *srp, Sg_fd *sfp)
{
	unsigned long iflags;
	int done;

	read_lock_irqsave(&sfp->rq_list_lock, iflags);
	done = srp->done;
	read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
	return done;
}

static int
sg_ioctl(struct inode *inode, struct file *filp,
	 unsigned int cmd_in, unsigned long arg)
{
	void __user *p = (void __user *)arg;
	int __user *ip = p;
	int result, val, read_only;
	Sg_device *sdp;
	Sg_fd *sfp;
	Sg_request *srp;
	unsigned long iflags;

	if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
		return -ENXIO;
	SCSI_LOG_TIMEOUT(3, printk("sg_ioctl: %s, cmd=0x%x\n",
				   sdp->disk->disk_name, (int) cmd_in));
	read_only = (O_RDWR != (filp->f_flags & O_ACCMODE));

	switch (cmd_in) {
	case SG_IO:
		{
			int blocking = 1;	/* ignore O_NONBLOCK flag */

			if (sdp->detached)
				return -ENODEV;
			if (!scsi_block_when_processing_errors(sdp->device))
				return -ENXIO;
			if (!access_ok(VERIFY_WRITE, p, SZ_SG_IO_HDR))
				return -EFAULT;
			result =
			    sg_new_write(sfp, p, SZ_SG_IO_HDR,
					 blocking, read_only, &srp);
			if (result < 0)
				return result;
			srp->sg_io_owned = 1;
			while (1) {
				result = 0;	/* following macro to beat race condition */
				__wait_event_interruptible(sfp->read_wait,
					(sdp->detached || sfp->closed || sg_srp_done(srp, sfp)),
					result);
				if (sdp->detached)
					return -ENODEV;
				if (sfp->closed)
					return 0;	/* request packet dropped already */
				if (0 == result)
					break;
				srp->orphan = 1;
				return result;	/* -ERESTARTSYS because signal hit process */
			}
			write_lock_irqsave(&sfp->rq_list_lock, iflags);
			srp->done = 2;
			write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
			result = sg_new_read(sfp, p, SZ_SG_IO_HDR, srp);
			return (result < 0) ? result : 0;
		}
	case SG_SET_TIMEOUT:
		result = get_user(val, ip);
		if (result)
			return result;
		if (val < 0)
			return -EIO;
		if (val >= MULDIV (INT_MAX, USER_HZ, HZ))
			val = MULDIV (INT_MAX, USER_HZ, HZ);
		sfp->timeout_user = val;
		sfp->timeout = MULDIV (val, HZ, USER_HZ);

		return 0;
	case SG_GET_TIMEOUT:	/* N.B. User receives timeout as return value */
				/* strange ..., for backward compatibility */
		return sfp->timeout_user;
	case SG_SET_FORCE_LOW_DMA:
		result = get_user(val, ip);
		if (result)
			return result;
		if (val) {
			sfp->low_dma = 1;
			if ((0 == sfp->low_dma) && (0 == sg_res_in_use(sfp))) {
				val = (int) sfp->reserve.bufflen;
				sg_remove_scat(&sfp->reserve);
				sg_build_reserve(sfp, val);
			}
		} else {
			if (sdp->detached)
				return -ENODEV;
			sfp->low_dma = sdp->device->host->unchecked_isa_dma;
		}
		return 0;
	case SG_GET_LOW_DMA:
		return put_user((int) sfp->low_dma, ip);
	case SG_GET_SCSI_ID:
		if (!access_ok(VERIFY_WRITE, p, sizeof (sg_scsi_id_t)))
			return -EFAULT;
		else {
			sg_scsi_id_t __user *sg_idp = p;

			if (sdp->detached)
				return -ENODEV;
			__put_user((int) sdp->device->host->host_no,
				   &sg_idp->host_no);
			__put_user((int) sdp->device->channel,
				   &sg_idp->channel);
			__put_user((int) sdp->device->id, &sg_idp->scsi_id);
			__put_user((int) sdp->device->lun, &sg_idp->lun);
			__put_user((int) sdp->device->type, &sg_idp->scsi_type);
			__put_user((short) sdp->device->host->cmd_per_lun,
				   &sg_idp->h_cmd_per_lun);
			__put_user((short) sdp->device->queue_depth,
				   &sg_idp->d_queue_depth);
			__put_user(0, &sg_idp->unused[0]);
			__put_user(0, &sg_idp->unused[1]);
			return 0;
		}
	case SG_SET_FORCE_PACK_ID:
		result = get_user(val, ip);
		if (result)
			return result;
		sfp->force_packid = val ? 1 : 0;
		return 0;
	case SG_GET_PACK_ID:
		if (!access_ok(VERIFY_WRITE, ip, sizeof (int)))
			return -EFAULT;
		read_lock_irqsave(&sfp->rq_list_lock, iflags);
		for (srp = sfp->headrp; srp; srp = srp->nextrp) {
			if ((1 == srp->done) && (!srp->sg_io_owned)) {
				read_unlock_irqrestore(&sfp->rq_list_lock,
						       iflags);
				__put_user(srp->header.pack_id, ip);
				return 0;
			}
		}
		read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
		__put_user(-1, ip);
		return 0;
	case SG_GET_NUM_WAITING:
		read_lock_irqsave(&sfp->rq_list_lock, iflags);
		for (val = 0, srp = sfp->headrp; srp; srp = srp->nextrp) {
			if ((1 == srp->done) && (!srp->sg_io_owned))
				++val;
		}
		read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
		return put_user(val, ip);
	case SG_GET_SG_TABLESIZE:
		return put_user(sdp->sg_tablesize, ip);
	case SG_SET_RESERVED_SIZE:
		result = get_user(val, ip);
		if (result)
			return result;
		if (val < 0)
			return -EINVAL;
		val = min_t(int, val,
			    sdp->device->request_queue->max_sectors * 512);
		if (val != sfp->reserve.bufflen) {
			if (sg_res_in_use(sfp) || sfp->mmap_called)
				return -EBUSY;
			sg_remove_scat(&sfp->reserve);
			sg_build_reserve(sfp, val);
		}
		return 0;
	case SG_GET_RESERVED_SIZE:
		val = min_t(int, sfp->reserve.bufflen,
			    sdp->device->request_queue->max_sectors * 512);
		return put_user(val, ip);
	case SG_SET_COMMAND_Q:
		result = get_user(val, ip);
		if (result)
			return result;
		sfp->cmd_q = val ? 1 : 0;
		return 0;
	case SG_GET_COMMAND_Q:
		return put_user((int) sfp->cmd_q, ip);
	case SG_SET_KEEP_ORPHAN:
		result = get_user(val, ip);
		if (result)
			return result;
		sfp->keep_orphan = val;
		return 0;
	case SG_GET_KEEP_ORPHAN:
		return put_user((int) sfp->keep_orphan, ip);
	case SG_NEXT_CMD_LEN:
		result = get_user(val, ip);
		if (result)
			return result;
		sfp->next_cmd_len = (val > 0) ? val : 0;
		return 0;
	case SG_GET_VERSION_NUM:
		return put_user(sg_version_num, ip);
	case SG_GET_ACCESS_COUNT:
		/* faked - we don't have a real access count anymore */
		val = (sdp->device ? 1 : 0);
		return put_user(val, ip);
	case SG_GET_REQUEST_TABLE:
		if (!access_ok(VERIFY_WRITE, p, SZ_SG_REQ_INFO * SG_MAX_QUEUE))
			return -EFAULT;
		else {
			sg_req_info_t *rinfo;
			unsigned int ms;

			rinfo = kmalloc(SZ_SG_REQ_INFO * SG_MAX_QUEUE,
					GFP_KERNEL);
			if (!rinfo)
				return -ENOMEM;
			read_lock_irqsave(&sfp->rq_list_lock, iflags);
			for (srp = sfp->headrp, val = 0; val < SG_MAX_QUEUE;
			     ++val, srp = srp ? srp->nextrp : srp) {
				memset(&rinfo[val], 0, SZ_SG_REQ_INFO);
				if (srp) {
					rinfo[val].req_state = srp->done + 1;
					rinfo[val].problem =
					    srp->header.masked_status &
					    srp->header.host_status &
					    srp->header.driver_status;
					if (srp->done)
						rinfo[val].duration =
							srp->header.duration;
					else {
						ms = jiffies_to_msecs(jiffies);
						rinfo[val].duration =
						    (ms > srp->header.duration) ?
						    (ms - srp->header.duration) : 0;
					}
					rinfo[val].orphan = srp->orphan;
					rinfo[val].sg_io_owned =
							srp->sg_io_owned;
					rinfo[val].pack_id =
							srp->header.pack_id;
					rinfo[val].usr_ptr =
							srp->header.usr_ptr;
				}
			}
			read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
			result = __copy_to_user(p, rinfo,
						SZ_SG_REQ_INFO * SG_MAX_QUEUE);
			result = result ? -EFAULT : 0;
			kfree(rinfo);
			return result;
		}
	case SG_EMULATED_HOST:
		if (sdp->detached)
			return -ENODEV;
		return put_user(sdp->device->host->hostt->emulated, ip);
	case SG_SCSI_RESET:
		if (sdp->detached)
			return -ENODEV;
		if (filp->f_flags & O_NONBLOCK) {
			if (scsi_host_in_recovery(sdp->device->host))
				return -EBUSY;
		} else if (!scsi_block_when_processing_errors(sdp->device))
			return -EBUSY;
		result = get_user(val, ip);
		if (result)
			return result;
		if (SG_SCSI_RESET_NOTHING == val)
			return 0;
		switch (val) {
		case SG_SCSI_RESET_DEVICE:
			val = SCSI_TRY_RESET_DEVICE;
			break;
		case SG_SCSI_RESET_BUS:
			val = SCSI_TRY_RESET_BUS;
			break;
		case SG_SCSI_RESET_HOST:
			val = SCSI_TRY_RESET_HOST;
			break;
		default:
			return -EINVAL;
		}
		if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
			return -EACCES;
		return (scsi_reset_provider(sdp->device, val) ==
			SUCCESS) ? 0 : -EIO;
	case SCSI_IOCTL_SEND_COMMAND:
		if (sdp->detached)
			return -ENODEV;
		if (read_only) {
			unsigned char opcode = WRITE_6;
			Scsi_Ioctl_Command __user *siocp = p;

			if (copy_from_user(&opcode, siocp->data, 1))
				return -EFAULT;
			if (!sg_allow_access(opcode, sdp->device->type))
				return -EPERM;
		}
		return sg_scsi_ioctl(filp, sdp->device->request_queue, NULL, p);
	case SG_SET_DEBUG:
		result = get_user(val, ip);
		if (result)
			return result;
		sdp->sgdebug = (char) val;
		return 0;
	case SCSI_IOCTL_GET_IDLUN:
	case SCSI_IOCTL_GET_BUS_NUMBER:
	case SCSI_IOCTL_PROBE_HOST:
	case SG_GET_TRANSFORM:
		if (sdp->detached)
			return -ENODEV;
		return scsi_ioctl(sdp->device, cmd_in, p);
	case BLKSECTGET:
		return put_user(sdp->device->request_queue->max_sectors * 512,
				ip);
	default:
		if (read_only)
			return -EPERM;	/* don't know so take safe approach */
		return scsi_ioctl(sdp->device, cmd_in, p);
	}
}

#ifdef CONFIG_COMPAT
static long sg_compat_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
{
	Sg_device *sdp;
	Sg_fd *sfp;
	struct scsi_device *sdev;

	if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
		return -ENXIO;

	sdev = sdp->device;
	if (sdev->host->hostt->compat_ioctl) {
		int ret;

		ret = sdev->host->hostt->compat_ioctl(sdev, cmd_in, (void __user *)arg);

		return ret;
	}

	return -ENOIOCTLCMD;
}
#endif

static unsigned int
sg_poll(struct file *filp, poll_table * wait)
{
	unsigned int res = 0;
	Sg_device *sdp;
	Sg_fd *sfp;
	Sg_request *srp;
	int count = 0;
	unsigned long iflags;

	if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp))
	    || sfp->closed)
		return POLLERR;
	poll_wait(filp, &sfp->read_wait, wait);
	read_lock_irqsave(&sfp->rq_list_lock, iflags);
	for (srp = sfp->headrp; srp; srp = srp->nextrp) {
		/* if any read waiting, flag it */
		if ((0 == res) && (1 == srp->done) && (!srp->sg_io_owned))
			res = POLLIN | POLLRDNORM;
		++count;
	}
	read_unlock_irqrestore(&sfp->rq_list_lock, iflags);

	if (sdp->detached)
		res |= POLLHUP;
	else if (!sfp->cmd_q) {
		if (0 == count)
			res |= POLLOUT | POLLWRNORM;
	} else if (count < SG_MAX_QUEUE)
		res |= POLLOUT | POLLWRNORM;
	SCSI_LOG_TIMEOUT(3, printk("sg_poll: %s, res=0x%x\n",
				   sdp->disk->disk_name, (int) res));
	return res;
}

static int
sg_fasync(int fd, struct file *filp, int mode)
{
	int retval;
	Sg_device *sdp;
	Sg_fd *sfp;

	if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
		return -ENXIO;
	SCSI_LOG_TIMEOUT(3, printk("sg_fasync: %s, mode=%d\n",
				   sdp->disk->disk_name, mode));

	retval = fasync_helper(fd, filp, mode, &sfp->async_qp);
	return (retval < 0) ? retval : 0;
}

static struct page *
sg_vma_nopage(struct vm_area_struct *vma, unsigned long addr, int *type)
{
	Sg_fd *sfp;
	struct page *page = NOPAGE_SIGBUS;
	unsigned long offset, len, sa;
	Sg_scatter_hold *rsv_schp;
	struct scatterlist *sg;
	int k;

	if ((NULL == vma) || (!(sfp = (Sg_fd *) vma->vm_private_data)))
		return page;
	rsv_schp = &sfp->reserve;
	offset = addr - vma->vm_start;
	if (offset >= rsv_schp->bufflen)
		return page;
	SCSI_LOG_TIMEOUT(3, printk("sg_vma_nopage: offset=%lu, scatg=%d\n",
				   offset, rsv_schp->k_use_sg));
	sg = rsv_schp->buffer;
	sa = vma->vm_start;
	for (k = 0; (k < rsv_schp->k_use_sg) && (sa < vma->vm_end);
	     ++k, ++sg) {
		len = vma->vm_end - sa;
		len = (len < sg->length) ? len : sg->length;
		if (offset < len) {
			page = virt_to_page(page_address(sg->page) + offset);
			get_page(page);	/* increment page count */
			break;
		}
		sa += len;
		offset -= len;
	}

	if (type)
		*type = VM_FAULT_MINOR;
	return page;
}

static struct vm_operations_struct sg_mmap_vm_ops = {
	.nopage = sg_vma_nopage,
};

static int
sg_mmap(struct file *filp, struct vm_area_struct *vma)
{
	Sg_fd *sfp;
	unsigned long req_sz, len, sa;
	Sg_scatter_hold *rsv_schp;
	int k;
	struct scatterlist *sg;

	if ((!filp) || (!vma) || (!(sfp = (Sg_fd *) filp->private_data)))
		return -ENXIO;
	req_sz = vma->vm_end - vma->vm_start;
	SCSI_LOG_TIMEOUT(3, printk("sg_mmap starting, vm_start=%p, len=%d\n",
				   (void *) vma->vm_start, (int) req_sz));
	if (vma->vm_pgoff)
		return -EINVAL;	/* want no offset */
	rsv_schp = &sfp->reserve;
	if (req_sz > rsv_schp->bufflen)
		return -ENOMEM;	/* cannot map more than reserved buffer */

	sa = vma->vm_start;
	sg = rsv_schp->buffer;
	for (k = 0; (k < rsv_schp->k_use_sg) && (sa < vma->vm_end);
	     ++k, ++sg) {
		len = vma->vm_end - sa;
		len = (len < sg->length) ? len : sg->length;
		sa += len;
	}

	sfp->mmap_called = 1;
	vma->vm_flags |= VM_RESERVED;
	vma->vm_private_data = sfp;
	vma->vm_ops = &sg_mmap_vm_ops;
	return 0;
}
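/*
 * A hedged sketch of the zero-copy path that sg_mmap() and SG_FLAG_MMAP_IO
 * enable together (the buffer size is an illustrative value):
 *
 *	int sz = 65536;
 *	ioctl(fd, SG_SET_RESERVED_SIZE, &sz);
 *	void *buf = mmap(NULL, sz, PROT_READ | PROT_WRITE,
 *			 MAP_SHARED, fd, 0);
 *	// then issue SG_IO with io.flags = SG_FLAG_MMAP_IO and
 *	// io.dxfer_len <= sz; the data lands in the mapped reserve
 *	// buffer, so sg_read_xfer() skips its copy_to_user() pass
 */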
/* This function is a "bottom half" handler that is called by the
 * mid level when a command is completed (or has failed). */
static void
sg_cmd_done(void *data, char *sense, int result, int resid)
{
	Sg_request *srp = data;
	Sg_device *sdp = NULL;
	Sg_fd *sfp;
	unsigned long iflags;
	unsigned int ms;

	if (NULL == srp) {
		printk(KERN_ERR "sg_cmd_done: NULL request\n");
		return;
	}
	sfp = srp->parentfp;
	if (sfp)
		sdp = sfp->parentdp;
	if ((NULL == sdp) || sdp->detached) {
		printk(KERN_INFO "sg_cmd_done: device detached\n");
		return;
	}


	SCSI_LOG_TIMEOUT(4, printk("sg_cmd_done: %s, pack_id=%d, res=0x%x\n",
		sdp->disk->disk_name, srp->header.pack_id, result));
	srp->header.resid = resid;
	ms = jiffies_to_msecs(jiffies);
	srp->header.duration = (ms > srp->header.duration) ?
				(ms - srp->header.duration) : 0;
	if (0 != result) {
		struct scsi_sense_hdr sshdr;

		memcpy(srp->sense_b, sense, sizeof (srp->sense_b));
		srp->header.status = 0xff & result;
		srp->header.masked_status = status_byte(result);
		srp->header.msg_status = msg_byte(result);
		srp->header.host_status = host_byte(result);
		srp->header.driver_status = driver_byte(result);
		if ((sdp->sgdebug > 0) &&
		    ((CHECK_CONDITION == srp->header.masked_status) ||
		     (COMMAND_TERMINATED == srp->header.masked_status)))
			__scsi_print_sense("sg_cmd_done", sense,
					   SCSI_SENSE_BUFFERSIZE);

		/* Following if statement is a patch supplied by Eric Youngdale */
		if (driver_byte(result) != 0
		    && scsi_normalize_sense(sense, SCSI_SENSE_BUFFERSIZE, &sshdr)
		    && !scsi_sense_is_deferred(&sshdr)
		    && sshdr.sense_key == UNIT_ATTENTION
		    && sdp->device->removable) {
			/* Detected possible disc change. Set the bit - this */
			/* may be used if there are filesystems using this device */
			sdp->device->changed = 1;
		}
	}
	/* Rely on write phase to clean out srp status values, so no "else" */

	if (sfp->closed) {	/* whoops this fd already released, cleanup */
		SCSI_LOG_TIMEOUT(1, printk("sg_cmd_done: already closed, freeing ...\n"));
		sg_finish_rem_req(srp);
		srp = NULL;
		if (NULL == sfp->headrp) {
			SCSI_LOG_TIMEOUT(1, printk("sg_cmd_done: already closed, final cleanup\n"));
			if (0 == sg_remove_sfp(sdp, sfp)) {	/* device still present */
				scsi_device_put(sdp->device);
			}
			sfp = NULL;
		}
	} else if (srp && srp->orphan) {
		if (sfp->keep_orphan)
			srp->sg_io_owned = 0;
		else {
			sg_finish_rem_req(srp);
			srp = NULL;
		}
	}
	if (sfp && srp) {
		/* Now wake up any sg_read() that is waiting for this packet. */
		kill_fasync(&sfp->async_qp, SIGPOLL, POLL_IN);
		write_lock_irqsave(&sfp->rq_list_lock, iflags);
		srp->done = 1;
		wake_up_interruptible(&sfp->read_wait);
		write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
	}
}

static struct file_operations sg_fops = {
	.owner = THIS_MODULE,
	.read = sg_read,
	.write = sg_write,
	.poll = sg_poll,
	.ioctl = sg_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = sg_compat_ioctl,
#endif
	.open = sg_open,
	.mmap = sg_mmap,
	.release = sg_release,
	.fasync = sg_fasync,
};

static struct class *sg_sysfs_class;

static int sg_sysfs_valid = 0;

static int sg_alloc(struct gendisk *disk, struct scsi_device *scsidp)
{
	struct request_queue *q = scsidp->request_queue;
	Sg_device *sdp;
	unsigned long iflags;
	void *old_sg_dev_arr = NULL;
	int k, error;

	sdp = kzalloc(sizeof(Sg_device), GFP_KERNEL);
	if (!sdp) {
		printk(KERN_WARNING "kmalloc Sg_device failure\n");
		return -ENOMEM;
	}

	write_lock_irqsave(&sg_dev_arr_lock, iflags);
	if (unlikely(sg_nr_dev >= sg_dev_max)) {	/* try to resize */
		Sg_device **tmp_da;
		int tmp_dev_max = sg_nr_dev + SG_DEV_ARR_LUMP;
		write_unlock_irqrestore(&sg_dev_arr_lock, iflags);

		tmp_da = kzalloc(tmp_dev_max * sizeof(Sg_device *), GFP_KERNEL);
		if (unlikely(!tmp_da))
			goto expand_failed;

		write_lock_irqsave(&sg_dev_arr_lock, iflags);
		memcpy(tmp_da, sg_dev_arr, sg_dev_max * sizeof(Sg_device *));
		old_sg_dev_arr = sg_dev_arr;
		sg_dev_arr = tmp_da;
		sg_dev_max = tmp_dev_max;
	}

	for (k = 0; k < sg_dev_max; k++)
		if (!sg_dev_arr[k])
			break;
	if (unlikely(k >= SG_MAX_DEVS))
		goto overflow;

	SCSI_LOG_TIMEOUT(3, printk("sg_alloc: dev=%d \n", k));
	sprintf(disk->disk_name, "sg%d", k);
	disk->first_minor = k;
	sdp->disk = disk;
	sdp->device = scsidp;
	init_waitqueue_head(&sdp->o_excl_wait);
	sdp->sg_tablesize = min(q->max_hw_segments, q->max_phys_segments);

	sg_nr_dev++;
	sg_dev_arr[k] = sdp;
	write_unlock_irqrestore(&sg_dev_arr_lock, iflags);
	error = k;

 out:
	if (error < 0)
		kfree(sdp);
	kfree(old_sg_dev_arr);
	return error;

 expand_failed:
	printk(KERN_WARNING "sg_alloc: device array cannot be resized\n");
	error = -ENOMEM;
	goto out;

 overflow:
	write_unlock_irqrestore(&sg_dev_arr_lock, iflags);
	sdev_printk(KERN_WARNING, scsidp,
		    "Unable to attach sg device type=%d, minor "
		    "number exceeds %d\n", scsidp->type, SG_MAX_DEVS - 1);
	error = -ENODEV;
	goto out;
}

static int
sg_add(struct class_device *cl_dev, struct class_interface *cl_intf)
{
	struct scsi_device *scsidp = to_scsi_device(cl_dev->dev);
	struct gendisk *disk;
	Sg_device *sdp = NULL;
	struct cdev * cdev = NULL;
	int error, k;
	unsigned long iflags;

	disk = alloc_disk(1);
	if (!disk) {
		printk(KERN_WARNING "alloc_disk failed\n");
		return -ENOMEM;
	}
	disk->major = SCSI_GENERIC_MAJOR;

	error = -ENOMEM;
	cdev = cdev_alloc();
	if (!cdev) {
		printk(KERN_WARNING "cdev_alloc failed\n");
		goto out;
	}
	cdev->owner = THIS_MODULE;
	cdev->ops = &sg_fops;

	error = sg_alloc(disk, scsidp);
	if (error < 0) {
		printk(KERN_WARNING "sg_alloc failed\n");
		goto out;
	}
	k = error;
	sdp = sg_dev_arr[k];

	error = cdev_add(cdev, MKDEV(SCSI_GENERIC_MAJOR, k), 1);
	if (error)
		goto cdev_add_err;

	sdp->cdev = cdev;
	if (sg_sysfs_valid) {
		struct class_device * sg_class_member;

		sg_class_member = class_device_create(sg_sysfs_class, NULL,
				MKDEV(SCSI_GENERIC_MAJOR, k),
				cl_dev->dev, "%s",
				disk->disk_name);
		if (IS_ERR(sg_class_member))
			printk(KERN_WARNING "sg_add: "
				"class_device_create failed\n");
		class_set_devdata(sg_class_member, sdp);
		error = sysfs_create_link(&scsidp->sdev_gendev.kobj,
					  &sg_class_member->kobj, "generic");
		if (error)
			printk(KERN_ERR "sg_add: unable to make symlink "
					"'generic' back to sg%d\n", k);
	} else
		printk(KERN_WARNING "sg_add: sg_sys INvalid\n");

	sdev_printk(KERN_NOTICE, scsidp,
		    "Attached scsi generic sg%d type %d\n", k,scsidp->type);

	return 0;

cdev_add_err:
	write_lock_irqsave(&sg_dev_arr_lock, iflags);
	kfree(sg_dev_arr[k]);
	sg_dev_arr[k] = NULL;
	sg_nr_dev--;
	write_unlock_irqrestore(&sg_dev_arr_lock, iflags);

out:
	put_disk(disk);
	if (cdev)
		cdev_del(cdev);
	return error;
}

static void
sg_remove(struct class_device *cl_dev, struct class_interface *cl_intf)
{
	struct scsi_device *scsidp = to_scsi_device(cl_dev->dev);
	Sg_device *sdp = NULL;
	unsigned long iflags;
	Sg_fd *sfp;
	Sg_fd *tsfp;
	Sg_request *srp;
	Sg_request *tsrp;
	int k, delay;

	if (NULL == sg_dev_arr)
		return;
	delay = 0;
	write_lock_irqsave(&sg_dev_arr_lock, iflags);
	for (k = 0; k < sg_dev_max; k++) {
		sdp = sg_dev_arr[k];
		if ((NULL == sdp) || (sdp->device != scsidp))
			continue;	/* dirty but lowers nesting */
		if (sdp->headfp) {
			sdp->detached = 1;
			for (sfp = sdp->headfp; sfp; sfp = tsfp) {
				tsfp = sfp->nextfp;
				for (srp = sfp->headrp; srp; srp = tsrp) {
					tsrp = srp->nextrp;
					if (sfp->closed || (0 == sg_srp_done(srp, sfp)))
						sg_finish_rem_req(srp);
				}
				if (sfp->closed) {
					scsi_device_put(sdp->device);
					__sg_remove_sfp(sdp, sfp);
				} else {
					delay = 1;
					wake_up_interruptible(&sfp->read_wait);
					kill_fasync(&sfp->async_qp, SIGPOLL,
						    POLL_HUP);
				}
			}
			SCSI_LOG_TIMEOUT(3, printk("sg_remove: dev=%d, dirty\n", k));
			if (NULL == sdp->headfp) {
				sg_dev_arr[k] = NULL;
			}
		} else {	/* nothing active, simple case */
			SCSI_LOG_TIMEOUT(3, printk("sg_remove: dev=%d\n", k));
			sg_dev_arr[k] = NULL;
		}
		sg_nr_dev--;
		break;
	}
	write_unlock_irqrestore(&sg_dev_arr_lock, iflags);

	if (sdp) {
		sysfs_remove_link(&scsidp->sdev_gendev.kobj, "generic");
		class_device_destroy(sg_sysfs_class, MKDEV(SCSI_GENERIC_MAJOR, k));
		cdev_del(sdp->cdev);
		sdp->cdev = NULL;
		put_disk(sdp->disk);
		sdp->disk = NULL;
		if (NULL == sdp->headfp)
			kfree((char *) sdp);
	}

	if (delay)
		msleep(10);	/* dirty detach so delay device destruction */
}

module_param_named(scatter_elem_sz, scatter_elem_sz, int, S_IRUGO | S_IWUSR);
module_param_named(def_reserved_size, def_reserved_size, int,
		   S_IRUGO | S_IWUSR);
module_param_named(allow_dio, sg_allow_dio, int, S_IRUGO | S_IWUSR);

MODULE_AUTHOR("Douglas Gilbert");
MODULE_DESCRIPTION("SCSI generic (sg) driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(SG_VERSION_STR);
MODULE_ALIAS_CHARDEV_MAJOR(SCSI_GENERIC_MAJOR);

MODULE_PARM_DESC(scatter_elem_sz, "scatter gather element "
		 "size (default: max(SG_SCATTER_SZ, PAGE_SIZE))");
MODULE_PARM_DESC(def_reserved_size, "size of buffer reserved for each fd");
MODULE_PARM_DESC(allow_dio, "allow direct I/O (default: 0 (disallow))");
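/*
 * Example module load line for the parameters above (the values are
 * illustrative only):
 *
 *	modprobe sg def_reserved_size=131072 allow_dio=1
 *
 * Because of the S_IWUSR mode bits, the values can also be adjusted
 * later under /sys/module/sg/parameters/.
 */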
static int __init
init_sg(void)
{
	int rc;

	if (scatter_elem_sz < PAGE_SIZE) {
		scatter_elem_sz = PAGE_SIZE;
		scatter_elem_sz_prev = scatter_elem_sz;
	}
	if (def_reserved_size >= 0)
		sg_big_buff = def_reserved_size;
	else
		def_reserved_size = sg_big_buff;

	rc = register_chrdev_region(MKDEV(SCSI_GENERIC_MAJOR, 0),
				    SG_MAX_DEVS, "sg");
	if (rc)
		return rc;
	sg_sysfs_class = class_create(THIS_MODULE, "scsi_generic");
	if (IS_ERR(sg_sysfs_class)) {
		rc = PTR_ERR(sg_sysfs_class);
		goto err_out;
	}
	sg_sysfs_valid = 1;
	rc = scsi_register_interface(&sg_interface);
	if (0 == rc) {
#ifdef CONFIG_SCSI_PROC_FS
		sg_proc_init();
#endif				/* CONFIG_SCSI_PROC_FS */
		return 0;
	}
	class_destroy(sg_sysfs_class);
err_out:
	unregister_chrdev_region(MKDEV(SCSI_GENERIC_MAJOR, 0), SG_MAX_DEVS);
	return rc;
}

static void __exit
exit_sg(void)
{
#ifdef CONFIG_SCSI_PROC_FS
	sg_proc_cleanup();
#endif				/* CONFIG_SCSI_PROC_FS */
	scsi_unregister_interface(&sg_interface);
	class_destroy(sg_sysfs_class);
	sg_sysfs_valid = 0;
	unregister_chrdev_region(MKDEV(SCSI_GENERIC_MAJOR, 0),
				 SG_MAX_DEVS);
	kfree((char *)sg_dev_arr);
	sg_dev_arr = NULL;
	sg_dev_max = 0;
}

static int
sg_start_req(Sg_request * srp)
{
	int res;
	Sg_fd *sfp = srp->parentfp;
	sg_io_hdr_t *hp = &srp->header;
	int dxfer_len = (int) hp->dxfer_len;
	int dxfer_dir = hp->dxfer_direction;
	Sg_scatter_hold *req_schp = &srp->data;
	Sg_scatter_hold *rsv_schp = &sfp->reserve;

	SCSI_LOG_TIMEOUT(4, printk("sg_start_req: dxfer_len=%d\n", dxfer_len));
	if ((dxfer_len <= 0) || (dxfer_dir == SG_DXFER_NONE))
		return 0;
	if (sg_allow_dio && (hp->flags & SG_FLAG_DIRECT_IO) &&
	    (dxfer_dir != SG_DXFER_UNKNOWN) && (0 == hp->iovec_count) &&
	    (!sfp->parentdp->device->host->unchecked_isa_dma)) {
		res = sg_build_direct(srp, sfp, dxfer_len);
		if (res <= 0)	/* -ve -> error, 0 -> done, 1 -> try indirect */
			return res;
	}
	if ((!sg_res_in_use(sfp)) && (dxfer_len <= rsv_schp->bufflen))
		sg_link_reserve(sfp, srp, dxfer_len);
	else {
		res = sg_build_indirect(req_schp, sfp, dxfer_len);
		if (res) {
			sg_remove_scat(req_schp);
			return res;
		}
	}
	return 0;
}

static void
sg_finish_rem_req(Sg_request * srp)
{
	Sg_fd *sfp = srp->parentfp;
	Sg_scatter_hold *req_schp = &srp->data;

	SCSI_LOG_TIMEOUT(4, printk("sg_finish_rem_req: res_used=%d\n", (int) srp->res_used));
	if (srp->res_used)
		sg_unlink_reserve(sfp, srp);
	else
		sg_remove_scat(req_schp);
	sg_remove_request(sfp, srp);
}
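/*
 * Descriptive note on the buffer selection in sg_start_req() above:
 * direct IO is attempted first, but only when allow_dio is set,
 * SG_FLAG_DIRECT_IO was requested, the transfer direction is known, no
 * iovec is involved, and the host does not need ISA bounce buffering;
 * otherwise the per-fd reserve buffer is linked in if it is free and
 * large enough, and failing that a fresh indirect scatter list is built.
 */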
for us 1673 * 1674 * XXX(hch): we shouldn't need GFP_DMA for the actual S/G list. 1675 */ 1676 if (sfp->low_dma) 1677 gfp_flags |= GFP_DMA; 1678 schp->buffer = kzalloc(sg_bufflen, gfp_flags); 1679 if (!schp->buffer) 1680 return -ENOMEM; 1681 schp->sglist_len = sg_bufflen; 1682 return tablesize; /* number of scat_gath elements allocated */ 1683 } 1684 1685 #ifdef SG_ALLOW_DIO_CODE 1686 /* vvvvvvvv following code borrowed from st driver's direct IO vvvvvvvvv */ 1687 /* TODO: hopefully we can use the generic block layer code */ 1688 1689 /* Pin down user pages and put them into a scatter gather list. Returns <= 0 if 1690 - mapping of all pages not successful 1691 (i.e., either completely successful or fails) 1692 */ 1693 static int 1694 st_map_user_pages(struct scatterlist *sgl, const unsigned int max_pages, 1695 unsigned long uaddr, size_t count, int rw) 1696 { 1697 unsigned long end = (uaddr + count + PAGE_SIZE - 1) >> PAGE_SHIFT; 1698 unsigned long start = uaddr >> PAGE_SHIFT; 1699 const int nr_pages = end - start; 1700 int res, i, j; 1701 struct page **pages; 1702 1703 /* User attempted Overflow! */ 1704 if ((uaddr + count) < uaddr) 1705 return -EINVAL; 1706 1707 /* Too big */ 1708 if (nr_pages > max_pages) 1709 return -ENOMEM; 1710 1711 /* Hmm? */ 1712 if (count == 0) 1713 return 0; 1714 1715 if ((pages = kmalloc(max_pages * sizeof(*pages), GFP_ATOMIC)) == NULL) 1716 return -ENOMEM; 1717 1718 /* Try to fault in all of the necessary pages */ 1719 down_read(¤t->mm->mmap_sem); 1720 /* rw==READ means read from drive, write into memory area */ 1721 res = get_user_pages( 1722 current, 1723 current->mm, 1724 uaddr, 1725 nr_pages, 1726 rw == READ, 1727 0, /* don't force */ 1728 pages, 1729 NULL); 1730 up_read(¤t->mm->mmap_sem); 1731 1732 /* Errors and no page mapped should return here */ 1733 if (res < nr_pages) 1734 goto out_unmap; 1735 1736 for (i=0; i < nr_pages; i++) { 1737 /* FIXME: flush superflous for rw==READ, 1738 * probably wrong function for rw==WRITE 1739 */ 1740 flush_dcache_page(pages[i]); 1741 /* ?? Is locking needed? I don't think so */ 1742 /* if (TestSetPageLocked(pages[i])) 1743 goto out_unlock; */ 1744 } 1745 1746 sgl[0].page = pages[0]; 1747 sgl[0].offset = uaddr & ~PAGE_MASK; 1748 if (nr_pages > 1) { 1749 sgl[0].length = PAGE_SIZE - sgl[0].offset; 1750 count -= sgl[0].length; 1751 for (i=1; i < nr_pages ; i++) { 1752 sgl[i].page = pages[i]; 1753 sgl[i].length = count < PAGE_SIZE ? count : PAGE_SIZE; 1754 count -= PAGE_SIZE; 1755 } 1756 } 1757 else { 1758 sgl[0].length = count; 1759 } 1760 1761 kfree(pages); 1762 return nr_pages; 1763 1764 out_unmap: 1765 if (res > 0) { 1766 for (j=0; j < res; j++) 1767 page_cache_release(pages[j]); 1768 res = 0; 1769 } 1770 kfree(pages); 1771 return res; 1772 } 1773 1774 1775 /* And unmap them... 
*/ 1776 static int 1777 st_unmap_user_pages(struct scatterlist *sgl, const unsigned int nr_pages, 1778 int dirtied) 1779 { 1780 int i; 1781 1782 for (i=0; i < nr_pages; i++) { 1783 struct page *page = sgl[i].page; 1784 1785 if (dirtied) 1786 SetPageDirty(page); 1787 /* unlock_page(page); */ 1788 /* FIXME: cache flush missing for rw==READ 1789 * FIXME: call the correct reference counting function 1790 */ 1791 page_cache_release(page); 1792 } 1793 1794 return 0; 1795 } 1796 1797 /* ^^^^^^^^ above code borrowed from st driver's direct IO ^^^^^^^^^ */ 1798 #endif 1799 1800 1801 /* Returns: -ve -> error, 0 -> done, 1 -> try indirect */ 1802 static int 1803 sg_build_direct(Sg_request * srp, Sg_fd * sfp, int dxfer_len) 1804 { 1805 #ifdef SG_ALLOW_DIO_CODE 1806 sg_io_hdr_t *hp = &srp->header; 1807 Sg_scatter_hold *schp = &srp->data; 1808 int sg_tablesize = sfp->parentdp->sg_tablesize; 1809 int mx_sc_elems, res; 1810 struct scsi_device *sdev = sfp->parentdp->device; 1811 1812 if (((unsigned long)hp->dxferp & 1813 queue_dma_alignment(sdev->request_queue)) != 0) 1814 return 1; 1815 1816 mx_sc_elems = sg_build_sgat(schp, sfp, sg_tablesize); 1817 if (mx_sc_elems <= 0) { 1818 return 1; 1819 } 1820 res = st_map_user_pages(schp->buffer, mx_sc_elems, 1821 (unsigned long)hp->dxferp, dxfer_len, 1822 (SG_DXFER_TO_DEV == hp->dxfer_direction) ? 1 : 0); 1823 if (res <= 0) { 1824 sg_remove_scat(schp); 1825 return 1; 1826 } 1827 schp->k_use_sg = res; 1828 schp->dio_in_use = 1; 1829 hp->info |= SG_INFO_DIRECT_IO; 1830 return 0; 1831 #else 1832 return 1; 1833 #endif 1834 } 1835 1836 static int 1837 sg_build_indirect(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size) 1838 { 1839 struct scatterlist *sg; 1840 int ret_sz = 0, k, rem_sz, num, mx_sc_elems; 1841 int sg_tablesize = sfp->parentdp->sg_tablesize; 1842 int blk_size = buff_size; 1843 struct page *p = NULL; 1844 1845 if ((blk_size < 0) || (!sfp)) 1846 return -EFAULT; 1847 if (0 == blk_size) 1848 ++blk_size; /* don't know why */ 1849 /* round request up to next highest SG_SECTOR_SZ byte boundary */ 1850 blk_size = (blk_size + SG_SECTOR_MSK) & (~SG_SECTOR_MSK); 1851 SCSI_LOG_TIMEOUT(4, printk("sg_build_indirect: buff_size=%d, blk_size=%d\n", 1852 buff_size, blk_size)); 1853 1854 /* N.B. ret_sz carried into this block ... */ 1855 mx_sc_elems = sg_build_sgat(schp, sfp, sg_tablesize); 1856 if (mx_sc_elems < 0) 1857 return mx_sc_elems; /* most likely -ENOMEM */ 1858 1859 num = scatter_elem_sz; 1860 if (unlikely(num != scatter_elem_sz_prev)) { 1861 if (num < PAGE_SIZE) { 1862 scatter_elem_sz = PAGE_SIZE; 1863 scatter_elem_sz_prev = PAGE_SIZE; 1864 } else 1865 scatter_elem_sz_prev = num; 1866 } 1867 for (k = 0, sg = schp->buffer, rem_sz = blk_size; 1868 (rem_sz > 0) && (k < mx_sc_elems); 1869 ++k, rem_sz -= ret_sz, ++sg) { 1870 1871 num = (rem_sz > scatter_elem_sz_prev) ? 1872 scatter_elem_sz_prev : rem_sz; 1873 p = sg_page_malloc(num, sfp->low_dma, &ret_sz); 1874 if (!p) 1875 return -ENOMEM; 1876 1877 if (num == scatter_elem_sz_prev) { 1878 if (unlikely(ret_sz > scatter_elem_sz_prev)) { 1879 scatter_elem_sz = ret_sz; 1880 scatter_elem_sz_prev = ret_sz; 1881 } 1882 } 1883 sg->page = p; 1884 sg->length = (ret_sz > num) ? 
num : ret_sz; 1885 1886 SCSI_LOG_TIMEOUT(5, printk("sg_build_indirect: k=%d, num=%d, " 1887 "ret_sz=%d\n", k, num, ret_sz)); 1888 } /* end of for loop */ 1889 1890 schp->k_use_sg = k; 1891 SCSI_LOG_TIMEOUT(5, printk("sg_build_indirect: k_use_sg=%d, " 1892 "rem_sz=%d\n", k, rem_sz)); 1893 1894 schp->bufflen = blk_size; 1895 if (rem_sz > 0) /* must have failed */ 1896 return -ENOMEM; 1897 1898 return 0; 1899 } 1900 1901 static int 1902 sg_write_xfer(Sg_request * srp) 1903 { 1904 sg_io_hdr_t *hp = &srp->header; 1905 Sg_scatter_hold *schp = &srp->data; 1906 struct scatterlist *sg = schp->buffer; 1907 int num_xfer = 0; 1908 int j, k, onum, usglen, ksglen, res; 1909 int iovec_count = (int) hp->iovec_count; 1910 int dxfer_dir = hp->dxfer_direction; 1911 unsigned char *p; 1912 unsigned char __user *up; 1913 int new_interface = ('\0' == hp->interface_id) ? 0 : 1; 1914 1915 if ((SG_DXFER_UNKNOWN == dxfer_dir) || (SG_DXFER_TO_DEV == dxfer_dir) || 1916 (SG_DXFER_TO_FROM_DEV == dxfer_dir)) { 1917 num_xfer = (int) (new_interface ? hp->dxfer_len : hp->flags); 1918 if (schp->bufflen < num_xfer) 1919 num_xfer = schp->bufflen; 1920 } 1921 if ((num_xfer <= 0) || (schp->dio_in_use) || 1922 (new_interface 1923 && ((SG_FLAG_NO_DXFER | SG_FLAG_MMAP_IO) & hp->flags))) 1924 return 0; 1925 1926 SCSI_LOG_TIMEOUT(4, printk("sg_write_xfer: num_xfer=%d, iovec_count=%d, k_use_sg=%d\n", 1927 num_xfer, iovec_count, schp->k_use_sg)); 1928 if (iovec_count) { 1929 onum = iovec_count; 1930 if (!access_ok(VERIFY_READ, hp->dxferp, SZ_SG_IOVEC * onum)) 1931 return -EFAULT; 1932 } else 1933 onum = 1; 1934 1935 ksglen = sg->length; 1936 p = page_address(sg->page); 1937 for (j = 0, k = 0; j < onum; ++j) { 1938 res = sg_u_iovec(hp, iovec_count, j, 1, &usglen, &up); 1939 if (res) 1940 return res; 1941 1942 for (; p; ++sg, ksglen = sg->length, 1943 p = page_address(sg->page)) { 1944 if (usglen <= 0) 1945 break; 1946 if (ksglen > usglen) { 1947 if (usglen >= num_xfer) { 1948 if (__copy_from_user(p, up, num_xfer)) 1949 return -EFAULT; 1950 return 0; 1951 } 1952 if (__copy_from_user(p, up, usglen)) 1953 return -EFAULT; 1954 p += usglen; 1955 ksglen -= usglen; 1956 break; 1957 } else { 1958 if (ksglen >= num_xfer) { 1959 if (__copy_from_user(p, up, num_xfer)) 1960 return -EFAULT; 1961 return 0; 1962 } 1963 if (__copy_from_user(p, up, ksglen)) 1964 return -EFAULT; 1965 up += ksglen; 1966 usglen -= ksglen; 1967 } 1968 ++k; 1969 if (k >= schp->k_use_sg) 1970 return 0; 1971 } 1972 } 1973 1974 return 0; 1975 } 1976 1977 static int 1978 sg_u_iovec(sg_io_hdr_t * hp, int sg_num, int ind, 1979 int wr_xf, int *countp, unsigned char __user **up) 1980 { 1981 int num_xfer = (int) hp->dxfer_len; 1982 unsigned char __user *p = hp->dxferp; 1983 int count; 1984 1985 if (0 == sg_num) { 1986 if (wr_xf && ('\0' == hp->interface_id)) 1987 count = (int) hp->flags; /* holds "old" input_size */ 1988 else 1989 count = num_xfer; 1990 } else { 1991 sg_iovec_t iovec; 1992 if (__copy_from_user(&iovec, p + ind*SZ_SG_IOVEC, SZ_SG_IOVEC)) 1993 return -EFAULT; 1994 p = iovec.iov_base; 1995 count = (int) iovec.iov_len; 1996 } 1997 if (!access_ok(wr_xf ? 
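			/*
			 * Editor's note: wr_xf == 1 is passed by
			 * sg_write_xfer(), which copies from user space
			 * into the kernel buffers, so the iovec element
			 * only has to be readable; sg_read_xfer() passes
			 * wr_xf == 0 and the region must be writable.
			 */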
VERIFY_READ : VERIFY_WRITE, p, count)) 1998 return -EFAULT; 1999 if (up) 2000 *up = p; 2001 if (countp) 2002 *countp = count; 2003 return 0; 2004 } 2005 2006 static void 2007 sg_remove_scat(Sg_scatter_hold * schp) 2008 { 2009 SCSI_LOG_TIMEOUT(4, printk("sg_remove_scat: k_use_sg=%d\n", schp->k_use_sg)); 2010 if (schp->buffer && (schp->sglist_len > 0)) { 2011 struct scatterlist *sg = schp->buffer; 2012 2013 if (schp->dio_in_use) { 2014 #ifdef SG_ALLOW_DIO_CODE 2015 st_unmap_user_pages(sg, schp->k_use_sg, TRUE); 2016 #endif 2017 } else { 2018 int k; 2019 2020 for (k = 0; (k < schp->k_use_sg) && sg->page; 2021 ++k, ++sg) { 2022 SCSI_LOG_TIMEOUT(5, printk( 2023 "sg_remove_scat: k=%d, pg=0x%p, len=%d\n", 2024 k, sg->page, sg->length)); 2025 sg_page_free(sg->page, sg->length); 2026 } 2027 } 2028 kfree(schp->buffer); 2029 } 2030 memset(schp, 0, sizeof (*schp)); 2031 } 2032 2033 static int 2034 sg_read_xfer(Sg_request * srp) 2035 { 2036 sg_io_hdr_t *hp = &srp->header; 2037 Sg_scatter_hold *schp = &srp->data; 2038 struct scatterlist *sg = schp->buffer; 2039 int num_xfer = 0; 2040 int j, k, onum, usglen, ksglen, res; 2041 int iovec_count = (int) hp->iovec_count; 2042 int dxfer_dir = hp->dxfer_direction; 2043 unsigned char *p; 2044 unsigned char __user *up; 2045 int new_interface = ('\0' == hp->interface_id) ? 0 : 1; 2046 2047 if ((SG_DXFER_UNKNOWN == dxfer_dir) || (SG_DXFER_FROM_DEV == dxfer_dir) 2048 || (SG_DXFER_TO_FROM_DEV == dxfer_dir)) { 2049 num_xfer = hp->dxfer_len; 2050 if (schp->bufflen < num_xfer) 2051 num_xfer = schp->bufflen; 2052 } 2053 if ((num_xfer <= 0) || (schp->dio_in_use) || 2054 (new_interface 2055 && ((SG_FLAG_NO_DXFER | SG_FLAG_MMAP_IO) & hp->flags))) 2056 return 0; 2057 2058 SCSI_LOG_TIMEOUT(4, printk("sg_read_xfer: num_xfer=%d, iovec_count=%d, k_use_sg=%d\n", 2059 num_xfer, iovec_count, schp->k_use_sg)); 2060 if (iovec_count) { 2061 onum = iovec_count; 2062 if (!access_ok(VERIFY_READ, hp->dxferp, SZ_SG_IOVEC * onum)) 2063 return -EFAULT; 2064 } else 2065 onum = 1; 2066 2067 p = page_address(sg->page); 2068 ksglen = sg->length; 2069 for (j = 0, k = 0; j < onum; ++j) { 2070 res = sg_u_iovec(hp, iovec_count, j, 0, &usglen, &up); 2071 if (res) 2072 return res; 2073 2074 for (; p; ++sg, ksglen = sg->length, 2075 p = page_address(sg->page)) { 2076 if (usglen <= 0) 2077 break; 2078 if (ksglen > usglen) { 2079 if (usglen >= num_xfer) { 2080 if (__copy_to_user(up, p, num_xfer)) 2081 return -EFAULT; 2082 return 0; 2083 } 2084 if (__copy_to_user(up, p, usglen)) 2085 return -EFAULT; 2086 p += usglen; 2087 ksglen -= usglen; 2088 break; 2089 } else { 2090 if (ksglen >= num_xfer) { 2091 if (__copy_to_user(up, p, num_xfer)) 2092 return -EFAULT; 2093 return 0; 2094 } 2095 if (__copy_to_user(up, p, ksglen)) 2096 return -EFAULT; 2097 up += ksglen; 2098 usglen -= ksglen; 2099 } 2100 ++k; 2101 if (k >= schp->k_use_sg) 2102 return 0; 2103 } 2104 } 2105 2106 return 0; 2107 } 2108 2109 static int 2110 sg_read_oxfer(Sg_request * srp, char __user *outp, int num_read_xfer) 2111 { 2112 Sg_scatter_hold *schp = &srp->data; 2113 struct scatterlist *sg = schp->buffer; 2114 int k, num; 2115 2116 SCSI_LOG_TIMEOUT(4, printk("sg_read_oxfer: num_read_xfer=%d\n", 2117 num_read_xfer)); 2118 if ((!outp) || (num_read_xfer <= 0)) 2119 return 0; 2120 2121 for (k = 0; (k < schp->k_use_sg) && sg->page; ++k, ++sg) { 2122 num = sg->length; 2123 if (num > num_read_xfer) { 2124 if (__copy_to_user(outp, page_address(sg->page), 2125 num_read_xfer)) 2126 return -EFAULT; 2127 break; 2128 } else { 2129 if (__copy_to_user(outp, 
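			/*
			 * Editor's note: this branch copies an entire
			 * scatter-gather element out to user space;
			 * outp and num_read_xfer are advanced below,
			 * while the final, partial element is handled
			 * by the num > num_read_xfer branch above.
			 */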
page_address(sg->page), 2130 num)) 2131 return -EFAULT; 2132 num_read_xfer -= num; 2133 if (num_read_xfer <= 0) 2134 break; 2135 outp += num; 2136 } 2137 } 2138 2139 return 0; 2140 } 2141 2142 static void 2143 sg_build_reserve(Sg_fd * sfp, int req_size) 2144 { 2145 Sg_scatter_hold *schp = &sfp->reserve; 2146 2147 SCSI_LOG_TIMEOUT(4, printk("sg_build_reserve: req_size=%d\n", req_size)); 2148 do { 2149 if (req_size < PAGE_SIZE) 2150 req_size = PAGE_SIZE; 2151 if (0 == sg_build_indirect(schp, sfp, req_size)) 2152 return; 2153 else 2154 sg_remove_scat(schp); 2155 req_size >>= 1; /* divide by 2 */ 2156 } while (req_size > (PAGE_SIZE / 2)); 2157 } 2158 2159 static void 2160 sg_link_reserve(Sg_fd * sfp, Sg_request * srp, int size) 2161 { 2162 Sg_scatter_hold *req_schp = &srp->data; 2163 Sg_scatter_hold *rsv_schp = &sfp->reserve; 2164 struct scatterlist *sg = rsv_schp->buffer; 2165 int k, num, rem; 2166 2167 srp->res_used = 1; 2168 SCSI_LOG_TIMEOUT(4, printk("sg_link_reserve: size=%d\n", size)); 2169 rem = size; 2170 2171 for (k = 0; k < rsv_schp->k_use_sg; ++k, ++sg) { 2172 num = sg->length; 2173 if (rem <= num) { 2174 sfp->save_scat_len = num; 2175 sg->length = rem; 2176 req_schp->k_use_sg = k + 1; 2177 req_schp->sglist_len = rsv_schp->sglist_len; 2178 req_schp->buffer = rsv_schp->buffer; 2179 2180 req_schp->bufflen = size; 2181 req_schp->b_malloc_len = rsv_schp->b_malloc_len; 2182 break; 2183 } else 2184 rem -= num; 2185 } 2186 2187 if (k >= rsv_schp->k_use_sg) 2188 SCSI_LOG_TIMEOUT(1, printk("sg_link_reserve: BAD size\n")); 2189 } 2190 2191 static void 2192 sg_unlink_reserve(Sg_fd * sfp, Sg_request * srp) 2193 { 2194 Sg_scatter_hold *req_schp = &srp->data; 2195 Sg_scatter_hold *rsv_schp = &sfp->reserve; 2196 2197 SCSI_LOG_TIMEOUT(4, printk("sg_unlink_reserve: req->k_use_sg=%d\n", 2198 (int) req_schp->k_use_sg)); 2199 if ((rsv_schp->k_use_sg > 0) && (req_schp->k_use_sg > 0)) { 2200 struct scatterlist *sg = rsv_schp->buffer; 2201 2202 if (sfp->save_scat_len > 0) 2203 (sg + (req_schp->k_use_sg - 1))->length = 2204 (unsigned) sfp->save_scat_len; 2205 else 2206 SCSI_LOG_TIMEOUT(1, printk ("sg_unlink_reserve: BAD save_scat_len\n")); 2207 } 2208 req_schp->k_use_sg = 0; 2209 req_schp->bufflen = 0; 2210 req_schp->buffer = NULL; 2211 req_schp->sglist_len = 0; 2212 sfp->save_scat_len = 0; 2213 srp->res_used = 0; 2214 } 2215 2216 static Sg_request * 2217 sg_get_rq_mark(Sg_fd * sfp, int pack_id) 2218 { 2219 Sg_request *resp; 2220 unsigned long iflags; 2221 2222 write_lock_irqsave(&sfp->rq_list_lock, iflags); 2223 for (resp = sfp->headrp; resp; resp = resp->nextrp) { 2224 /* look for requests that are ready + not SG_IO owned */ 2225 if ((1 == resp->done) && (!resp->sg_io_owned) && 2226 ((-1 == pack_id) || (resp->header.pack_id == pack_id))) { 2227 resp->done = 2; /* guard against other readers */ 2228 break; 2229 } 2230 } 2231 write_unlock_irqrestore(&sfp->rq_list_lock, iflags); 2232 return resp; 2233 } 2234 2235 #ifdef CONFIG_SCSI_PROC_FS 2236 static Sg_request * 2237 sg_get_nth_request(Sg_fd * sfp, int nth) 2238 { 2239 Sg_request *resp; 2240 unsigned long iflags; 2241 int k; 2242 2243 read_lock_irqsave(&sfp->rq_list_lock, iflags); 2244 for (k = 0, resp = sfp->headrp; resp && (k < nth); 2245 ++k, resp = resp->nextrp) ; 2246 read_unlock_irqrestore(&sfp->rq_list_lock, iflags); 2247 return resp; 2248 } 2249 #endif 2250 2251 /* always adds to end of list */ 2252 static Sg_request * 2253 sg_add_request(Sg_fd * sfp) 2254 { 2255 int k; 2256 unsigned long iflags; 2257 Sg_request *resp; 2258 Sg_request *rp = 
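	/*
	 * Editor's note: requests are never allocated dynamically; they
	 * live in the fixed req_arr[SG_MAX_QUEUE] array embedded in the
	 * owning Sg_fd, so rp starts at slot 0 and the scan below looks
	 * for a free slot, i.e. one whose parentfp is NULL.
	 */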
sfp->req_arr; 2259 2260 write_lock_irqsave(&sfp->rq_list_lock, iflags); 2261 resp = sfp->headrp; 2262 if (!resp) { 2263 memset(rp, 0, sizeof (Sg_request)); 2264 rp->parentfp = sfp; 2265 resp = rp; 2266 sfp->headrp = resp; 2267 } else { 2268 if (0 == sfp->cmd_q) 2269 resp = NULL; /* command queuing disallowed */ 2270 else { 2271 for (k = 0; k < SG_MAX_QUEUE; ++k, ++rp) { 2272 if (!rp->parentfp) 2273 break; 2274 } 2275 if (k < SG_MAX_QUEUE) { 2276 memset(rp, 0, sizeof (Sg_request)); 2277 rp->parentfp = sfp; 2278 while (resp->nextrp) 2279 resp = resp->nextrp; 2280 resp->nextrp = rp; 2281 resp = rp; 2282 } else 2283 resp = NULL; 2284 } 2285 } 2286 if (resp) { 2287 resp->nextrp = NULL; 2288 resp->header.duration = jiffies_to_msecs(jiffies); 2289 } 2290 write_unlock_irqrestore(&sfp->rq_list_lock, iflags); 2291 return resp; 2292 } 2293 2294 /* Return of 1 for found; 0 for not found */ 2295 static int 2296 sg_remove_request(Sg_fd * sfp, Sg_request * srp) 2297 { 2298 Sg_request *prev_rp; 2299 Sg_request *rp; 2300 unsigned long iflags; 2301 int res = 0; 2302 2303 if ((!sfp) || (!srp) || (!sfp->headrp)) 2304 return res; 2305 write_lock_irqsave(&sfp->rq_list_lock, iflags); 2306 prev_rp = sfp->headrp; 2307 if (srp == prev_rp) { 2308 sfp->headrp = prev_rp->nextrp; 2309 prev_rp->parentfp = NULL; 2310 res = 1; 2311 } else { 2312 while ((rp = prev_rp->nextrp)) { 2313 if (srp == rp) { 2314 prev_rp->nextrp = rp->nextrp; 2315 rp->parentfp = NULL; 2316 res = 1; 2317 break; 2318 } 2319 prev_rp = rp; 2320 } 2321 } 2322 write_unlock_irqrestore(&sfp->rq_list_lock, iflags); 2323 return res; 2324 } 2325 2326 #ifdef CONFIG_SCSI_PROC_FS 2327 static Sg_fd * 2328 sg_get_nth_sfp(Sg_device * sdp, int nth) 2329 { 2330 Sg_fd *resp; 2331 unsigned long iflags; 2332 int k; 2333 2334 read_lock_irqsave(&sg_dev_arr_lock, iflags); 2335 for (k = 0, resp = sdp->headfp; resp && (k < nth); 2336 ++k, resp = resp->nextfp) ; 2337 read_unlock_irqrestore(&sg_dev_arr_lock, iflags); 2338 return resp; 2339 } 2340 #endif 2341 2342 static Sg_fd * 2343 sg_add_sfp(Sg_device * sdp, int dev) 2344 { 2345 Sg_fd *sfp; 2346 unsigned long iflags; 2347 int bufflen; 2348 2349 sfp = kzalloc(sizeof(*sfp), GFP_ATOMIC | __GFP_NOWARN); 2350 if (!sfp) 2351 return NULL; 2352 2353 init_waitqueue_head(&sfp->read_wait); 2354 rwlock_init(&sfp->rq_list_lock); 2355 2356 sfp->timeout = SG_DEFAULT_TIMEOUT; 2357 sfp->timeout_user = SG_DEFAULT_TIMEOUT_USER; 2358 sfp->force_packid = SG_DEF_FORCE_PACK_ID; 2359 sfp->low_dma = (SG_DEF_FORCE_LOW_DMA == 0) ? 
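	/*
	 * Editor's note: with the default SG_DEF_FORCE_LOW_DMA == 0 the
	 * new fd simply inherits the host adapter's unchecked_isa_dma
	 * flag, so data buffers are confined to the low (GFP_DMA) zone
	 * only when the HBA actually needs ISA-style addressing; a
	 * non-zero build default would force low buffers unconditionally.
	 */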
2360 sdp->device->host->unchecked_isa_dma : 1; 2361 sfp->cmd_q = SG_DEF_COMMAND_Q; 2362 sfp->keep_orphan = SG_DEF_KEEP_ORPHAN; 2363 sfp->parentdp = sdp; 2364 write_lock_irqsave(&sg_dev_arr_lock, iflags); 2365 if (!sdp->headfp) 2366 sdp->headfp = sfp; 2367 else { /* add to tail of existing list */ 2368 Sg_fd *pfp = sdp->headfp; 2369 while (pfp->nextfp) 2370 pfp = pfp->nextfp; 2371 pfp->nextfp = sfp; 2372 } 2373 write_unlock_irqrestore(&sg_dev_arr_lock, iflags); 2374 SCSI_LOG_TIMEOUT(3, printk("sg_add_sfp: sfp=0x%p\n", sfp)); 2375 if (unlikely(sg_big_buff != def_reserved_size)) 2376 sg_big_buff = def_reserved_size; 2377 2378 bufflen = min_t(int, sg_big_buff, 2379 sdp->device->request_queue->max_sectors * 512); 2380 sg_build_reserve(sfp, bufflen); 2381 SCSI_LOG_TIMEOUT(3, printk("sg_add_sfp: bufflen=%d, k_use_sg=%d\n", 2382 sfp->reserve.bufflen, sfp->reserve.k_use_sg)); 2383 return sfp; 2384 } 2385 2386 static void 2387 __sg_remove_sfp(Sg_device * sdp, Sg_fd * sfp) 2388 { 2389 Sg_fd *fp; 2390 Sg_fd *prev_fp; 2391 2392 prev_fp = sdp->headfp; 2393 if (sfp == prev_fp) 2394 sdp->headfp = prev_fp->nextfp; 2395 else { 2396 while ((fp = prev_fp->nextfp)) { 2397 if (sfp == fp) { 2398 prev_fp->nextfp = fp->nextfp; 2399 break; 2400 } 2401 prev_fp = fp; 2402 } 2403 } 2404 if (sfp->reserve.bufflen > 0) { 2405 SCSI_LOG_TIMEOUT(6, 2406 printk("__sg_remove_sfp: bufflen=%d, k_use_sg=%d\n", 2407 (int) sfp->reserve.bufflen, (int) sfp->reserve.k_use_sg)); 2408 sg_remove_scat(&sfp->reserve); 2409 } 2410 sfp->parentdp = NULL; 2411 SCSI_LOG_TIMEOUT(6, printk("__sg_remove_sfp: sfp=0x%p\n", sfp)); 2412 kfree(sfp); 2413 } 2414 2415 /* Returns 0 in normal case, 1 when detached and sdp object removed */ 2416 static int 2417 sg_remove_sfp(Sg_device * sdp, Sg_fd * sfp) 2418 { 2419 Sg_request *srp; 2420 Sg_request *tsrp; 2421 int dirty = 0; 2422 int res = 0; 2423 2424 for (srp = sfp->headrp; srp; srp = tsrp) { 2425 tsrp = srp->nextrp; 2426 if (sg_srp_done(srp, sfp)) 2427 sg_finish_rem_req(srp); 2428 else 2429 ++dirty; 2430 } 2431 if (0 == dirty) { 2432 unsigned long iflags; 2433 2434 write_lock_irqsave(&sg_dev_arr_lock, iflags); 2435 __sg_remove_sfp(sdp, sfp); 2436 if (sdp->detached && (NULL == sdp->headfp)) { 2437 int k, maxd; 2438 2439 maxd = sg_dev_max; 2440 for (k = 0; k < maxd; ++k) { 2441 if (sdp == sg_dev_arr[k]) 2442 break; 2443 } 2444 if (k < maxd) 2445 sg_dev_arr[k] = NULL; 2446 kfree((char *) sdp); 2447 res = 1; 2448 } 2449 write_unlock_irqrestore(&sg_dev_arr_lock, iflags); 2450 } else { 2451 /* MOD_INC's to inhibit unloading sg and associated adapter driver */ 2452 /* only bump the access_count if we actually succeeded in 2453 * throwing another counter on the host module */ 2454 scsi_device_get(sdp->device); /* XXX: retval ignored? */ 2455 sfp->closed = 1; /* flag dirty state on this fd */ 2456 SCSI_LOG_TIMEOUT(1, printk("sg_remove_sfp: worrisome, %d writes pending\n", 2457 dirty)); 2458 } 2459 return res; 2460 } 2461 2462 static int 2463 sg_res_in_use(Sg_fd * sfp) 2464 { 2465 const Sg_request *srp; 2466 unsigned long iflags; 2467 2468 read_lock_irqsave(&sfp->rq_list_lock, iflags); 2469 for (srp = sfp->headrp; srp; srp = srp->nextrp) 2470 if (srp->res_used) 2471 break; 2472 read_unlock_irqrestore(&sfp->rq_list_lock, iflags); 2473 return srp ? 
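	/*
	 * Editor's note: the loop above breaks with srp non-NULL as soon
	 * as one queued request has res_used set, so this returns 1
	 * exactly when the fd's reserve buffer is currently lent out.
	 */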
1 : 0; 2474 } 2475 2476 /* The size actually allocated is output via *retSzp when the return value is non-NULL */ 2477 static struct page * 2478 sg_page_malloc(int rqSz, int lowDma, int *retSzp) 2479 { 2480 struct page *resp = NULL; 2481 gfp_t page_mask; 2482 int order, a_size; 2483 int resSz; 2484 2485 if ((rqSz <= 0) || (NULL == retSzp)) 2486 return resp; 2487 2488 if (lowDma) 2489 page_mask = GFP_ATOMIC | GFP_DMA | __GFP_COMP | __GFP_NOWARN; 2490 else 2491 page_mask = GFP_ATOMIC | __GFP_COMP | __GFP_NOWARN; 2492 2493 for (order = 0, a_size = PAGE_SIZE; a_size < rqSz; 2494 order++, a_size <<= 1) ; 2495 resSz = a_size; /* rounded up if necessary */ 2496 resp = alloc_pages(page_mask, order); 2497 while ((!resp) && order) { 2498 --order; 2499 a_size >>= 1; /* divide by 2, until PAGE_SIZE */ 2500 resp = alloc_pages(page_mask, order); /* try half */ 2501 resSz = a_size; 2502 } 2503 if (resp) { 2504 if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO)) 2505 memset(page_address(resp), 0, resSz); 2506 *retSzp = resSz; 2507 } 2508 return resp; 2509 } 2510 2511 static void 2512 sg_page_free(struct page *page, int size) 2513 { 2514 int order, a_size; 2515 2516 if (!page) 2517 return; 2518 for (order = 0, a_size = PAGE_SIZE; a_size < size; 2519 order++, a_size <<= 1) ; 2520 __free_pages(page, order); 2521 } 2522 2523 #ifndef MAINTENANCE_IN_CMD 2524 #define MAINTENANCE_IN_CMD 0xa3 2525 #endif 2526 2527 static unsigned char allow_ops[] = { TEST_UNIT_READY, REQUEST_SENSE, 2528 INQUIRY, READ_CAPACITY, READ_BUFFER, READ_6, READ_10, READ_12, 2529 READ_16, MODE_SENSE, MODE_SENSE_10, LOG_SENSE, REPORT_LUNS, 2530 SERVICE_ACTION_IN, RECEIVE_DIAGNOSTIC, READ_LONG, MAINTENANCE_IN_CMD 2531 }; 2532 2533 static int 2534 sg_allow_access(unsigned char opcode, char dev_type) 2535 { 2536 int k; 2537 2538 if (TYPE_SCANNER == dev_type) /* TYPE_ROM maybe burner */ 2539 return 1; 2540 for (k = 0; k < sizeof (allow_ops); ++k) { 2541 if (opcode == allow_ops[k]) 2542 return 1; 2543 } 2544 return 0; 2545 } 2546 2547 #ifdef CONFIG_SCSI_PROC_FS 2548 static int 2549 sg_last_dev(void) 2550 { 2551 int k; 2552 unsigned long iflags; 2553 2554 read_lock_irqsave(&sg_dev_arr_lock, iflags); 2555 for (k = sg_dev_max - 1; k >= 0; --k) 2556 if (sg_dev_arr[k] && sg_dev_arr[k]->device) 2557 break; 2558 read_unlock_irqrestore(&sg_dev_arr_lock, iflags); 2559 return k + 1; /* origin 1 */ 2560 } 2561 #endif 2562 2563 static Sg_device * 2564 sg_get_dev(int dev) 2565 { 2566 Sg_device *sdp = NULL; 2567 unsigned long iflags; 2568 2569 if (sg_dev_arr && (dev >= 0)) { 2570 read_lock_irqsave(&sg_dev_arr_lock, iflags); 2571 if (dev < sg_dev_max) 2572 sdp = sg_dev_arr[dev]; 2573 read_unlock_irqrestore(&sg_dev_arr_lock, iflags); 2574 } 2575 return sdp; 2576 } 2577 2578 #ifdef CONFIG_SCSI_PROC_FS 2579 2580 static struct proc_dir_entry *sg_proc_sgp = NULL; 2581 2582 static char sg_proc_sg_dirname[] = "scsi/sg"; 2583 2584 static int sg_proc_seq_show_int(struct seq_file *s, void *v); 2585 2586 static int sg_proc_single_open_adio(struct inode *inode, struct file *file); 2587 static ssize_t sg_proc_write_adio(struct file *filp, const char __user *buffer, 2588 size_t count, loff_t *off); 2589 static struct file_operations adio_fops = { 2590 /* .owner, .read and .llseek added in sg_proc_init() */ 2591 .open = sg_proc_single_open_adio, 2592 .write = sg_proc_write_adio, 2593 .release = single_release, 2594 }; 2595 2596 static int sg_proc_single_open_dressz(struct inode *inode, struct file *file); 2597 static ssize_t sg_proc_write_dressz(struct file *filp, 2598 const char __user
*buffer, size_t count, loff_t *off); 2599 static struct file_operations dressz_fops = { 2600 .open = sg_proc_single_open_dressz, 2601 .write = sg_proc_write_dressz, 2602 .release = single_release, 2603 }; 2604 2605 static int sg_proc_seq_show_version(struct seq_file *s, void *v); 2606 static int sg_proc_single_open_version(struct inode *inode, struct file *file); 2607 static struct file_operations version_fops = { 2608 .open = sg_proc_single_open_version, 2609 .release = single_release, 2610 }; 2611 2612 static int sg_proc_seq_show_devhdr(struct seq_file *s, void *v); 2613 static int sg_proc_single_open_devhdr(struct inode *inode, struct file *file); 2614 static struct file_operations devhdr_fops = { 2615 .open = sg_proc_single_open_devhdr, 2616 .release = single_release, 2617 }; 2618 2619 static int sg_proc_seq_show_dev(struct seq_file *s, void *v); 2620 static int sg_proc_open_dev(struct inode *inode, struct file *file); 2621 static void * dev_seq_start(struct seq_file *s, loff_t *pos); 2622 static void * dev_seq_next(struct seq_file *s, void *v, loff_t *pos); 2623 static void dev_seq_stop(struct seq_file *s, void *v); 2624 static struct file_operations dev_fops = { 2625 .open = sg_proc_open_dev, 2626 .release = seq_release, 2627 }; 2628 static struct seq_operations dev_seq_ops = { 2629 .start = dev_seq_start, 2630 .next = dev_seq_next, 2631 .stop = dev_seq_stop, 2632 .show = sg_proc_seq_show_dev, 2633 }; 2634 2635 static int sg_proc_seq_show_devstrs(struct seq_file *s, void *v); 2636 static int sg_proc_open_devstrs(struct inode *inode, struct file *file); 2637 static struct file_operations devstrs_fops = { 2638 .open = sg_proc_open_devstrs, 2639 .release = seq_release, 2640 }; 2641 static struct seq_operations devstrs_seq_ops = { 2642 .start = dev_seq_start, 2643 .next = dev_seq_next, 2644 .stop = dev_seq_stop, 2645 .show = sg_proc_seq_show_devstrs, 2646 }; 2647 2648 static int sg_proc_seq_show_debug(struct seq_file *s, void *v); 2649 static int sg_proc_open_debug(struct inode *inode, struct file *file); 2650 static struct file_operations debug_fops = { 2651 .open = sg_proc_open_debug, 2652 .release = seq_release, 2653 }; 2654 static struct seq_operations debug_seq_ops = { 2655 .start = dev_seq_start, 2656 .next = dev_seq_next, 2657 .stop = dev_seq_stop, 2658 .show = sg_proc_seq_show_debug, 2659 }; 2660 2661 2662 struct sg_proc_leaf { 2663 const char * name; 2664 struct file_operations * fops; 2665 }; 2666 2667 static struct sg_proc_leaf sg_proc_leaf_arr[] = { 2668 {"allow_dio", &adio_fops}, 2669 {"debug", &debug_fops}, 2670 {"def_reserved_size", &dressz_fops}, 2671 {"device_hdr", &devhdr_fops}, 2672 {"devices", &dev_fops}, 2673 {"device_strs", &devstrs_fops}, 2674 {"version", &version_fops} 2675 }; 2676 2677 static int 2678 sg_proc_init(void) 2679 { 2680 int k, mask; 2681 int num_leaves = ARRAY_SIZE(sg_proc_leaf_arr); 2682 struct proc_dir_entry *pdep; 2683 struct sg_proc_leaf * leaf; 2684 2685 sg_proc_sgp = proc_mkdir(sg_proc_sg_dirname, NULL); 2686 if (!sg_proc_sgp) 2687 return 1; 2688 for (k = 0; k < num_leaves; ++k) { 2689 leaf = &sg_proc_leaf_arr[k]; 2690 mask = leaf->fops->write ? 
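		/*
		 * Editor's note: leaves that provide a write() method
		 * (allow_dio and def_reserved_size) are created 0644 so
		 * root can tune them through /proc/scsi/sg/; the
		 * remaining entries are read-only 0444.
		 */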
S_IRUGO | S_IWUSR : S_IRUGO; 2691 pdep = create_proc_entry(leaf->name, mask, sg_proc_sgp); 2692 if (pdep) { 2693 leaf->fops->owner = THIS_MODULE, 2694 leaf->fops->read = seq_read, 2695 leaf->fops->llseek = seq_lseek, 2696 pdep->proc_fops = leaf->fops; 2697 } 2698 } 2699 return 0; 2700 } 2701 2702 static void 2703 sg_proc_cleanup(void) 2704 { 2705 int k; 2706 int num_leaves = ARRAY_SIZE(sg_proc_leaf_arr); 2707 2708 if (!sg_proc_sgp) 2709 return; 2710 for (k = 0; k < num_leaves; ++k) 2711 remove_proc_entry(sg_proc_leaf_arr[k].name, sg_proc_sgp); 2712 remove_proc_entry(sg_proc_sg_dirname, NULL); 2713 } 2714 2715 2716 static int sg_proc_seq_show_int(struct seq_file *s, void *v) 2717 { 2718 seq_printf(s, "%d\n", *((int *)s->private)); 2719 return 0; 2720 } 2721 2722 static int sg_proc_single_open_adio(struct inode *inode, struct file *file) 2723 { 2724 return single_open(file, sg_proc_seq_show_int, &sg_allow_dio); 2725 } 2726 2727 static ssize_t 2728 sg_proc_write_adio(struct file *filp, const char __user *buffer, 2729 size_t count, loff_t *off) 2730 { 2731 int num; 2732 char buff[11]; 2733 2734 if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO)) 2735 return -EACCES; 2736 num = (count < 10) ? count : 10; 2737 if (copy_from_user(buff, buffer, num)) 2738 return -EFAULT; 2739 buff[num] = '\0'; 2740 sg_allow_dio = simple_strtoul(buff, NULL, 10) ? 1 : 0; 2741 return count; 2742 } 2743 2744 static int sg_proc_single_open_dressz(struct inode *inode, struct file *file) 2745 { 2746 return single_open(file, sg_proc_seq_show_int, &sg_big_buff); 2747 } 2748 2749 static ssize_t 2750 sg_proc_write_dressz(struct file *filp, const char __user *buffer, 2751 size_t count, loff_t *off) 2752 { 2753 int num; 2754 unsigned long k = ULONG_MAX; 2755 char buff[11]; 2756 2757 if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO)) 2758 return -EACCES; 2759 num = (count < 10) ? count : 10; 2760 if (copy_from_user(buff, buffer, num)) 2761 return -EFAULT; 2762 buff[num] = '\0'; 2763 k = simple_strtoul(buff, NULL, 10); 2764 if (k <= 1048576) { /* limit "big buff" to 1 MB */ 2765 sg_big_buff = k; 2766 return count; 2767 } 2768 return -ERANGE; 2769 } 2770 2771 static int sg_proc_seq_show_version(struct seq_file *s, void *v) 2772 { 2773 seq_printf(s, "%d\t%s [%s]\n", sg_version_num, SG_VERSION_STR, 2774 sg_version_date); 2775 return 0; 2776 } 2777 2778 static int sg_proc_single_open_version(struct inode *inode, struct file *file) 2779 { 2780 return single_open(file, sg_proc_seq_show_version, NULL); 2781 } 2782 2783 static int sg_proc_seq_show_devhdr(struct seq_file *s, void *v) 2784 { 2785 seq_printf(s, "host\tchan\tid\tlun\ttype\topens\tqdepth\tbusy\t" 2786 "online\n"); 2787 return 0; 2788 } 2789 2790 static int sg_proc_single_open_devhdr(struct inode *inode, struct file *file) 2791 { 2792 return single_open(file, sg_proc_seq_show_devhdr, NULL); 2793 } 2794 2795 struct sg_proc_deviter { 2796 loff_t index; 2797 size_t max; 2798 }; 2799 2800 static void * dev_seq_start(struct seq_file *s, loff_t *pos) 2801 { 2802 struct sg_proc_deviter * it = kmalloc(sizeof(*it), GFP_KERNEL); 2803 2804 s->private = it; 2805 if (! it) 2806 return NULL; 2807 2808 if (NULL == sg_dev_arr) 2809 return NULL; 2810 it->index = *pos; 2811 it->max = sg_last_dev(); 2812 if (it->index >= it->max) 2813 return NULL; 2814 return it; 2815 } 2816 2817 static void * dev_seq_next(struct seq_file *s, void *v, loff_t *pos) 2818 { 2819 struct sg_proc_deviter * it = s->private; 2820 2821 *pos = ++it->index; 2822 return (it->index < it->max) ? 
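	/*
	 * Editor's note: returning NULL terminates the seq_file
	 * iteration; dev_seq_stop() then frees the iterator that
	 * dev_seq_start() allocated and parked in s->private.
	 */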
it : NULL; 2823 } 2824 2825 static void dev_seq_stop(struct seq_file *s, void *v) 2826 { 2827 kfree(s->private); 2828 } 2829 2830 static int sg_proc_open_dev(struct inode *inode, struct file *file) 2831 { 2832 return seq_open(file, &dev_seq_ops); 2833 } 2834 2835 static int sg_proc_seq_show_dev(struct seq_file *s, void *v) 2836 { 2837 struct sg_proc_deviter * it = (struct sg_proc_deviter *) v; 2838 Sg_device *sdp; 2839 struct scsi_device *scsidp; 2840 2841 sdp = it ? sg_get_dev(it->index) : NULL; 2842 if (sdp && (scsidp = sdp->device) && (!sdp->detached)) 2843 seq_printf(s, "%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\n", 2844 scsidp->host->host_no, scsidp->channel, 2845 scsidp->id, scsidp->lun, (int) scsidp->type, 2846 1, 2847 (int) scsidp->queue_depth, 2848 (int) scsidp->device_busy, 2849 (int) scsi_device_online(scsidp)); 2850 else 2851 seq_printf(s, "-1\t-1\t-1\t-1\t-1\t-1\t-1\t-1\t-1\n"); 2852 return 0; 2853 } 2854 2855 static int sg_proc_open_devstrs(struct inode *inode, struct file *file) 2856 { 2857 return seq_open(file, &devstrs_seq_ops); 2858 } 2859 2860 static int sg_proc_seq_show_devstrs(struct seq_file *s, void *v) 2861 { 2862 struct sg_proc_deviter * it = (struct sg_proc_deviter *) v; 2863 Sg_device *sdp; 2864 struct scsi_device *scsidp; 2865 2866 sdp = it ? sg_get_dev(it->index) : NULL; 2867 if (sdp && (scsidp = sdp->device) && (!sdp->detached)) 2868 seq_printf(s, "%8.8s\t%16.16s\t%4.4s\n", 2869 scsidp->vendor, scsidp->model, scsidp->rev); 2870 else 2871 seq_printf(s, "<no active device>\n"); 2872 return 0; 2873 } 2874 2875 static void sg_proc_debug_helper(struct seq_file *s, Sg_device * sdp) 2876 { 2877 int k, m, new_interface, blen, usg; 2878 Sg_request *srp; 2879 Sg_fd *fp; 2880 const sg_io_hdr_t *hp; 2881 const char * cp; 2882 unsigned int ms; 2883 2884 for (k = 0; (fp = sg_get_nth_sfp(sdp, k)); ++k) { 2885 seq_printf(s, " FD(%d): timeout=%dms bufflen=%d " 2886 "(res)sgat=%d low_dma=%d\n", k + 1, 2887 jiffies_to_msecs(fp->timeout), 2888 fp->reserve.bufflen, 2889 (int) fp->reserve.k_use_sg, 2890 (int) fp->low_dma); 2891 seq_printf(s, " cmd_q=%d f_packid=%d k_orphan=%d closed=%d\n", 2892 (int) fp->cmd_q, (int) fp->force_packid, 2893 (int) fp->keep_orphan, (int) fp->closed); 2894 for (m = 0; (srp = sg_get_nth_request(fp, m)); ++m) { 2895 hp = &srp->header; 2896 new_interface = (hp->interface_id == '\0') ? 0 : 1; 2897 if (srp->res_used) { 2898 if (new_interface && 2899 (SG_FLAG_MMAP_IO & hp->flags)) 2900 cp = " mmap>> "; 2901 else 2902 cp = " rb>> "; 2903 } else { 2904 if (SG_INFO_DIRECT_IO_MASK & hp->info) 2905 cp = " dio>> "; 2906 else 2907 cp = " "; 2908 } 2909 seq_printf(s, cp); 2910 blen = srp->data.bufflen; 2911 usg = srp->data.k_use_sg; 2912 seq_printf(s, srp->done ? 2913 ((1 == srp->done) ? "rcv:" : "fin:") 2914 : "act:"); 2915 seq_printf(s, " id=%d blen=%d", 2916 srp->header.pack_id, blen); 2917 if (srp->done) 2918 seq_printf(s, " dur=%d", hp->duration); 2919 else { 2920 ms = jiffies_to_msecs(jiffies); 2921 seq_printf(s, " t_o/elap=%d/%d", 2922 (new_interface ? hp->timeout : 2923 jiffies_to_msecs(fp->timeout)), 2924 (ms > hp->duration ? 
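				/*
				 * Editor's note: while a request is still in
				 * flight, header.duration holds its submission
				 * timestamp (set in sg_add_request() via
				 * jiffies_to_msecs(jiffies)), so the elapsed
				 * time is "now minus duration", clamped to 0
				 * in case the millisecond counter wrapped in
				 * between.
				 */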
ms - hp->duration : 0)); 2925 } 2926 seq_printf(s, "ms sgat=%d op=0x%02x\n", usg, 2927 (int) srp->data.cmd_opcode); 2928 } 2929 if (0 == m) 2930 seq_printf(s, " No requests active\n"); 2931 } 2932 } 2933 2934 static int sg_proc_open_debug(struct inode *inode, struct file *file) 2935 { 2936 return seq_open(file, &debug_seq_ops); 2937 } 2938 2939 static int sg_proc_seq_show_debug(struct seq_file *s, void *v) 2940 { 2941 struct sg_proc_deviter * it = (struct sg_proc_deviter *) v; 2942 Sg_device *sdp; 2943 2944 if (it && (0 == it->index)) { 2945 seq_printf(s, "dev_max(currently)=%d max_active_device=%d " 2946 "(origin 1)\n", sg_dev_max, (int)it->max); 2947 seq_printf(s, " def_reserved_size=%d\n", sg_big_buff); 2948 } 2949 sdp = it ? sg_get_dev(it->index) : NULL; 2950 if (sdp) { 2951 struct scsi_device *scsidp = sdp->device; 2952 2953 if (NULL == scsidp) { 2954 seq_printf(s, "device %d detached ??\n", 2955 (int)it->index); 2956 return 0; 2957 } 2958 2959 if (sg_get_nth_sfp(sdp, 0)) { 2960 seq_printf(s, " >>> device=%s ", 2961 sdp->disk->disk_name); 2962 if (sdp->detached) 2963 seq_printf(s, "detached pending close "); 2964 else 2965 seq_printf 2966 (s, "scsi%d chan=%d id=%d lun=%d em=%d", 2967 scsidp->host->host_no, 2968 scsidp->channel, scsidp->id, 2969 scsidp->lun, 2970 scsidp->host->hostt->emulated); 2971 seq_printf(s, " sg_tablesize=%d excl=%d\n", 2972 sdp->sg_tablesize, sdp->exclude); 2973 } 2974 sg_proc_debug_helper(s, sdp); 2975 } 2976 return 0; 2977 } 2978 2979 #endif /* CONFIG_SCSI_PROC_FS */ 2980 2981 module_init(init_sg); 2982 module_exit(exit_sg); 2983
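/*
 * Editor's note: a minimal user-space sketch of driving this driver
 * through the sg v3 interface handled above.  The device node name
 * (/dev/sg0) and the 96-byte INQUIRY allocation length are
 * illustrative assumptions, not anything the driver mandates, and
 * error handling is pared down to the bare minimum.
 *
 *	#include <fcntl.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *	#include <sys/ioctl.h>
 *	#include <scsi/sg.h>
 *
 *	int inquiry_example(void)
 *	{
 *		unsigned char cdb[6] = { 0x12, 0, 0, 0, 96, 0 };
 *		unsigned char resp[96], sense[32];
 *		sg_io_hdr_t hdr;
 *		int fd, res;
 *
 *		fd = open("/dev/sg0", O_RDWR);	// assumed node name
 *		if (fd < 0)
 *			return -1;
 *		memset(&hdr, 0, sizeof(hdr));
 *		hdr.interface_id = 'S';		// marks the v3 interface
 *		hdr.cmd_len = sizeof(cdb);	// INQUIRY, 6-byte CDB
 *		hdr.cmdp = cdb;
 *		hdr.dxfer_direction = SG_DXFER_FROM_DEV;
 *		hdr.dxferp = resp;
 *		hdr.dxfer_len = sizeof(resp);
 *		hdr.sbp = sense;		// sense data on CHECK CONDITION
 *		hdr.mx_sb_len = sizeof(sense);
 *		hdr.timeout = 5000;		// milliseconds
 *		res = ioctl(fd, SG_IO, &hdr);	// synchronous SG_IO path
 *		close(fd);
 *		return res;
 *	}
 */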