/*
 * Adaptec AAC series RAID controller driver
 * (c) Copyright 2001 Red Hat Inc.
 *
 * based on the old aacraid driver that is..
 * Adaptec aacraid device driver for Linux.
 *
 * Copyright (c) 2000-2007 Adaptec, Inc. (aacraid@adaptec.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING.  If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * Module Name:
 *  commctrl.c
 *
 * Abstract: Contains all routines for control of the AFA comm layer
 *
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/completion.h>
#include <linux/dma-mapping.h>
#include <linux/blkdev.h>
#include <linux/delay.h> /* ssleep prototype */
#include <linux/kthread.h>
#include <linux/semaphore.h>
#include <asm/uaccess.h>
#include <scsi/scsi_host.h>

#include "aacraid.h"

# define AAC_DEBUG_PREAMBLE	KERN_INFO
# define AAC_DEBUG_POSTAMBLE

/**
 * ioctl_send_fib - send a FIB from userspace
 * @dev: adapter being processed
 * @arg: arguments to the ioctl call
 *
 * This routine sends a fib to the adapter on behalf of a user level
 * program.
 */
static int ioctl_send_fib(struct aac_dev * dev, void __user *arg)
{
	struct hw_fib * kfib;
	struct fib *fibptr;
	struct hw_fib * hw_fib = NULL;
	dma_addr_t hw_fib_pa = 0;
	unsigned size;
	int retval;

	if (dev->in_reset) {
		return -EBUSY;
	}
	fibptr = aac_fib_alloc(dev);
	if (fibptr == NULL) {
		return -ENOMEM;
	}

	kfib = fibptr->hw_fib_va;
	/*
	 * First copy in the header so that we can check the size field.
	 */
	if (copy_from_user((void *)kfib, arg, sizeof(struct aac_fibhdr))) {
		aac_fib_free(fibptr);
		return -EFAULT;
	}
	/*
	 * Since we copy based on the fib header size, make sure that we
	 * will not overrun the buffer when we copy the memory. Return
	 * an error if we would.
	 */
	size = le16_to_cpu(kfib->header.Size) + sizeof(struct aac_fibhdr);
	if (size < le16_to_cpu(kfib->header.SenderSize))
		size = le16_to_cpu(kfib->header.SenderSize);
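	/*
	 * An oversized request (up to 2048 bytes) is handled below by
	 * allocating a temporary coherent buffer and swapping it into
	 * fibptr; the original buffer and its DMA address are restored
	 * in the cleanup path.
	 */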
	if (size > dev->max_fib_size) {
		dma_addr_t daddr;

		if (size > 2048) {
			retval = -EINVAL;
			goto cleanup;
		}

		kfib = pci_alloc_consistent(dev->pdev, size, &daddr);
		if (!kfib) {
			retval = -ENOMEM;
			goto cleanup;
		}

		/* Hijack the hw_fib */
		hw_fib = fibptr->hw_fib_va;
		hw_fib_pa = fibptr->hw_fib_pa;
		fibptr->hw_fib_va = kfib;
		fibptr->hw_fib_pa = daddr;
		memset(((char *)kfib) + dev->max_fib_size, 0,
			size - dev->max_fib_size);
		memcpy(kfib, hw_fib, dev->max_fib_size);
	}

	if (copy_from_user(kfib, arg, size)) {
		retval = -EFAULT;
		goto cleanup;
	}

	if (kfib->header.Command == cpu_to_le16(TakeABreakPt)) {
		aac_adapter_interrupt(dev);
		/*
		 * Since we didn't really send a fib, zero out the state to
		 * allow cleanup code not to assert.
		 */
		kfib->header.XferState = 0;
	} else {
		retval = aac_fib_send(le16_to_cpu(kfib->header.Command), fibptr,
				le16_to_cpu(kfib->header.Size), FsaNormal,
				1, 1, NULL, NULL);
		if (retval) {
			goto cleanup;
		}
		if (aac_fib_complete(fibptr) != 0) {
			retval = -EINVAL;
			goto cleanup;
		}
	}
	/*
	 * Make sure that the size returned by the adapter (which includes
	 * the header) is less than or equal to the size of a fib, so we
	 * don't corrupt application data. Then copy that size to the user
	 * buffer. (Don't try to add the header information again, since it
	 * was already included by the adapter.)
	 */

	retval = 0;
	if (copy_to_user(arg, (void *)kfib, size))
		retval = -EFAULT;
cleanup:
	if (hw_fib) {
		pci_free_consistent(dev->pdev, size, kfib, fibptr->hw_fib_pa);
		fibptr->hw_fib_pa = hw_fib_pa;
		fibptr->hw_fib_va = hw_fib;
	}
	if (retval != -ERESTARTSYS)
		aac_fib_free(fibptr);
	return retval;
}

/**
 * open_getadapter_fib - Get the next fib
 * @dev: adapter being processed
 * @arg: ioctl argument, receives the opaque context handle
 *
 * This routine will get the next Fib, if available, from the AdapterFibContext
 * passed in from the user.
 */
static int open_getadapter_fib(struct aac_dev * dev, void __user *arg)
{
	struct aac_fib_context * fibctx;
	int status;

	fibctx = kmalloc(sizeof(struct aac_fib_context), GFP_KERNEL);
	if (fibctx == NULL) {
		status = -ENOMEM;
	} else {
		unsigned long flags;
		struct list_head * entry;
		struct aac_fib_context * context;

		fibctx->type = FSAFS_NTC_GET_ADAPTER_FIB_CONTEXT;
		fibctx->size = sizeof(struct aac_fib_context);
		/*
		 * Yes yes, I know this could be an index, but we have a
		 * better guarantee of uniqueness for the locked loop below.
		 * Without the aid of a persistent history, this also helps
		 * reduce the chance that the opaque context would be reused.
		 */
		fibctx->unique = (u32)((ulong)fibctx & 0xFFFFFFFF);
		/*
		 * Initialize the mutex used to wait for the next AIF.
		 */
		sema_init(&fibctx->wait_sem, 0);
		fibctx->wait = 0;
		/*
		 * Initialize the fibs and set the count of fibs on
		 * the list to 0.
		 */
		fibctx->count = 0;
		INIT_LIST_HEAD(&fibctx->fib_list);
		fibctx->jiffies = jiffies/HZ;
		/*
		 * Now add this context onto the adapter's
		 * AdapterFibContext list.
		 */
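		/*
		 * The address-derived id above is only probably unique, so
		 * it is validated under the fib_lock: the id is compared
		 * with every context already registered and, on a collision,
		 * bumped and rechecked against the whole list.
		 */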
		spin_lock_irqsave(&dev->fib_lock, flags);
		/* Ensure that we have a unique identifier */
		entry = dev->fib_list.next;
		while (entry != &dev->fib_list) {
			context = list_entry(entry, struct aac_fib_context, next);
			if (context->unique == fibctx->unique) {
				/* Not unique (32 bits) */
				fibctx->unique++;
				entry = dev->fib_list.next;
			} else {
				entry = entry->next;
			}
		}
		list_add_tail(&fibctx->next, &dev->fib_list);
		spin_unlock_irqrestore(&dev->fib_lock, flags);
		if (copy_to_user(arg, &fibctx->unique,
				 sizeof(fibctx->unique))) {
			status = -EFAULT;
		} else {
			status = 0;
		}
	}
	return status;
}

/**
 * next_getadapter_fib - get the next fib
 * @dev: adapter to use
 * @arg: ioctl argument
 *
 * This routine will get the next Fib, if available, from the AdapterFibContext
 * passed in from the user.
 */
static int next_getadapter_fib(struct aac_dev * dev, void __user *arg)
{
	struct fib_ioctl f;
	struct fib *fib;
	struct aac_fib_context *fibctx;
	int status;
	struct list_head * entry;
	unsigned long flags;

	if (copy_from_user((void *)&f, arg, sizeof(struct fib_ioctl)))
		return -EFAULT;
	/*
	 * Verify that the HANDLE passed in was a valid AdapterFibContext
	 *
	 * Search the list of AdapterFibContext addresses on the adapter
	 * to be sure this is a valid address
	 */
	spin_lock_irqsave(&dev->fib_lock, flags);
	entry = dev->fib_list.next;
	fibctx = NULL;

	while (entry != &dev->fib_list) {
		fibctx = list_entry(entry, struct aac_fib_context, next);
		/*
		 * Extract the AdapterFibContext from the Input parameters.
		 */
		if (fibctx->unique == f.fibctx) { /* We found a winner */
			break;
		}
		entry = entry->next;
		fibctx = NULL;
	}
	if (!fibctx) {
		spin_unlock_irqrestore(&dev->fib_lock, flags);
		dprintk ((KERN_INFO "Fib Context not found\n"));
		return -EINVAL;
	}

	if ((fibctx->type != FSAFS_NTC_GET_ADAPTER_FIB_CONTEXT) ||
		 (fibctx->size != sizeof(struct aac_fib_context))) {
		spin_unlock_irqrestore(&dev->fib_lock, flags);
		dprintk ((KERN_INFO "Fib Context corrupt?\n"));
		return -EINVAL;
	}
	status = 0;
	/*
	 * If there are no fibs to send back, then either wait or return
	 * -EAGAIN
	 */
return_fib:
	if (!list_empty(&fibctx->fib_list)) {
		/*
		 * Pull the next fib from the fibs
		 */
		entry = fibctx->fib_list.next;
		list_del(entry);

		fib = list_entry(entry, struct fib, fiblink);
		fibctx->count--;
		spin_unlock_irqrestore(&dev->fib_lock, flags);
		if (copy_to_user(f.fib, fib->hw_fib_va, sizeof(struct hw_fib))) {
			kfree(fib->hw_fib_va);
			kfree(fib);
			return -EFAULT;
		}
		/*
		 * Free the space occupied by this copy of the fib.
		 */
		kfree(fib->hw_fib_va);
		kfree(fib);
		status = 0;
	} else {
		spin_unlock_irqrestore(&dev->fib_lock, flags);
		/* If someone killed the AIF aacraid thread, restart it */
		status = !dev->aif_thread;
		if (status && !dev->in_reset && dev->queues && dev->fsa_dev) {
			/* Be paranoid, be very paranoid! */
			kthread_stop(dev->thread);
			ssleep(1);
			dev->aif_thread = 0;
			dev->thread = kthread_run(aac_command_thread, dev, dev->name);
			ssleep(1);
		}
		if (f.wait) {
			if (down_interruptible(&fibctx->wait_sem) < 0) {
				status = -ERESTARTSYS;
			} else {
				/* Lock again and retry */
				spin_lock_irqsave(&dev->fib_lock, flags);
				goto return_fib;
			}
		} else {
			status = -EAGAIN;
		}
	}
	fibctx->jiffies = jiffies/HZ;
	return status;
}

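/**
 * aac_close_fib_context - tear down an AdapterFibContext
 * @dev: adapter the context is registered with
 * @fibctx: context to destroy
 *
 * Frees any FIBs still queued on the context, unlinks the context from
 * the adapter's AdapterFibContext list and releases it.
 * close_getadapter_fib() calls this with dev->fib_lock held.
 */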
int aac_close_fib_context(struct aac_dev * dev, struct aac_fib_context * fibctx)
{
	struct fib *fib;

	/*
	 * First free any FIBs that have not been consumed.
	 */
	while (!list_empty(&fibctx->fib_list)) {
		struct list_head * entry;
		/*
		 * Pull the next fib from the fibs
		 */
		entry = fibctx->fib_list.next;
		list_del(entry);
		fib = list_entry(entry, struct fib, fiblink);
		fibctx->count--;
		/*
		 * Free the space occupied by this copy of the fib.
		 */
		kfree(fib->hw_fib_va);
		kfree(fib);
	}
	/*
	 * Remove the Context from the AdapterFibContext List
	 */
	list_del(&fibctx->next);
	/*
	 * Invalidate context
	 */
	fibctx->type = 0;
	/*
	 * Free the space occupied by the Context
	 */
	kfree(fibctx);
	return 0;
}

/**
 * close_getadapter_fib - close down user fib context
 * @dev: adapter
 * @arg: ioctl arguments
 *
 * This routine will close down the fibctx passed in from the user.
 */
static int close_getadapter_fib(struct aac_dev * dev, void __user *arg)
{
	struct aac_fib_context *fibctx;
	int status;
	unsigned long flags;
	struct list_head * entry;

	/*
	 * Verify that the HANDLE passed in was a valid AdapterFibContext
	 *
	 * Search the list of AdapterFibContext addresses on the adapter
	 * to be sure this is a valid address
	 */
	spin_lock_irqsave(&dev->fib_lock, flags);
	entry = dev->fib_list.next;
	fibctx = NULL;

	while (entry != &dev->fib_list) {
		fibctx = list_entry(entry, struct aac_fib_context, next);
		/*
		 * Extract the fibctx from the input parameters
		 */
		if (fibctx->unique == (u32)(uintptr_t)arg) /* We found a winner */
			break;
		entry = entry->next;
		fibctx = NULL;
	}

	if (!fibctx) {
		spin_unlock_irqrestore(&dev->fib_lock, flags);
		return 0; /* Already gone */
	}

	if ((fibctx->type != FSAFS_NTC_GET_ADAPTER_FIB_CONTEXT) ||
		 (fibctx->size != sizeof(struct aac_fib_context))) {
		spin_unlock_irqrestore(&dev->fib_lock, flags);
		return -EINVAL;
	}
	status = aac_close_fib_context(dev, fibctx);
	spin_unlock_irqrestore(&dev->fib_lock, flags);
	return status;
}

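/*
 * check_revision() below packs the dotted aac_driver_version string into
 * a single 32-bit word: the first number lands in bits 31-24, the second
 * in bits 23-16 and the third is added into the low bits on top of the
 * constant 0x00000400.  A worked illustration, assuming the version
 * string were "1.1.5":
 *
 *	(1 << 24) | 0x00000400	= 0x01000400
 *	+ (1 << 16)		= 0x01010400
 *	+ 5			= 0x01010405
 */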
/**
 * check_revision - report the driver revision
 * @dev: adapter
 * @arg: ioctl arguments
 *
 * This routine returns the driver version.
 * Under Linux, there have been no version incompatibilities, so this is
 * simple!
 */
static int check_revision(struct aac_dev *dev, void __user *arg)
{
	struct revision response;
	char *driver_version = aac_driver_version;
	u32 version;

	response.compat = 1;
	version = (simple_strtol(driver_version,
				&driver_version, 10) << 24) | 0x00000400;
	version += simple_strtol(driver_version + 1, &driver_version, 10) << 16;
	version += simple_strtol(driver_version + 1, NULL, 10);
	response.version = cpu_to_le32(version);
#	ifdef AAC_DRIVER_BUILD
	response.build = cpu_to_le32(AAC_DRIVER_BUILD);
#	else
	response.build = cpu_to_le32(9999);
#	endif

	if (copy_to_user(arg, &response, sizeof(response)))
		return -EFAULT;
	return 0;
}


/**
 * aac_send_raw_srb - send a raw SRB from userspace
 * @dev: adapter
 * @arg: ioctl arguments
 *
 * This routine sends a SCSI Request Block supplied by a user level
 * program, bouncing any scatter/gather data through kernel buffers.
 */
static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
{
	struct fib* srbfib;
	int status;
	struct aac_srb *srbcmd = NULL;
	struct user_aac_srb *user_srbcmd = NULL;
	struct user_aac_srb __user *user_srb = arg;
	struct aac_srb_reply __user *user_reply;
	struct aac_srb_reply* reply;
	u32 fibsize = 0;
	u32 flags = 0;
	s32 rcode = 0;
	u32 data_dir;
	void __user *sg_user[32];
	void *sg_list[32];
	u32 sg_indx = 0;
	u32 byte_count = 0;
	u32 actual_fibsize64, actual_fibsize = 0;
	int i;


	if (dev->in_reset) {
		dprintk((KERN_DEBUG"aacraid: send raw srb -EBUSY\n"));
		return -EBUSY;
	}
	if (!capable(CAP_SYS_ADMIN)) {
		dprintk((KERN_DEBUG"aacraid: No permission to send raw srb\n"));
		return -EPERM;
	}
	/*
	 * Allocate and initialize a Fib then setup a SRB command
	 */
	if (!(srbfib = aac_fib_alloc(dev))) {
		return -ENOMEM;
	}
	aac_fib_init(srbfib);

	srbcmd = (struct aac_srb*) fib_data(srbfib);

	memset(sg_list, 0, sizeof(sg_list)); /* cleanup may take issue */
	if (copy_from_user(&fibsize, &user_srb->count, sizeof(u32))) {
		dprintk((KERN_DEBUG"aacraid: Could not copy data size from user\n"));
		rcode = -EFAULT;
		goto cleanup;
	}

	if ((fibsize < (sizeof(struct user_aac_srb) - sizeof(struct user_sgentry))) ||
	    (fibsize > (dev->max_fib_size - sizeof(struct aac_fibhdr)))) {
		rcode = -EINVAL;
		goto cleanup;
	}

	user_srbcmd = kmalloc(fibsize, GFP_KERNEL);
	if (!user_srbcmd) {
		dprintk((KERN_DEBUG"aacraid: Could not make a copy of the srb\n"));
		rcode = -ENOMEM;
		goto cleanup;
	}
	if (copy_from_user(user_srbcmd, user_srb, fibsize)) {
		dprintk((KERN_DEBUG"aacraid: Could not copy srb from user\n"));
		rcode = -EFAULT;
		goto cleanup;
	}

	user_reply = arg + fibsize;

	flags = user_srbcmd->flags; /* from user in cpu order */
	// Fix up srb for endian and force some values

	srbcmd->function = cpu_to_le32(SRBF_ExecuteScsi);	// Force this
	srbcmd->channel = cpu_to_le32(user_srbcmd->channel);
	srbcmd->id = cpu_to_le32(user_srbcmd->id);
	srbcmd->lun = cpu_to_le32(user_srbcmd->lun);
	srbcmd->timeout = cpu_to_le32(user_srbcmd->timeout);
	srbcmd->flags = cpu_to_le32(flags);
	srbcmd->retry_limit = 0; // Obsolete parameter
	srbcmd->cdb_size = cpu_to_le32(user_srbcmd->cdb_size);
	memcpy(srbcmd->cdb, user_srbcmd->cdb, sizeof(srbcmd->cdb));

	switch (flags & (SRB_DataIn | SRB_DataOut)) {
	case SRB_DataOut:
		data_dir = DMA_TO_DEVICE;
		break;
	case (SRB_DataIn | SRB_DataOut):
		data_dir = DMA_BIDIRECTIONAL;
		break;
	case SRB_DataIn:
		data_dir = DMA_FROM_DEVICE;
		break;
	default:
		data_dir = DMA_NONE;
	}
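	/*
	 * Each user scatter/gather element below is bounced through a
	 * kernel buffer: for SRB_DataOut the user data is copied in before
	 * the FIB is sent, and for SRB_DataIn the result is copied back out
	 * once the adapter completes.  The buffers are mapped with
	 * pci_map_single() and described to the adapter in either 32-bit or
	 * 64-bit sgentry format, depending on AAC_OPT_SGMAP_HOST64 and the
	 * layout the caller supplied.
	 */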
	if (user_srbcmd->sg.count > ARRAY_SIZE(sg_list)) {
		dprintk((KERN_DEBUG"aacraid: too many sg entries %d\n",
			user_srbcmd->sg.count));
		rcode = -EINVAL;
		goto cleanup;
	}
	actual_fibsize = sizeof(struct aac_srb) - sizeof(struct sgentry) +
		((user_srbcmd->sg.count & 0xff) * sizeof(struct sgentry));
	actual_fibsize64 = actual_fibsize + (user_srbcmd->sg.count & 0xff) *
	  (sizeof(struct sgentry64) - sizeof(struct sgentry));
	/* User made a mistake - should not continue */
	if ((actual_fibsize != fibsize) && (actual_fibsize64 != fibsize)) {
		dprintk((KERN_DEBUG"aacraid: Bad Size specified in "
		  "Raw SRB command calculated fibsize=%u;%u "
		  "user_srbcmd->sg.count=%u aac_srb=%zu sgentry=%zu;%zu "
		  "issued fibsize=%u\n",
		  actual_fibsize, actual_fibsize64, user_srbcmd->sg.count,
		  sizeof(struct aac_srb), sizeof(struct sgentry),
		  sizeof(struct sgentry64), fibsize));
		rcode = -EINVAL;
		goto cleanup;
	}
	if ((data_dir == DMA_NONE) && user_srbcmd->sg.count) {
		dprintk((KERN_DEBUG"aacraid: SG with no direction specified in Raw SRB command\n"));
		rcode = -EINVAL;
		goto cleanup;
	}
	byte_count = 0;
	if (dev->adapter_info.options & AAC_OPT_SGMAP_HOST64) {
		struct user_sgmap64* upsg = (struct user_sgmap64*)&user_srbcmd->sg;
		struct sgmap64* psg = (struct sgmap64*)&srbcmd->sg;

		/*
		 * This should also catch if user used the 32 bit sgmap
		 */
		if (actual_fibsize64 == fibsize) {
			actual_fibsize = actual_fibsize64;
			for (i = 0; i < upsg->count; i++) {
				u64 addr;
				void* p;
				if (upsg->sg[i].count >
				    ((dev->adapter_info.options &
				     AAC_OPT_NEW_COMM) ?
				      (dev->scsi_host_ptr->max_sectors << 9) :
				      65536)) {
					rcode = -EINVAL;
					goto cleanup;
				}
				/* Does this really need to be GFP_DMA? */
				p = kmalloc(upsg->sg[i].count, GFP_KERNEL|__GFP_DMA);
				if (!p) {
					dprintk((KERN_DEBUG"aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
					  upsg->sg[i].count, i, upsg->count));
					rcode = -ENOMEM;
					goto cleanup;
				}
				addr = (u64)upsg->sg[i].addr[0];
				addr += ((u64)upsg->sg[i].addr[1]) << 32;
				sg_user[i] = (void __user *)(uintptr_t)addr;
				sg_list[i] = p; // save so we can clean up later
				sg_indx = i;

				if (flags & SRB_DataOut) {
					if (copy_from_user(p, sg_user[i], upsg->sg[i].count)) {
						dprintk((KERN_DEBUG"aacraid: Could not copy sg data from user\n"));
						rcode = -EFAULT;
						goto cleanup;
					}
				}
				addr = pci_map_single(dev->pdev, p, upsg->sg[i].count, data_dir);

				psg->sg[i].addr[0] = cpu_to_le32(addr & 0xffffffff);
				psg->sg[i].addr[1] = cpu_to_le32(addr>>32);
				byte_count += upsg->sg[i].count;
				psg->sg[i].count = cpu_to_le32(upsg->sg[i].count);
			}
		} else {
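			/*
			 * The caller supplied the 32-bit sgentry layout even
			 * though the adapter takes 64-bit SG addresses; work
			 * from a scratch copy in the 32-bit layout and build
			 * 64-bit entries from it.
			 */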
			struct user_sgmap* usg;
			usg = kmalloc(actual_fibsize - sizeof(struct aac_srb)
			  + sizeof(struct sgmap), GFP_KERNEL);
			if (!usg) {
				dprintk((KERN_DEBUG"aacraid: Allocation error in Raw SRB command\n"));
				rcode = -ENOMEM;
				goto cleanup;
			}
			memcpy (usg, upsg, actual_fibsize - sizeof(struct aac_srb)
			  + sizeof(struct sgmap));
			actual_fibsize = actual_fibsize64;

			for (i = 0; i < usg->count; i++) {
				u64 addr;
				void* p;
				if (usg->sg[i].count >
				    ((dev->adapter_info.options &
				     AAC_OPT_NEW_COMM) ?
				      (dev->scsi_host_ptr->max_sectors << 9) :
				      65536)) {
					kfree(usg);
					rcode = -EINVAL;
					goto cleanup;
				}
				/* Does this really need to be GFP_DMA? */
				p = kmalloc(usg->sg[i].count, GFP_KERNEL|__GFP_DMA);
				if (!p) {
					dprintk((KERN_DEBUG "aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
					  usg->sg[i].count, i, usg->count));
					kfree(usg);
					rcode = -ENOMEM;
					goto cleanup;
				}
				sg_user[i] = (void __user *)(uintptr_t)usg->sg[i].addr;
				sg_list[i] = p; // save so we can clean up later
				sg_indx = i;

				if (flags & SRB_DataOut) {
					if (copy_from_user(p, sg_user[i], usg->sg[i].count)) {
						kfree (usg);
						dprintk((KERN_DEBUG"aacraid: Could not copy sg data from user\n"));
						rcode = -EFAULT;
						goto cleanup;
					}
				}
				addr = pci_map_single(dev->pdev, p, usg->sg[i].count, data_dir);

				psg->sg[i].addr[0] = cpu_to_le32(addr & 0xffffffff);
				psg->sg[i].addr[1] = cpu_to_le32(addr>>32);
				byte_count += usg->sg[i].count;
				psg->sg[i].count = cpu_to_le32(usg->sg[i].count);
			}
			kfree (usg);
		}
		srbcmd->count = cpu_to_le32(byte_count);
		psg->count = cpu_to_le32(sg_indx+1);
		status = aac_fib_send(ScsiPortCommand64, srbfib, actual_fibsize, FsaNormal, 1, 1, NULL, NULL);
	} else {
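		/*
		 * Adapter without AAC_OPT_SGMAP_HOST64: only the low 32 bits
		 * of each mapped DMA address fit in an sgentry, so the user
		 * buffers are bounced through kernel memory and described
		 * with the 32-bit sgmap.
		 */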
		struct user_sgmap* upsg = &user_srbcmd->sg;
		struct sgmap* psg = &srbcmd->sg;

		if (actual_fibsize64 == fibsize) {
			struct user_sgmap64* usg = (struct user_sgmap64 *)upsg;
			for (i = 0; i < upsg->count; i++) {
				uintptr_t addr;
				void* p;
				if (usg->sg[i].count >
				    ((dev->adapter_info.options &
				     AAC_OPT_NEW_COMM) ?
				      (dev->scsi_host_ptr->max_sectors << 9) :
				      65536)) {
					rcode = -EINVAL;
					goto cleanup;
				}
				/* Does this really need to be GFP_DMA? */
				p = kmalloc(usg->sg[i].count, GFP_KERNEL|__GFP_DMA);
				if (!p) {
					dprintk((KERN_DEBUG"aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
					  usg->sg[i].count, i, usg->count));
					rcode = -ENOMEM;
					goto cleanup;
				}
				addr = (u64)usg->sg[i].addr[0];
				addr += ((u64)usg->sg[i].addr[1]) << 32;
				sg_user[i] = (void __user *)addr;
				sg_list[i] = p; // save so we can clean up later
				sg_indx = i;

				if (flags & SRB_DataOut) {
					if (copy_from_user(p, sg_user[i], usg->sg[i].count)) {
						dprintk((KERN_DEBUG"aacraid: Could not copy sg data from user\n"));
						rcode = -EFAULT;
						goto cleanup;
					}
				}
				addr = pci_map_single(dev->pdev, p, usg->sg[i].count, data_dir);

				psg->sg[i].addr = cpu_to_le32(addr & 0xffffffff);
				byte_count += usg->sg[i].count;
				psg->sg[i].count = cpu_to_le32(usg->sg[i].count);
			}
		} else {
			for (i = 0; i < upsg->count; i++) {
				dma_addr_t addr;
				void* p;
				if (upsg->sg[i].count >
				    ((dev->adapter_info.options &
				     AAC_OPT_NEW_COMM) ?
				      (dev->scsi_host_ptr->max_sectors << 9) :
				      65536)) {
					rcode = -EINVAL;
					goto cleanup;
				}
				p = kmalloc(upsg->sg[i].count, GFP_KERNEL);
				if (!p) {
					dprintk((KERN_DEBUG"aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
					  upsg->sg[i].count, i, upsg->count));
					rcode = -ENOMEM;
					goto cleanup;
				}
				sg_user[i] = (void __user *)(uintptr_t)upsg->sg[i].addr;
				sg_list[i] = p; // save so we can clean up later
				sg_indx = i;

				if (flags & SRB_DataOut) {
					if (copy_from_user(p, sg_user[i],
							   upsg->sg[i].count)) {
						dprintk((KERN_DEBUG"aacraid: Could not copy sg data from user\n"));
						rcode = -EFAULT;
						goto cleanup;
					}
				}
				addr = pci_map_single(dev->pdev, p,
					upsg->sg[i].count, data_dir);

				psg->sg[i].addr = cpu_to_le32(addr);
				byte_count += upsg->sg[i].count;
				psg->sg[i].count = cpu_to_le32(upsg->sg[i].count);
			}
		}
		srbcmd->count = cpu_to_le32(byte_count);
		psg->count = cpu_to_le32(sg_indx+1);
		status = aac_fib_send(ScsiPortCommand, srbfib, actual_fibsize, FsaNormal, 1, 1, NULL, NULL);
	}
	if (status == -ERESTARTSYS) {
		rcode = -ERESTARTSYS;
		goto cleanup;
	}

	if (status != 0) {
		dprintk((KERN_DEBUG"aacraid: Could not send raw srb fib to hba\n"));
		rcode = -ENXIO;
		goto cleanup;
	}

	if (flags & SRB_DataIn) {
		for (i = 0 ; i <= sg_indx; i++) {
			byte_count = le32_to_cpu(
			  (dev->adapter_info.options & AAC_OPT_SGMAP_HOST64)
			      ? ((struct sgmap64*)&srbcmd->sg)->sg[i].count
			      : srbcmd->sg.sg[i].count);
			if (copy_to_user(sg_user[i], sg_list[i], byte_count)) {
				dprintk((KERN_DEBUG"aacraid: Could not copy sg data to user\n"));
				rcode = -EFAULT;
				goto cleanup;
			}
		}
	}

	reply = (struct aac_srb_reply *) fib_data(srbfib);
	if (copy_to_user(user_reply, reply, sizeof(struct aac_srb_reply))) {
		dprintk((KERN_DEBUG"aacraid: Could not copy reply to user\n"));
		rcode = -EFAULT;
		goto cleanup;
	}

cleanup:
	kfree(user_srbcmd);
	for (i = 0; i <= sg_indx; i++) {
		kfree(sg_list[i]);
	}
	if (rcode != -ERESTARTSYS) {
		aac_fib_complete(srbfib);
		aac_fib_free(srbfib);
	}

	return rcode;
}

struct aac_pci_info {
	u32 bus;
	u32 slot;
};


static int aac_get_pci_info(struct aac_dev* dev, void __user *arg)
{
	struct aac_pci_info pci_info;

	pci_info.bus = dev->pdev->bus->number;
	pci_info.slot = PCI_SLOT(dev->pdev->devfn);

	if (copy_to_user(arg, &pci_info, sizeof(struct aac_pci_info))) {
		dprintk((KERN_DEBUG "aacraid: Could not copy pci info\n"));
		return -EFAULT;
	}
	return 0;
}

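/**
 * aac_do_ioctl - dispatch an FSACTL ioctl to its handler
 * @dev: adapter the ioctl is aimed at
 * @cmd: ioctl command code
 * @arg: userspace argument
 *
 * The adapter specific handler gets first crack at the command; if it
 * returns -ENOTTY the request is dispatched to one of the generic
 * handlers above.  Returns 0 on success or a negative error code.
 */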
int aac_do_ioctl(struct aac_dev * dev, int cmd, void __user *arg)
{
	int status;

	/*
	 * HBA gets first crack
	 */

	status = aac_dev_ioctl(dev, cmd, arg);
	if (status != -ENOTTY)
		return status;

	switch (cmd) {
	case FSACTL_MINIPORT_REV_CHECK:
		status = check_revision(dev, arg);
		break;
	case FSACTL_SEND_LARGE_FIB:
	case FSACTL_SENDFIB:
		status = ioctl_send_fib(dev, arg);
		break;
	case FSACTL_OPEN_GET_ADAPTER_FIB:
		status = open_getadapter_fib(dev, arg);
		break;
	case FSACTL_GET_NEXT_ADAPTER_FIB:
		status = next_getadapter_fib(dev, arg);
		break;
	case FSACTL_CLOSE_GET_ADAPTER_FIB:
		status = close_getadapter_fib(dev, arg);
		break;
	case FSACTL_SEND_RAW_SRB:
		status = aac_send_raw_srb(dev, arg);
		break;
	case FSACTL_GET_PCI_INFO:
		status = aac_get_pci_info(dev, arg);
		break;
	default:
		status = -ENOTTY;
		break;
	}
	return status;
}