/*
 *	Adaptec AAC series RAID controller driver
 *	(c) Copyright 2001 Red Hat Inc.
 *
 *	based on the old aacraid driver that is..
 *	Adaptec aacraid device driver for Linux.
 *
 * Copyright (c) 2000-2010 Adaptec, Inc.
 *               2010-2015 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
 *               2016-2017 Microsemi Corp. (aacraid@microsemi.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING.  If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * Module Name:
 *  commctrl.c
 *
 * Abstract: Contains all routines for control of the AFA comm layer
 *
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/completion.h>
#include <linux/dma-mapping.h>
#include <linux/blkdev.h>
#include <linux/delay.h>	/* ssleep prototype */
#include <linux/kthread.h>
#include <linux/semaphore.h>
#include <linux/uaccess.h>
#include <scsi/scsi_host.h>

#include "aacraid.h"

#define AAC_DEBUG_PREAMBLE	KERN_INFO
#define AAC_DEBUG_POSTAMBLE

/**
 *	ioctl_send_fib	-	send a FIB from userspace
 *	@dev:	adapter being processed
 *	@arg:	arguments to the ioctl call
 *
 *	This routine sends a fib to the adapter on behalf of a user level
 *	program.
 */
static int ioctl_send_fib(struct aac_dev *dev, void __user *arg)
{
	struct hw_fib *kfib;
	struct fib *fibptr;
	struct hw_fib *hw_fib = NULL;
	dma_addr_t hw_fib_pa = 0;
	unsigned int size, osize;
	int retval;

	if (dev->in_reset)
		return -EBUSY;

	fibptr = aac_fib_alloc(dev);
	if (fibptr == NULL)
		return -ENOMEM;

	kfib = fibptr->hw_fib_va;
	/*
	 *	First copy in the header so that we can check the size field.
	 */
	if (copy_from_user((void *)kfib, arg, sizeof(struct aac_fibhdr))) {
		aac_fib_free(fibptr);
		return -EFAULT;
	}
	/*
	 *	Since we copy based on the fib header size, make sure that we
	 *	will not overrun the buffer when we copy the memory. Return
	 *	an error if we would.
	 */
	osize = size = le16_to_cpu(kfib->header.Size) +
		sizeof(struct aac_fibhdr);
	if (size < le16_to_cpu(kfib->header.SenderSize))
		size = le16_to_cpu(kfib->header.SenderSize);
	if (size > dev->max_fib_size) {
		dma_addr_t daddr;

		if (size > 2048) {
			retval = -EINVAL;
			goto cleanup;
		}

		kfib = dma_alloc_coherent(&dev->pdev->dev, size, &daddr,
					  GFP_KERNEL);
		if (!kfib) {
			retval = -ENOMEM;
			goto cleanup;
		}

		/* Hijack the hw_fib */
		hw_fib = fibptr->hw_fib_va;
		hw_fib_pa = fibptr->hw_fib_pa;
		fibptr->hw_fib_va = kfib;
		fibptr->hw_fib_pa = daddr;
		memset(((char *)kfib) + dev->max_fib_size, 0,
		       size - dev->max_fib_size);
		memcpy(kfib, hw_fib, dev->max_fib_size);
	}

	if (copy_from_user(kfib, arg, size)) {
		retval = -EFAULT;
		goto cleanup;
	}

	/* Sanity check the second copy */
	if ((osize != le16_to_cpu(kfib->header.Size) +
		sizeof(struct aac_fibhdr))
		|| (size < le16_to_cpu(kfib->header.SenderSize))) {
		retval = -EINVAL;
		goto cleanup;
	}

	if (kfib->header.Command == cpu_to_le16(TakeABreakPt)) {
		aac_adapter_interrupt(dev);
		/*
		 *	Since we didn't really send a fib, zero out the state
		 *	to allow cleanup code not to assert.
		 */
		kfib->header.XferState = 0;
	} else {
		retval = aac_fib_send(le16_to_cpu(kfib->header.Command), fibptr,
				le16_to_cpu(kfib->header.Size), FsaNormal,
				1, 1, NULL, NULL);
		if (retval)
			goto cleanup;

		if (aac_fib_complete(fibptr) != 0) {
			retval = -EINVAL;
			goto cleanup;
		}
	}
	/*
	 *	Make sure that the size returned by the adapter (which includes
	 *	the header) is less than or equal to the size of a fib, so we
	 *	don't corrupt application data. Then copy that size to the user
	 *	buffer. (Don't try to add the header information again, since it
	 *	was already included by the adapter.)
	 */

	retval = 0;
	if (copy_to_user(arg, (void *)kfib, size))
		retval = -EFAULT;
cleanup:
	if (hw_fib) {
		dma_free_coherent(&dev->pdev->dev, size, kfib,
				  fibptr->hw_fib_pa);
		fibptr->hw_fib_pa = hw_fib_pa;
		fibptr->hw_fib_va = hw_fib;
	}
	if (retval != -ERESTARTSYS)
		aac_fib_free(fibptr);
	return retval;
}
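
/*
 * For reference, a minimal userspace sketch of the FSACTL_SENDFIB path
 * (illustrative only; the device node name and payload layout are
 * assumptions, not defined in this file):
 *
 *	int fd = open("/dev/aac0", O_RDWR);
 *	struct hw_fib fib;
 *	memset(&fib, 0, sizeof(fib));
 *	// fill in fib.header.Command, fib.header.Size, payload ...
 *	if (ioctl(fd, FSACTL_SENDFIB, &fib) == 0)
 *		;	// fib now holds the adapter's response, header included
 *
 * ioctl_send_fib() copies the header first to learn the size, then
 * re-copies the full FIB, so the header fields must describe the payload
 * accurately or the second sanity check returns -EINVAL.
 */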

/**
 *	open_getadapter_fib	-	open an AdapterFibContext
 *	@dev:	adapter to use
 *	@arg:	ioctl argument
 *
 *	This routine creates and initializes an AdapterFibContext and returns
 *	its opaque handle to the user; subsequent FSACTL_GET_NEXT_ADAPTER_FIB
 *	calls use that handle to retrieve fibs.
 */
static int open_getadapter_fib(struct aac_dev *dev, void __user *arg)
{
	struct aac_fib_context *fibctx;
	int status;

	fibctx = kmalloc(sizeof(struct aac_fib_context), GFP_KERNEL);
	if (fibctx == NULL) {
		status = -ENOMEM;
	} else {
		unsigned long flags;
		struct list_head *entry;
		struct aac_fib_context *context;

		fibctx->type = FSAFS_NTC_GET_ADAPTER_FIB_CONTEXT;
		fibctx->size = sizeof(struct aac_fib_context);
		/*
		 *	Yes yes, I know this could be an index, but we have a
		 *	better guarantee of uniqueness for the locked loop below.
		 *	Without the aid of a persistent history, this also helps
		 *	reduce the chance that the opaque context would be reused.
		 */
		fibctx->unique = (u32)((ulong)fibctx & 0xFFFFFFFF);
		/*
		 *	Initialize the semaphore used to wait for the next AIF.
		 */
		sema_init(&fibctx->wait_sem, 0);
		fibctx->wait = 0;
		/*
		 *	Initialize the fibs and set the count of fibs on
		 *	the list to 0.
		 */
		fibctx->count = 0;
		INIT_LIST_HEAD(&fibctx->fib_list);
		fibctx->jiffies = jiffies/HZ;
		/*
		 *	Now add this context onto the adapter's
		 *	AdapterFibContext list.
		 */
		spin_lock_irqsave(&dev->fib_lock, flags);
		/* Ensure that we have a unique identifier */
		entry = dev->fib_list.next;
		while (entry != &dev->fib_list) {
			context = list_entry(entry, struct aac_fib_context, next);
			if (context->unique == fibctx->unique) {
				/* Not unique (32 bits) */
				fibctx->unique++;
				entry = dev->fib_list.next;
			} else {
				entry = entry->next;
			}
		}
		list_add_tail(&fibctx->next, &dev->fib_list);
		spin_unlock_irqrestore(&dev->fib_lock, flags);
		if (copy_to_user(arg, &fibctx->unique,
						sizeof(fibctx->unique))) {
			status = -EFAULT;
		} else {
			status = 0;
		}
	}
	return status;
}
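
/*
 * The three AdapterFibContext ioctls are designed to be used together.
 * A plausible userspace sequence (a sketch; struct fib_ioctl comes from
 * this driver's headers, the buffer sizing is an assumption):
 *
 *	u32 handle;
 *	char buf[sizeof(struct hw_fib)];
 *	ioctl(fd, FSACTL_OPEN_GET_ADAPTER_FIB, &handle);
 *	struct fib_ioctl f = { .fibctx = handle, .wait = 1, .fib = buf };
 *	while (ioctl(fd, FSACTL_GET_NEXT_ADAPTER_FIB, &f) == 0)
 *		;	// consume one AIF from buf
 *	ioctl(fd, FSACTL_CLOSE_GET_ADAPTER_FIB, (void *)(uintptr_t)handle);
 *
 * Note that close passes the handle as the argument value itself, not as
 * a pointer to it.
 */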

/**
 *	next_getadapter_fib	-	get the next fib
 *	@dev:	adapter to use
 *	@arg:	ioctl argument
 *
 *	This routine will get the next Fib, if available, from the
 *	AdapterFibContext passed in from the user.
 */
static int next_getadapter_fib(struct aac_dev *dev, void __user *arg)
{
	struct fib_ioctl f;
	struct fib *fib;
	struct aac_fib_context *fibctx;
	int status;
	struct list_head *entry;
	unsigned long flags;

	if (copy_from_user((void *)&f, arg, sizeof(struct fib_ioctl)))
		return -EFAULT;
	/*
	 *	Verify that the HANDLE passed in was a valid AdapterFibContext
	 *
	 *	Search the list of AdapterFibContext addresses on the adapter
	 *	to be sure this is a valid address
	 */
	spin_lock_irqsave(&dev->fib_lock, flags);
	entry = dev->fib_list.next;
	fibctx = NULL;

	while (entry != &dev->fib_list) {
		fibctx = list_entry(entry, struct aac_fib_context, next);
		/*
		 *	Extract the AdapterFibContext from the Input parameters.
		 */
		if (fibctx->unique == f.fibctx) /* We found a winner */
			break;
		entry = entry->next;
		fibctx = NULL;
	}
	if (!fibctx) {
		spin_unlock_irqrestore(&dev->fib_lock, flags);
		dprintk((KERN_INFO "Fib Context not found\n"));
		return -EINVAL;
	}

	if ((fibctx->type != FSAFS_NTC_GET_ADAPTER_FIB_CONTEXT) ||
		 (fibctx->size != sizeof(struct aac_fib_context))) {
		spin_unlock_irqrestore(&dev->fib_lock, flags);
		dprintk((KERN_INFO "Fib Context corrupt?\n"));
		return -EINVAL;
	}
	status = 0;
	/*
	 *	If there are no fibs to send back, then either wait or return
	 *	-EAGAIN
	 */
return_fib:
	if (!list_empty(&fibctx->fib_list)) {
		/*
		 *	Pull the next fib from the fibs
		 */
		entry = fibctx->fib_list.next;
		list_del(entry);

		fib = list_entry(entry, struct fib, fiblink);
		fibctx->count--;
		spin_unlock_irqrestore(&dev->fib_lock, flags);
		if (copy_to_user(f.fib, fib->hw_fib_va, sizeof(struct hw_fib))) {
			kfree(fib->hw_fib_va);
			kfree(fib);
			return -EFAULT;
		}
		/*
		 *	Free the space occupied by this copy of the fib.
		 */
		kfree(fib->hw_fib_va);
		kfree(fib);
		status = 0;
	} else {
		spin_unlock_irqrestore(&dev->fib_lock, flags);
		/* If someone killed the AIF aacraid thread, restart it */
		status = !dev->aif_thread;
		if (status && !dev->in_reset && dev->queues && dev->fsa_dev) {
			/* Be paranoid, be very paranoid! */
			kthread_stop(dev->thread);
			ssleep(1);
			dev->aif_thread = 0;
			dev->thread = kthread_run(aac_command_thread, dev,
						  "%s", dev->name);
			ssleep(1);
		}
		if (f.wait) {
			if (down_interruptible(&fibctx->wait_sem) < 0) {
				status = -ERESTARTSYS;
			} else {
				/* Lock again and retry */
				spin_lock_irqsave(&dev->fib_lock, flags);
				goto return_fib;
			}
		} else {
			status = -EAGAIN;
		}
	}
	fibctx->jiffies = jiffies/HZ;
	return status;
}
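
/*
 * Note on the wait path above: with f.wait set, the caller blocks on
 * fibctx->wait_sem, which the AIF delivery code ups as queued AIFs
 * arrive, so one down_interruptible() pairs with one delivered fib.
 * With f.wait clear, userspace is expected to treat -EAGAIN as "poll
 * again later", e.g. (a sketch; the interval is an assumption):
 *
 *	do {
 *		rc = ioctl(fd, FSACTL_GET_NEXT_ADAPTER_FIB, &f);
 *		if (rc < 0 && errno == EAGAIN)
 *			usleep(100 * 1000);
 *	} while (rc < 0 && (errno == EAGAIN || errno == EINTR));
 */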

int aac_close_fib_context(struct aac_dev *dev, struct aac_fib_context *fibctx)
{
	struct fib *fib;

	/*
	 *	First free any FIBs that have not been consumed.
	 */
	while (!list_empty(&fibctx->fib_list)) {
		struct list_head *entry;
		/*
		 *	Pull the next fib from the fibs
		 */
		entry = fibctx->fib_list.next;
		list_del(entry);
		fib = list_entry(entry, struct fib, fiblink);
		fibctx->count--;
		/*
		 *	Free the space occupied by this copy of the fib.
		 */
		kfree(fib->hw_fib_va);
		kfree(fib);
	}
	/*
	 *	Remove the Context from the AdapterFibContext List
	 */
	list_del(&fibctx->next);
	/*
	 *	Invalidate context
	 */
	fibctx->type = 0;
	/*
	 *	Free the space occupied by the Context
	 */
	kfree(fibctx);
	return 0;
}

/**
 *	close_getadapter_fib	-	close down user fib context
 *	@dev:	adapter
 *	@arg:	ioctl arguments
 *
 *	This routine will close down the fibctx passed in from the user.
 */
static int close_getadapter_fib(struct aac_dev *dev, void __user *arg)
{
	struct aac_fib_context *fibctx;
	int status;
	unsigned long flags;
	struct list_head *entry;

	/*
	 *	Verify that the HANDLE passed in was a valid AdapterFibContext
	 *
	 *	Search the list of AdapterFibContext addresses on the adapter
	 *	to be sure this is a valid address
	 */

	entry = dev->fib_list.next;
	fibctx = NULL;

	while (entry != &dev->fib_list) {
		fibctx = list_entry(entry, struct aac_fib_context, next);
		/*
		 *	Extract the fibctx from the input parameters
		 */
		if (fibctx->unique == (u32)(uintptr_t)arg) /* We found a winner */
			break;
		entry = entry->next;
		fibctx = NULL;
	}

	if (!fibctx)
		return 0; /* Already gone */

	if ((fibctx->type != FSAFS_NTC_GET_ADAPTER_FIB_CONTEXT) ||
		 (fibctx->size != sizeof(struct aac_fib_context)))
		return -EINVAL;
	spin_lock_irqsave(&dev->fib_lock, flags);
	status = aac_close_fib_context(dev, fibctx);
	spin_unlock_irqrestore(&dev->fib_lock, flags);
	return status;
}
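
/*
 * aac_close_fib_context() walks and edits the context's fib list and
 * unlinks the context from dev->fib_list, so callers are expected to
 * hold dev->fib_lock around the call, as close_getadapter_fib() does
 * above (other call sites elsewhere in the driver presumably follow the
 * same contract).
 */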

/**
 *	check_revision	-	check driver version compatibility
 *	@dev:	adapter
 *	@arg:	ioctl arguments
 *
 *	This routine returns the driver version.
 *	Under Linux, there have been no version incompatibilities, so this is
 *	simple!
 */
static int check_revision(struct aac_dev *dev, void __user *arg)
{
	struct revision response;
	char *driver_version = aac_driver_version;
	u32 version;

	response.compat = 1;
	version = (simple_strtol(driver_version,
				&driver_version, 10) << 24) | 0x00000400;
	version += simple_strtol(driver_version + 1, &driver_version, 10) << 16;
	version += simple_strtol(driver_version + 1, NULL, 10);
	response.version = cpu_to_le32(version);
#ifdef AAC_DRIVER_BUILD
	response.build = cpu_to_le32(AAC_DRIVER_BUILD);
#else
	response.build = cpu_to_le32(9999);
#endif

	if (copy_to_user(arg, &response, sizeof(response)))
		return -EFAULT;
	return 0;
}
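
/*
 * The packing above turns the dotted aac_driver_version string into one
 * 32-bit value: major in bits 31-24, minor in bits 23-16, and the patch
 * level added into the low 16 bits on top of a constant 0x0400 marker.
 * For example (hypothetical version string "1.2.3"):
 *
 *	version  = (1 << 24) | 0x00000400;	// 0x01000400
 *	version += 2 << 16;			// 0x01020400
 *	version += 3;				// 0x01020403
 */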

/**
 *	aac_send_raw_srb	-	send a raw SRB from userspace
 *	@dev:	adapter
 *	@arg:	ioctl arguments
 *
 *	This routine sends a pass-through SCSI request block on behalf of a
 *	privileged user level program.
 */
static int aac_send_raw_srb(struct aac_dev *dev, void __user *arg)
{
	struct fib *srbfib;
	int status;
	struct aac_srb *srbcmd = NULL;
	struct aac_hba_cmd_req *hbacmd = NULL;
	struct user_aac_srb *user_srbcmd = NULL;
	struct user_aac_srb __user *user_srb = arg;
	struct aac_srb_reply __user *user_reply;
	u32 chn;
	u32 fibsize = 0;
	u32 flags = 0;
	s32 rcode = 0;
	u32 data_dir;
	void __user *sg_user[HBA_MAX_SG_EMBEDDED];
	void *sg_list[HBA_MAX_SG_EMBEDDED];
	u32 sg_count[HBA_MAX_SG_EMBEDDED];
	u32 sg_indx = 0;
	u32 byte_count = 0;
	u32 actual_fibsize64, actual_fibsize = 0;
	int i;
	int is_native_device;
	u64 address;

	if (dev->in_reset) {
		dprintk((KERN_DEBUG "aacraid: send raw srb -EBUSY\n"));
		return -EBUSY;
	}
	if (!capable(CAP_SYS_ADMIN)) {
		dprintk((KERN_DEBUG "aacraid: No permission to send raw srb\n"));
		return -EPERM;
	}
	/*
	 *	Allocate and initialize a Fib then setup a SRB command
	 */
	if (!(srbfib = aac_fib_alloc(dev)))
		return -ENOMEM;

	memset(sg_list, 0, sizeof(sg_list)); /* cleanup may take issue */
	if (copy_from_user(&fibsize, &user_srb->count, sizeof(u32))) {
		dprintk((KERN_DEBUG "aacraid: Could not copy data size from user\n"));
		rcode = -EFAULT;
		goto cleanup;
	}

	if ((fibsize < (sizeof(struct user_aac_srb) - sizeof(struct user_sgentry))) ||
	    (fibsize > (dev->max_fib_size - sizeof(struct aac_fibhdr)))) {
		rcode = -EINVAL;
		goto cleanup;
	}

	user_srbcmd = kmalloc(fibsize, GFP_KERNEL);
	if (!user_srbcmd) {
		dprintk((KERN_DEBUG "aacraid: Could not make a copy of the srb\n"));
		rcode = -ENOMEM;
		goto cleanup;
	}
	if (copy_from_user(user_srbcmd, user_srb, fibsize)) {
		dprintk((KERN_DEBUG "aacraid: Could not copy srb from user\n"));
		rcode = -EFAULT;
		goto cleanup;
	}

	flags = user_srbcmd->flags; /* from user in cpu order */
	switch (flags & (SRB_DataIn | SRB_DataOut)) {
	case SRB_DataOut:
		data_dir = DMA_TO_DEVICE;
		break;
	case (SRB_DataIn | SRB_DataOut):
		data_dir = DMA_BIDIRECTIONAL;
		break;
	case SRB_DataIn:
		data_dir = DMA_FROM_DEVICE;
		break;
	default:
		data_dir = DMA_NONE;
	}
	if (user_srbcmd->sg.count > ARRAY_SIZE(sg_list)) {
		dprintk((KERN_DEBUG "aacraid: too many sg entries %u\n",
			user_srbcmd->sg.count));
		rcode = -EINVAL;
		goto cleanup;
	}
	if ((data_dir == DMA_NONE) && user_srbcmd->sg.count) {
		dprintk((KERN_DEBUG "aacraid: SG with no direction specified\n"));
		rcode = -EINVAL;
		goto cleanup;
	}
	actual_fibsize = sizeof(struct aac_srb) - sizeof(struct sgentry) +
		((user_srbcmd->sg.count & 0xff) * sizeof(struct sgentry));
	actual_fibsize64 = actual_fibsize + (user_srbcmd->sg.count & 0xff) *
		(sizeof(struct sgentry64) - sizeof(struct sgentry));
	/* User made a mistake - should not continue */
	if ((actual_fibsize != fibsize) && (actual_fibsize64 != fibsize)) {
		dprintk((KERN_DEBUG "aacraid: Bad Size specified in "
			"Raw SRB command calculated fibsize=%u;%u "
			"user_srbcmd->sg.count=%u aac_srb=%zu sgentry=%zu;%zu "
			"issued fibsize=%u\n",
			actual_fibsize, actual_fibsize64, user_srbcmd->sg.count,
			sizeof(struct aac_srb), sizeof(struct sgentry),
			sizeof(struct sgentry64), fibsize));
		rcode = -EINVAL;
		goto cleanup;
	}
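
	/*
	 * At this point fibsize has matched exactly one of the two layouts,
	 * which is how the 32-bit vs 64-bit sg entry format is detected:
	 * e.g. with two sg entries, an 8-byte sgentry vs a 12-byte sgentry64
	 * makes actual_fibsize and actual_fibsize64 differ by 8 bytes, so
	 * the count the user sent identifies the format unambiguously.
	 * (Sizes here are illustrative; the structs live in aacraid.h.)
	 */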

	chn = user_srbcmd->channel;
	if (chn < AAC_MAX_BUSES && user_srbcmd->id < AAC_MAX_TARGETS &&
		dev->hba_map[chn][user_srbcmd->id].devtype ==
		AAC_DEVTYPE_NATIVE_RAW) {
		is_native_device = 1;
		hbacmd = (struct aac_hba_cmd_req *)srbfib->hw_fib_va;
		memset(hbacmd, 0, 96);	/* sizeof(*hbacmd) is not necessary */

		/* iu_type is a parameter of aac_hba_send */
		switch (data_dir) {
		case DMA_TO_DEVICE:
			hbacmd->byte1 = 2;
			break;
		case DMA_FROM_DEVICE:
		case DMA_BIDIRECTIONAL:
			hbacmd->byte1 = 1;
			break;
		case DMA_NONE:
		default:
			break;
		}
		hbacmd->lun[1] = cpu_to_le32(user_srbcmd->lun);
		hbacmd->it_nexus = dev->hba_map[chn][user_srbcmd->id].rmw_nexus;

		/*
		 * we fill in reply_qid later in aac_src_deliver_message
		 * we fill in iu_type, request_id later in aac_hba_send
		 * we fill in emb_data_desc_count, data_length later
		 * in sg list build
		 */

		memcpy(hbacmd->cdb, user_srbcmd->cdb, sizeof(hbacmd->cdb));

		address = (u64)srbfib->hw_error_pa;
		hbacmd->error_ptr_hi = cpu_to_le32((u32)(address >> 32));
		hbacmd->error_ptr_lo = cpu_to_le32((u32)(address & 0xffffffff));
		hbacmd->error_length = cpu_to_le32(FW_ERROR_BUFFER_SIZE);
		hbacmd->emb_data_desc_count =
					cpu_to_le32(user_srbcmd->sg.count);
		srbfib->hbacmd_size = 64 +
			user_srbcmd->sg.count * sizeof(struct aac_hba_sgl);

	} else {
		is_native_device = 0;
		aac_fib_init(srbfib);

		/* raw_srb FIB is not FastResponseCapable */
		srbfib->hw_fib_va->header.XferState &=
			~cpu_to_le32(FastResponseCapable);

		srbcmd = (struct aac_srb *)fib_data(srbfib);

		/* Fix up srb for endian and force some values */

		srbcmd->function = cpu_to_le32(SRBF_ExecuteScsi); /* Force this */
		srbcmd->channel	 = cpu_to_le32(user_srbcmd->channel);
		srbcmd->id	 = cpu_to_le32(user_srbcmd->id);
		srbcmd->lun	 = cpu_to_le32(user_srbcmd->lun);
		srbcmd->timeout	 = cpu_to_le32(user_srbcmd->timeout);
		srbcmd->flags	 = cpu_to_le32(flags);
		srbcmd->retry_limit = 0; /* Obsolete parameter */
		srbcmd->cdb_size = cpu_to_le32(user_srbcmd->cdb_size);
		memcpy(srbcmd->cdb, user_srbcmd->cdb, sizeof(srbcmd->cdb));
	}

	byte_count = 0;
	if (is_native_device) {
		struct user_sgmap *usg32 = &user_srbcmd->sg;
		struct user_sgmap64 *usg64 =
			(struct user_sgmap64 *)&user_srbcmd->sg;

		for (i = 0; i < usg32->count; i++) {
			void *p;
			u64 addr;

			sg_count[i] = (actual_fibsize64 == fibsize) ?
				usg64->sg[i].count : usg32->sg[i].count;
			if (sg_count[i] >
				(dev->scsi_host_ptr->max_sectors << 9)) {
				pr_err("aacraid: upsg->sg[%d].count=%u>%u\n",
					i, sg_count[i],
					dev->scsi_host_ptr->max_sectors << 9);
				rcode = -EINVAL;
				goto cleanup;
			}

			p = kmalloc(sg_count[i], GFP_KERNEL);
			if (!p) {
				rcode = -ENOMEM;
				goto cleanup;
			}

			if (actual_fibsize64 == fibsize) {
				addr = (u64)usg64->sg[i].addr[0];
				addr += ((u64)usg64->sg[i].addr[1]) << 32;
			} else {
				addr = (u64)usg32->sg[i].addr;
			}

			sg_user[i] = (void __user *)(uintptr_t)addr;
			sg_list[i] = p; /* save so we can clean up later */
			sg_indx = i;

			if (flags & SRB_DataOut) {
				if (copy_from_user(p, sg_user[i],
						   sg_count[i])) {
					rcode = -EFAULT;
					goto cleanup;
				}
			}
			addr = pci_map_single(dev->pdev, p, sg_count[i],
					      data_dir);
			hbacmd->sge[i].addr_hi = cpu_to_le32((u32)(addr>>32));
			hbacmd->sge[i].addr_lo = cpu_to_le32(
						(u32)(addr & 0xffffffff));
			hbacmd->sge[i].len = cpu_to_le32(sg_count[i]);
			hbacmd->sge[i].flags = 0;
			byte_count += sg_count[i];
		}

		if (usg32->count > 0)	/* embedded sglist */
			hbacmd->sge[usg32->count-1].flags =
				cpu_to_le32(0x40000000);
		hbacmd->data_length = cpu_to_le32(byte_count);

		status = aac_hba_send(HBA_IU_TYPE_SCSI_CMD_REQ, srbfib,
					NULL, NULL);
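
		/*
		 * The 0x40000000 flag set on the final element above marks
		 * the end of the embedded SGL; every earlier element keeps
		 * flags == 0. Presumably this mirrors a "last element" bit
		 * in the controller's IU format, which is why it is applied
		 * to sge[count - 1] only.
		 */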

	} else if (dev->adapter_info.options & AAC_OPT_SGMAP_HOST64) {
		struct user_sgmap64 *upsg = (struct user_sgmap64 *)&user_srbcmd->sg;
		struct sgmap64 *psg = (struct sgmap64 *)&srbcmd->sg;

		/*
		 *	This should also catch if user used the 32 bit sgmap
		 */
		if (actual_fibsize64 == fibsize) {
			actual_fibsize = actual_fibsize64;
			for (i = 0; i < upsg->count; i++) {
				u64 addr;
				void *p;

				sg_count[i] = upsg->sg[i].count;
				if (sg_count[i] >
				    ((dev->adapter_info.options &
				     AAC_OPT_NEW_COMM) ?
				      (dev->scsi_host_ptr->max_sectors << 9) :
				      65536)) {
					rcode = -EINVAL;
					goto cleanup;
				}

				p = kmalloc(sg_count[i], GFP_KERNEL);
				if (!p) {
					dprintk((KERN_DEBUG "aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
						 sg_count[i], i, upsg->count));
					rcode = -ENOMEM;
					goto cleanup;
				}
				addr = (u64)upsg->sg[i].addr[0];
				addr += ((u64)upsg->sg[i].addr[1]) << 32;
				sg_user[i] = (void __user *)(uintptr_t)addr;
				sg_list[i] = p; /* save so we can clean up later */
				sg_indx = i;

				if (flags & SRB_DataOut) {
					if (copy_from_user(p, sg_user[i],
							   sg_count[i])) {
						dprintk((KERN_DEBUG "aacraid: Could not copy sg data from user\n"));
						rcode = -EFAULT;
						goto cleanup;
					}
				}
				addr = pci_map_single(dev->pdev, p,
						      sg_count[i], data_dir);

				psg->sg[i].addr[0] = cpu_to_le32(addr & 0xffffffff);
				psg->sg[i].addr[1] = cpu_to_le32(addr>>32);
				byte_count += sg_count[i];
				psg->sg[i].count = cpu_to_le32(sg_count[i]);
			}
		} else {
			struct user_sgmap *usg;

			usg = kmemdup(upsg,
				      actual_fibsize - sizeof(struct aac_srb)
				      + sizeof(struct sgmap), GFP_KERNEL);
			if (!usg) {
				dprintk((KERN_DEBUG "aacraid: Allocation error in Raw SRB command\n"));
				rcode = -ENOMEM;
				goto cleanup;
			}
			actual_fibsize = actual_fibsize64;

			for (i = 0; i < usg->count; i++) {
				u64 addr;
				void *p;

				sg_count[i] = usg->sg[i].count;
				if (sg_count[i] >
				    ((dev->adapter_info.options &
				     AAC_OPT_NEW_COMM) ?
				      (dev->scsi_host_ptr->max_sectors << 9) :
				      65536)) {
					kfree(usg);
					rcode = -EINVAL;
					goto cleanup;
				}

				p = kmalloc(sg_count[i], GFP_KERNEL);
				if (!p) {
					dprintk((KERN_DEBUG "aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
						 sg_count[i], i, usg->count));
					kfree(usg);
					rcode = -ENOMEM;
					goto cleanup;
				}
				sg_user[i] = (void __user *)(uintptr_t)usg->sg[i].addr;
				sg_list[i] = p; /* save so we can clean up later */
				sg_indx = i;

				if (flags & SRB_DataOut) {
					if (copy_from_user(p, sg_user[i],
							   sg_count[i])) {
						kfree(usg);
						dprintk((KERN_DEBUG "aacraid: Could not copy sg data from user\n"));
						rcode = -EFAULT;
						goto cleanup;
					}
				}
				addr = pci_map_single(dev->pdev, p,
						      sg_count[i], data_dir);

				psg->sg[i].addr[0] = cpu_to_le32(addr & 0xffffffff);
				psg->sg[i].addr[1] = cpu_to_le32(addr>>32);
				byte_count += sg_count[i];
				psg->sg[i].count = cpu_to_le32(sg_count[i]);
			}
			kfree(usg);
		}
		srbcmd->count = cpu_to_le32(byte_count);
		if (user_srbcmd->sg.count)
			psg->count = cpu_to_le32(sg_indx+1);
		else
			psg->count = 0;
		status = aac_fib_send(ScsiPortCommand64, srbfib,
				actual_fibsize, FsaNormal, 1, 1, NULL, NULL);
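
		/*
		 * Each sg element above is bounce-buffered: the user's data
		 * is copied into a fresh kmalloc() buffer, DMA-mapped, and
		 * (for SRB_DataIn) copied back after completion. The
		 * per-element size cap is max_sectors << 9 bytes on
		 * NEW_COMM adapters and 64 KiB otherwise, presumably
		 * reflecting what the older interface is trusted to handle
		 * per entry.
		 */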
	} else {
		struct user_sgmap *upsg = &user_srbcmd->sg;
		struct sgmap *psg = &srbcmd->sg;

		if (actual_fibsize64 == fibsize) {
			struct user_sgmap64 *usg = (struct user_sgmap64 *)upsg;

			for (i = 0; i < upsg->count; i++) {
				uintptr_t addr;
				void *p;

				sg_count[i] = usg->sg[i].count;
				if (sg_count[i] >
				    ((dev->adapter_info.options &
				     AAC_OPT_NEW_COMM) ?
				      (dev->scsi_host_ptr->max_sectors << 9) :
				      65536)) {
					rcode = -EINVAL;
					goto cleanup;
				}
				p = kmalloc(sg_count[i], GFP_KERNEL|GFP_DMA32);
				if (!p) {
					dprintk((KERN_DEBUG "aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
						 sg_count[i], i, usg->count));
					rcode = -ENOMEM;
					goto cleanup;
				}
				addr = (u64)usg->sg[i].addr[0];
				addr += ((u64)usg->sg[i].addr[1]) << 32;
				sg_user[i] = (void __user *)addr;
				sg_list[i] = p; /* save so we can clean up later */
				sg_indx = i;

				if (flags & SRB_DataOut) {
					if (copy_from_user(p, sg_user[i],
							   sg_count[i])) {
						dprintk((KERN_DEBUG "aacraid: Could not copy sg data from user\n"));
						rcode = -EFAULT;
						goto cleanup;
					}
				}
				addr = pci_map_single(dev->pdev, p,
						      usg->sg[i].count, data_dir);

				psg->sg[i].addr = cpu_to_le32(addr & 0xffffffff);
				byte_count += usg->sg[i].count;
				psg->sg[i].count = cpu_to_le32(sg_count[i]);
			}
		} else {
			for (i = 0; i < upsg->count; i++) {
				dma_addr_t addr;
				void *p;

				sg_count[i] = upsg->sg[i].count;
				if (sg_count[i] >
				    ((dev->adapter_info.options &
				     AAC_OPT_NEW_COMM) ?
				      (dev->scsi_host_ptr->max_sectors << 9) :
				      65536)) {
					rcode = -EINVAL;
					goto cleanup;
				}
				p = kmalloc(sg_count[i], GFP_KERNEL|GFP_DMA32);
				if (!p) {
					dprintk((KERN_DEBUG "aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
						 sg_count[i], i, upsg->count));
					rcode = -ENOMEM;
					goto cleanup;
				}
				sg_user[i] = (void __user *)(uintptr_t)upsg->sg[i].addr;
				sg_list[i] = p; /* save so we can clean up later */
				sg_indx = i;

				if (flags & SRB_DataOut) {
					if (copy_from_user(p, sg_user[i],
							   sg_count[i])) {
						dprintk((KERN_DEBUG "aacraid: Could not copy sg data from user\n"));
						rcode = -EFAULT;
						goto cleanup;
					}
				}
				addr = pci_map_single(dev->pdev, p,
						      sg_count[i], data_dir);

				psg->sg[i].addr = cpu_to_le32(addr);
				byte_count += sg_count[i];
				psg->sg[i].count = cpu_to_le32(sg_count[i]);
			}
		}
		srbcmd->count = cpu_to_le32(byte_count);
		if (user_srbcmd->sg.count)
			psg->count = cpu_to_le32(sg_indx+1);
		else
			psg->count = 0;
		status = aac_fib_send(ScsiPortCommand, srbfib,
				actual_fibsize, FsaNormal, 1, 1, NULL, NULL);
	}

	if (status == -ERESTARTSYS) {
		rcode = -ERESTARTSYS;
		goto cleanup;
	}

	if (status != 0) {
		dprintk((KERN_DEBUG "aacraid: Could not send raw srb fib to hba\n"));
		rcode = -ENXIO;
		goto cleanup;
	}

	if (flags & SRB_DataIn) {
		for (i = 0; i <= sg_indx; i++) {
			if (copy_to_user(sg_user[i], sg_list[i], sg_count[i])) {
				dprintk((KERN_DEBUG "aacraid: Could not copy sg data to user\n"));
				rcode = -EFAULT;
				goto cleanup;
			}
		}
	}

	user_reply = arg + fibsize;
	if (is_native_device) {
		struct aac_hba_resp *err =
			&((struct aac_native_hba *)srbfib->hw_fib_va)->resp.err;
		struct aac_srb_reply reply;

		memset(&reply, 0, sizeof(reply));
		reply.status = ST_OK;
		if (srbfib->flags & FIB_CONTEXT_FLAG_FASTRESP) {
			/* fast response */
			reply.srb_status = SRB_STATUS_SUCCESS;
			reply.scsi_status = 0;
			reply.data_xfer_length = byte_count;
			reply.sense_data_size = 0;
			memset(reply.sense_data, 0, AAC_SENSE_BUFFERSIZE);
		} else {
			reply.srb_status = err->service_response;
			reply.scsi_status = err->status;
			reply.data_xfer_length = byte_count -
				le32_to_cpu(err->residual_count);
			reply.sense_data_size = err->sense_response_data_len;
			memcpy(reply.sense_data, err->sense_response_buf,
				AAC_SENSE_BUFFERSIZE);
		}
		if (copy_to_user(user_reply, &reply,
				 sizeof(struct aac_srb_reply))) {
			dprintk((KERN_DEBUG "aacraid: Copy to user failed\n"));
			rcode = -EFAULT;
			goto cleanup;
		}
	} else {
		struct aac_srb_reply *reply;

		reply = (struct aac_srb_reply *)fib_data(srbfib);
		if (copy_to_user(user_reply, reply,
				 sizeof(struct aac_srb_reply))) {
			dprintk((KERN_DEBUG "aacraid: Copy to user failed\n"));
			rcode = -EFAULT;
			goto cleanup;
		}
	}

cleanup:
	kfree(user_srbcmd);
	if (rcode != -ERESTARTSYS) {
		for (i = 0; i <= sg_indx; i++)
			kfree(sg_list[i]);
		aac_fib_complete(srbfib);
		aac_fib_free(srbfib);
	}

	return rcode;
}
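
/*
 * A minimal userspace sketch of the FSACTL_SEND_RAW_SRB path. struct
 * user_aac_srb is the uapi layout this handler parses; the reply is
 * written immediately after the request, at arg + srb.count, so the
 * caller must reserve room for it (the combined struct below is an
 * illustrative assumption):
 *
 *	struct {
 *		struct user_aac_srb srb; // srb.count = SRB size incl. sg list
 *		struct aac_srb_reply reply;
 *	} req;
 *	// fill req.srb.channel/id/lun, flags, cdb[], sg list ...
 *	ioctl(fd, FSACTL_SEND_RAW_SRB, &req);
 *	// req.reply.srb_status / scsi_status / sense_data now valid
 */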

struct aac_pci_info {
	u32 bus;
	u32 slot;
};

static int aac_get_pci_info(struct aac_dev *dev, void __user *arg)
{
	struct aac_pci_info pci_info;

	pci_info.bus = dev->pdev->bus->number;
	pci_info.slot = PCI_SLOT(dev->pdev->devfn);

	if (copy_to_user(arg, &pci_info, sizeof(struct aac_pci_info))) {
		dprintk((KERN_DEBUG "aacraid: Could not copy pci info\n"));
		return -EFAULT;
	}
	return 0;
}

static int aac_get_hba_info(struct aac_dev *dev, void __user *arg)
{
	struct aac_hba_info hbainfo;

	memset(&hbainfo, 0, sizeof(hbainfo));
	hbainfo.adapter_number		= (u8) dev->id;
	hbainfo.system_io_bus_number	= dev->pdev->bus->number;
	hbainfo.device_number		= (dev->pdev->devfn >> 3);
	hbainfo.function_number		= (dev->pdev->devfn & 0x0007);

	hbainfo.vendor_id		= dev->pdev->vendor;
	hbainfo.device_id		= dev->pdev->device;
	hbainfo.sub_vendor_id		= dev->pdev->subsystem_vendor;
	hbainfo.sub_system_id		= dev->pdev->subsystem_device;

	if (copy_to_user(arg, &hbainfo, sizeof(struct aac_hba_info))) {
		dprintk((KERN_DEBUG "aacraid: Could not copy hba info\n"));
		return -EFAULT;
	}

	return 0;
}

struct aac_reset_iop {
	u8 reset_type;
};

static int aac_send_reset_adapter(struct aac_dev *dev, void __user *arg)
{
	struct aac_reset_iop reset;
	int retval;

	if (copy_from_user((void *)&reset, arg, sizeof(struct aac_reset_iop)))
		return -EFAULT;

	dev->adapter_shutdown = 1;

	mutex_unlock(&dev->ioctl_mutex);
	retval = aac_reset_adapter(dev, 0, reset.reset_type);
	mutex_lock(&dev->ioctl_mutex);

	return retval;
}

int aac_do_ioctl(struct aac_dev *dev, int cmd, void __user *arg)
{
	int status;

	mutex_lock(&dev->ioctl_mutex);

	if (dev->adapter_shutdown) {
		status = -EACCES;
		goto cleanup;
	}

	/*
	 *	HBA gets first crack
	 */

	status = aac_dev_ioctl(dev, cmd, arg);
	if (status != -ENOTTY)
		goto cleanup;

	switch (cmd) {
	case FSACTL_MINIPORT_REV_CHECK:
		status = check_revision(dev, arg);
		break;
	case FSACTL_SEND_LARGE_FIB:
	case FSACTL_SENDFIB:
		status = ioctl_send_fib(dev, arg);
		break;
	case FSACTL_OPEN_GET_ADAPTER_FIB:
		status = open_getadapter_fib(dev, arg);
		break;
	case FSACTL_GET_NEXT_ADAPTER_FIB:
		status = next_getadapter_fib(dev, arg);
		break;
	case FSACTL_CLOSE_GET_ADAPTER_FIB:
		status = close_getadapter_fib(dev, arg);
		break;
	case FSACTL_SEND_RAW_SRB:
		status = aac_send_raw_srb(dev, arg);
		break;
	case FSACTL_GET_PCI_INFO:
		status = aac_get_pci_info(dev, arg);
		break;
	case FSACTL_GET_HBA_INFO:
		status = aac_get_hba_info(dev, arg);
		break;
	case FSACTL_RESET_IOP:
		status = aac_send_reset_adapter(dev, arg);
		break;

	default:
		status = -ENOTTY;
		break;
	}

cleanup:
	mutex_unlock(&dev->ioctl_mutex);

	return status;
}
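
/*
 * Dispatch order note: aac_do_ioctl() hands every command to
 * aac_dev_ioctl() first and only falls through to the handlers in this
 * file when that returns -ENOTTY, so array-level ioctls implemented
 * elsewhere in the driver take precedence over the generic FSACTL_*
 * cases above. aac_send_reset_adapter() drops ioctl_mutex around
 * aac_reset_adapter(), presumably because the reset path can re-enter
 * ioctl handling; it also sets adapter_shutdown, which makes subsequent
 * ioctls fail with -EACCES until the adapter is brought back up.
 */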