/*
 *
 * Linux MegaRAID device driver
 *
 * Copyright (c) 2003-2004  LSI Logic Corporation.
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 * FILE		: megaraid_mm.c
 * Version	: v2.20.2.7 (Jul 16 2006)
 *
 * Common management module
 */
#include <linux/sched.h>
#include "megaraid_mm.h"


// Entry points for char node driver
static int mraid_mm_open(struct inode *, struct file *);
static int mraid_mm_ioctl(struct inode *, struct file *, uint, unsigned long);


// routines to convert to and from the old format
static int mimd_to_kioc(mimd_t __user *, mraid_mmadp_t *, uioc_t *);
static int kioc_to_mimd(uioc_t *, mimd_t __user *);


// Helper functions
static int handle_drvrcmd(void __user *, uint8_t, int *);
static int lld_ioctl(mraid_mmadp_t *, uioc_t *);
static void ioctl_done(uioc_t *);
static void lld_timedout(unsigned long);
static void hinfo_to_cinfo(mraid_hba_info_t *, mcontroller_t *);
static mraid_mmadp_t *mraid_mm_get_adapter(mimd_t __user *, int *);
static uioc_t *mraid_mm_alloc_kioc(mraid_mmadp_t *);
static void mraid_mm_dealloc_kioc(mraid_mmadp_t *, uioc_t *);
static int mraid_mm_attach_buf(mraid_mmadp_t *, uioc_t *, int);
static int mraid_mm_setup_dma_pools(mraid_mmadp_t *);
static void mraid_mm_free_adp_resources(mraid_mmadp_t *);
static void mraid_mm_teardown_dma_pools(mraid_mmadp_t *);

#ifdef CONFIG_COMPAT
static long mraid_mm_compat_ioctl(struct file *, unsigned int, unsigned long);
#endif

MODULE_AUTHOR("LSI Logic Corporation");
MODULE_DESCRIPTION("LSI Logic Management Module");
MODULE_LICENSE("GPL");
MODULE_VERSION(LSI_COMMON_MOD_VERSION);

static int dbglevel = CL_ANN;
module_param_named(dlevel, dbglevel, int, 0);
MODULE_PARM_DESC(dlevel, "Debug level (default=0)");

EXPORT_SYMBOL(mraid_mm_register_adp);
EXPORT_SYMBOL(mraid_mm_unregister_adp);
EXPORT_SYMBOL(mraid_mm_adapter_app_handle);

static int majorno;
static uint32_t drvr_ver	= 0x02200207;

static int adapters_count_g;
static struct list_head adapters_list_g;

static wait_queue_head_t wait_q;

static const struct file_operations lsi_fops = {
	.open		= mraid_mm_open,
	.ioctl		= mraid_mm_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= mraid_mm_compat_ioctl,
#endif
	.owner		= THIS_MODULE,
};

/**
 * mraid_mm_open - open routine for char node interface
 * @inode	: unused
 * @filep	: unused
 *
 * Allow ioctl operations by apps only if they have superuser privilege.
 */
static int
mraid_mm_open(struct inode *inode, struct file *filep)
{
	/*
	 * Only allow superuser to access private ioctl interface
	 */
	if (!capable(CAP_SYS_ADMIN)) return (-EACCES);

	return 0;
}

/**
 * mraid_mm_ioctl - module entry-point for ioctls
 * @inode	: inode (ignored)
 * @filep	: file operations pointer (ignored)
 * @cmd		: ioctl command
 * @arg		: user ioctl packet
 */
static int
mraid_mm_ioctl(struct inode *inode, struct file *filep, unsigned int cmd,
							unsigned long arg)
{
	uioc_t		*kioc;
	char		signature[EXT_IOCTL_SIGN_SZ]	= {0};
	int		rval;
	mraid_mmadp_t	*adp;
	uint8_t		old_ioctl;
	int		drvrcmd_rval;
	void __user	*argp = (void __user *)arg;

	/*
	 * Make sure only USCSICMD are issued through this interface.
	 * The MIMD application may still issue a different command.
	 */
	if ((_IOC_TYPE(cmd) != MEGAIOC_MAGIC) && (cmd != USCSICMD)) {
		return (-EINVAL);
	}

	/*
	 * Look for signature to see if this is the new or old ioctl format.
	 */
	if (copy_from_user(signature, argp, EXT_IOCTL_SIGN_SZ)) {
		con_log(CL_ANN, (KERN_WARNING
			"megaraid cmm: copy from usr addr failed\n"));
		return (-EFAULT);
	}

	if (memcmp(signature, EXT_IOCTL_SIGN, EXT_IOCTL_SIGN_SZ) == 0)
		old_ioctl = 0;
	else
		old_ioctl = 1;

	/*
	 * At present, we don't support the new ioctl packet
	 */
	if (!old_ioctl)
		return (-EINVAL);

	/*
	 * If it is a driver ioctl (as opposed to fw ioctls), then we can
	 * handle the command locally. rval > 0 means it is not a drvr cmd
	 */
	rval = handle_drvrcmd(argp, old_ioctl, &drvrcmd_rval);

	if (rval < 0)
		return rval;
	else if (rval == 0)
		return drvrcmd_rval;

	rval = 0;
	if ((adp = mraid_mm_get_adapter(argp, &rval)) == NULL) {
		return rval;
	}

	/*
	 * Check if adapter can accept ioctl. We may have marked it offline
	 * if any previous kioc had timedout on this controller.
	 */
	if (!adp->quiescent) {
		con_log(CL_ANN, (KERN_WARNING
			"megaraid cmm: controller cannot accept cmds due to "
			"earlier errors\n"));
		return -EFAULT;
	}

	/*
	 * The following call will block till a kioc is available. It may
	 * still return NULL if the free pool is unexpectedly empty.
	 */
	kioc = mraid_mm_alloc_kioc(adp);
	if (!kioc)
		return -ENXIO;

	/*
	 * User sent the old mimd_t ioctl packet. Convert it to uioc_t.
	 */
	if ((rval = mimd_to_kioc(argp, adp, kioc))) {
		mraid_mm_dealloc_kioc(adp, kioc);
		return rval;
	}

	kioc->done = ioctl_done;

	/*
	 * Issue the IOCTL to the low level driver. After the IOCTL completes
	 * release the kioc if and only if it was _not_ timedout. If it was
	 * timedout, that means that resources are still with low level driver.
	 */
	if ((rval = lld_ioctl(adp, kioc))) {

		if (!kioc->timedout)
			mraid_mm_dealloc_kioc(adp, kioc);

		return rval;
	}

	/*
	 * Convert the kioc back to user space
	 */
	rval = kioc_to_mimd(kioc, argp);

	/*
	 * Return the kioc to free pool
	 */
	mraid_mm_dealloc_kioc(adp, kioc);

	return rval;
}

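/*
 * Illustration only (not part of the driver): a minimal sketch of how a
 * user space management application might exercise the old-format path of
 * mraid_mm_ioctl() above. The device node name and the ioctl request code
 * are assumptions (the node is created against the "megadev" major
 * registered in mraid_mm_init(); the request code comes from the interface
 * headers). The field names are the ones this file reads out of mimd_t.
 *
 *	uint32_t	version;
 *	mimd_t		mimd;
 *	int		fd = open("/dev/megadev0", O_RDONLY);
 *
 *	memset(&mimd, 0, sizeof(mimd));
 *	mimd.ui.fcs.opcode    = 0x82;			// driver-private opcode
 *	mimd.ui.fcs.subopcode = MEGAIOC_QDRVRVER;	// query driver version
 *	mimd.data             = (void *)&version;	// answer copied back here
 *
 *	ioctl(fd, request, &mimd);			// request code from the headers
 *
 * Because the packet does not begin with EXT_IOCTL_SIGN, the ioctl is
 * treated as the old format and handle_drvrcmd() completes it locally
 * without touching any adapter.
 */
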

/**
 * mraid_mm_get_adapter - Returns the corresponding adapter for the mimd packet
 * @umimd	: User space mimd_t ioctl packet
 * @rval	: returned success/error status
 *
 * The function return value is a pointer to the located @adapter.
 */
static mraid_mmadp_t *
mraid_mm_get_adapter(mimd_t __user *umimd, int *rval)
{
	mraid_mmadp_t	*adapter;
	mimd_t		mimd;
	uint32_t	adapno;
	int		iterator;


	if (copy_from_user(&mimd, umimd, sizeof(mimd_t))) {
		*rval = -EFAULT;
		return NULL;
	}

	adapno = GETADAP(mimd.ui.fcs.adapno);

	if (adapno >= adapters_count_g) {
		*rval = -ENODEV;
		return NULL;
	}

	adapter = NULL;
	iterator = 0;

	list_for_each_entry(adapter, &adapters_list_g, list) {
		if (iterator++ == adapno) break;
	}

	if (!adapter) {
		*rval = -ENODEV;
		return NULL;
	}

	return adapter;
}

/**
 * handle_drvrcmd - Checks if the opcode is a driver cmd and if it is, handles it.
 * @arg		: packet sent by the user app
 * @old_ioctl	: mimd if 1; uioc otherwise
 * @rval	: pointer for command's returned value (not function status)
 */
static int
handle_drvrcmd(void __user *arg, uint8_t old_ioctl, int *rval)
{
	mimd_t __user	*umimd;
	mimd_t		kmimd;
	uint8_t		opcode;
	uint8_t		subopcode;

	if (old_ioctl)
		goto old_packet;
	else
		goto new_packet;

new_packet:
	return (-ENOTSUPP);

old_packet:
	*rval = 0;
	umimd = arg;

	if (copy_from_user(&kmimd, umimd, sizeof(mimd_t)))
		return (-EFAULT);

	opcode		= kmimd.ui.fcs.opcode;
	subopcode	= kmimd.ui.fcs.subopcode;

	/*
	 * If the opcode is 0x82 and the subopcode is either GET_DRVRVER or
	 * GET_NUMADP, then we can handle. Otherwise we should return 1 to
	 * indicate that we cannot handle this.
	 */
	if (opcode != 0x82)
		return 1;

	switch (subopcode) {

	case MEGAIOC_QDRVRVER:

		if (copy_to_user(kmimd.data, &drvr_ver, sizeof(uint32_t)))
			return (-EFAULT);

		return 0;

	case MEGAIOC_QNADAP:

		*rval = adapters_count_g;

		if (copy_to_user(kmimd.data, &adapters_count_g,
				sizeof(uint32_t)))
			return (-EFAULT);

		return 0;

	default:
		/* cannot handle */
		return 1;
	}

	return 0;
}


/**
 * mimd_to_kioc	- Converter from old to new ioctl format
 * @umimd	: user space old MIMD IOCTL
 * @adp		: adapter softstate
 * @kioc	: kernel space new format IOCTL
 *
 * Routine to convert MIMD interface IOCTL to new interface IOCTL packet. The
 * new packet is in kernel space so that driver can perform operations on it
 * freely.
 */

static int
mimd_to_kioc(mimd_t __user *umimd, mraid_mmadp_t *adp, uioc_t *kioc)
{
	mbox64_t		*mbox64;
	mbox_t			*mbox;
	mraid_passthru_t	*pthru32;
	uint32_t		adapno;
	uint8_t			opcode;
	uint8_t			subopcode;
	mimd_t			mimd;

	if (copy_from_user(&mimd, umimd, sizeof(mimd_t)))
		return (-EFAULT);

	/*
	 * Applications are not allowed to send extd pthru
	 */
	if ((mimd.mbox[0] == MBOXCMD_PASSTHRU64) ||
			(mimd.mbox[0] == MBOXCMD_EXTPTHRU))
		return (-EINVAL);

	opcode		= mimd.ui.fcs.opcode;
	subopcode	= mimd.ui.fcs.subopcode;
	adapno		= GETADAP(mimd.ui.fcs.adapno);

	if (adapno >= adapters_count_g)
		return (-ENODEV);

	kioc->adapno	= adapno;
	kioc->mb_type	= MBOX_LEGACY;
	kioc->app_type	= APPTYPE_MIMD;

	switch (opcode) {

	case 0x82:

		if (subopcode == MEGAIOC_QADAPINFO) {

			kioc->opcode	= GET_ADAP_INFO;
			kioc->data_dir	= UIOC_RD;
			kioc->xferlen	= sizeof(mraid_hba_info_t);

			if (mraid_mm_attach_buf(adp, kioc, kioc->xferlen))
				return (-ENOMEM);
		}
		else {
			con_log(CL_ANN, (KERN_WARNING
					"megaraid cmm: Invalid subop\n"));
			return (-EINVAL);
		}

		break;

	case 0x81:

		kioc->opcode		= MBOX_CMD;
		kioc->xferlen		= mimd.ui.fcs.length;
		kioc->user_data_len	= kioc->xferlen;
		kioc->user_data		= mimd.ui.fcs.buffer;

		if (mraid_mm_attach_buf(adp, kioc, kioc->xferlen))
			return (-ENOMEM);

		if (mimd.outlen) kioc->data_dir  = UIOC_RD;
		if (mimd.inlen) kioc->data_dir |= UIOC_WR;

		break;

	case 0x80:

		kioc->opcode		= MBOX_CMD;
		kioc->xferlen		= (mimd.outlen > mimd.inlen) ?
						mimd.outlen : mimd.inlen;
		kioc->user_data_len	= kioc->xferlen;
		kioc->user_data		= mimd.data;

		if (mraid_mm_attach_buf(adp, kioc, kioc->xferlen))
			return (-ENOMEM);

		if (mimd.outlen) kioc->data_dir  = UIOC_RD;
		if (mimd.inlen) kioc->data_dir |= UIOC_WR;

		break;

	default:
		return (-EINVAL);
	}

	/*
	 * If driver command, nothing else to do
	 */
	if (opcode == 0x82)
		return 0;

	/*
	 * This is a mailbox cmd; copy the mailbox from mimd
	 */
	mbox64	= (mbox64_t *)((unsigned long)kioc->cmdbuf);
	mbox	= &mbox64->mbox32;
	memcpy(mbox, mimd.mbox, 14);

	if (mbox->cmd != MBOXCMD_PASSTHRU) {	// regular DCMD

		mbox->xferaddr	= (uint32_t)kioc->buf_paddr;

		if (kioc->data_dir & UIOC_WR) {
			if (copy_from_user(kioc->buf_vaddr, kioc->user_data,
							kioc->xferlen)) {
				return (-EFAULT);
			}
		}

		return 0;
	}

	/*
	 * This is a regular 32-bit pthru cmd; mbox points to pthru struct.
	 * Just like in above case, the beginning for memblk is treated as
	 * a mailbox. The passthru will begin at next 1K boundary. And the
	 * data will start 1K after that.
 */
	pthru32			= kioc->pthru32;
	kioc->user_pthru	= &umimd->pthru;
	mbox->xferaddr		= (uint32_t)kioc->pthru32_h;

	if (copy_from_user(pthru32, kioc->user_pthru,
			sizeof(mraid_passthru_t))) {
		return (-EFAULT);
	}

	pthru32->dataxferaddr	= kioc->buf_paddr;
	if (kioc->data_dir & UIOC_WR) {
		if (copy_from_user(kioc->buf_vaddr, kioc->user_data,
						pthru32->dataxferlen)) {
			return (-EFAULT);
		}
	}

	return 0;
}

/**
 * mraid_mm_attach_buf - Attach a free dma buffer for required size
 * @adp		: Adapter softstate
 * @kioc	: kioc that the buffer needs to be attached to
 * @xferlen	: required length for buffer
 *
 * First we search for a pool with the smallest buffer that is >= @xferlen. If
 * that pool has no free buffer, we will try for the next bigger size. If none
 * is available, we will try to allocate the smallest buffer that is >=
 * @xferlen and attach it to the pool.
 */
static int
mraid_mm_attach_buf(mraid_mmadp_t *adp, uioc_t *kioc, int xferlen)
{
	mm_dmapool_t	*pool;
	int		right_pool = -1;
	unsigned long	flags;
	int		i;

	kioc->pool_index	= -1;
	kioc->buf_vaddr		= NULL;
	kioc->buf_paddr		= 0;
	kioc->free_buf		= 0;

	/*
	 * We need xferlen amount of memory. See if we can get it from our
	 * dma pools. If we don't get exact size, we will try bigger buffer
	 */

	for (i = 0; i < MAX_DMA_POOLS; i++) {

		pool = &adp->dma_pool_list[i];

		if (xferlen > pool->buf_size)
			continue;

		if (right_pool == -1)
			right_pool = i;

		spin_lock_irqsave(&pool->lock, flags);

		if (!pool->in_use) {

			pool->in_use		= 1;
			kioc->pool_index	= i;
			kioc->buf_vaddr		= pool->vaddr;
			kioc->buf_paddr		= pool->paddr;

			spin_unlock_irqrestore(&pool->lock, flags);
			return 0;
		}
		else {
			spin_unlock_irqrestore(&pool->lock, flags);
			continue;
		}
	}

	/*
	 * If xferlen doesn't match any of our pools, return error
	 */
	if (right_pool == -1)
		return -EINVAL;

	/*
	 * We did not get any buffer from the preallocated pool. Let us try
	 * to allocate one new buffer. NOTE: This is a blocking call.
	 */
	pool = &adp->dma_pool_list[right_pool];

	spin_lock_irqsave(&pool->lock, flags);

	kioc->pool_index	= right_pool;
	kioc->free_buf		= 1;
	kioc->buf_vaddr		= pci_pool_alloc(pool->handle, GFP_KERNEL,
							&kioc->buf_paddr);
	spin_unlock_irqrestore(&pool->lock, flags);

	if (!kioc->buf_vaddr)
		return -ENOMEM;

	return 0;
}

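/*
 * Illustration only (not part of the driver): how mraid_mm_attach_buf()
 * above picks a pool. The sizes below assume MRAID_MM_INIT_BUFF_SIZE is 4KB
 * and MAX_DMA_POOLS is 5, i.e. pools of 4k, 8k, 16k, 32k and 64k as described
 * at mraid_mm_setup_dma_pools(); check megaraid_mm.h for the real constants.
 *
 *	xferlen = 6000 bytes
 *	  - the 4k pool is skipped (6000 > 4096)
 *	  - the 8k pool is the first fit, so right_pool = 1
 *	  - if the single 8k buffer is already in_use, the 16k and larger
 *	    pools are tried next; if all of them are busy, a fresh buffer is
 *	    allocated from the 8k pci_pool and free_buf is set, so that
 *	    mraid_mm_dealloc_kioc() later returns it to that pool handle
 *	    instead of clearing in_use.
 */
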
/**
 * mraid_mm_alloc_kioc - Returns a uioc_t from free list
 * @adp	: Adapter softstate for this module
 *
 * The kioc_semaphore is initialized with number of kioc nodes in the
 * free kioc pool. If the kioc pool is empty, this function blocks till
 * a kioc becomes free.
 */
static uioc_t *
mraid_mm_alloc_kioc(mraid_mmadp_t *adp)
{
	uioc_t			*kioc;
	struct list_head	*head;
	unsigned long		flags;

	down(&adp->kioc_semaphore);

	spin_lock_irqsave(&adp->kioc_pool_lock, flags);

	head = &adp->kioc_pool;

	if (list_empty(head)) {
		up(&adp->kioc_semaphore);
		spin_unlock_irqrestore(&adp->kioc_pool_lock, flags);

		con_log(CL_ANN, ("megaraid cmm: kioc list empty!\n"));
		return NULL;
	}

	kioc = list_entry(head->next, uioc_t, list);
	list_del_init(&kioc->list);

	spin_unlock_irqrestore(&adp->kioc_pool_lock, flags);

	memset((caddr_t)(unsigned long)kioc->cmdbuf, 0, sizeof(mbox64_t));
	memset((caddr_t) kioc->pthru32, 0, sizeof(mraid_passthru_t));

	kioc->buf_vaddr		= NULL;
	kioc->buf_paddr		= 0;
	kioc->pool_index	= -1;
	kioc->free_buf		= 0;
	kioc->user_data		= NULL;
	kioc->user_data_len	= 0;
	kioc->user_pthru	= NULL;
	kioc->timedout		= 0;

	return kioc;
}

/**
 * mraid_mm_dealloc_kioc - Return kioc to free pool
 * @adp		: Adapter softstate
 * @kioc	: uioc_t node to be returned to free pool
 */
static void
mraid_mm_dealloc_kioc(mraid_mmadp_t *adp, uioc_t *kioc)
{
	mm_dmapool_t	*pool;
	unsigned long	flags;

	if (kioc->pool_index != -1) {
		pool = &adp->dma_pool_list[kioc->pool_index];

		/* This routine may be called in non-isr context also */
		spin_lock_irqsave(&pool->lock, flags);

		/*
		 * While attaching the dma buffer, if we didn't get the
		 * required buffer from the pool, we would have allocated
		 * it at the run time and set the free_buf flag. We must
		 * free that buffer. Otherwise, just mark that the buffer is
		 * not in use
		 */
		if (kioc->free_buf == 1)
			pci_pool_free(pool->handle, kioc->buf_vaddr,
							kioc->buf_paddr);
		else
			pool->in_use = 0;

		spin_unlock_irqrestore(&pool->lock, flags);
	}

	/* Return the kioc to the free pool */
	spin_lock_irqsave(&adp->kioc_pool_lock, flags);
	list_add(&kioc->list, &adp->kioc_pool);
	spin_unlock_irqrestore(&adp->kioc_pool_lock, flags);

	/* increment the free kioc count */
	up(&adp->kioc_semaphore);

	return;
}

/**
 * lld_ioctl - Routine to issue ioctl to low level drvr
 * @adp		: The adapter handle
 * @kioc	: The ioctl packet with kernel addresses
 */
static int
lld_ioctl(mraid_mmadp_t *adp, uioc_t *kioc)
{
	int			rval;
	struct timer_list	timer;
	struct timer_list	*tp = NULL;

	kioc->status	= -ENODATA;
	rval		= adp->issue_uioc(adp->drvr_data, kioc, IOCTL_ISSUE);

	if (rval) return rval;

	/*
	 * Start the timer
	 */
	if (adp->timeout > 0) {
		tp		= &timer;
		init_timer(tp);

		tp->function	= lld_timedout;
		tp->data	= (unsigned long)kioc;
		tp->expires	= jiffies + adp->timeout * HZ;

		add_timer(tp);
	}

	/*
	 * Wait till the low level driver completes the ioctl. After this
	 * call, the ioctl either completed successfully or timedout.
 */
	wait_event(wait_q, (kioc->status != -ENODATA));
	if (tp) {
		del_timer_sync(tp);
	}

	/*
	 * If the command had timedout, we mark the controller offline
	 * before returning
	 */
	if (kioc->timedout) {
		adp->quiescent = 0;
	}

	return kioc->status;
}


/**
 * ioctl_done - callback from the low level driver
 * @kioc	: completed ioctl packet
 */
static void
ioctl_done(uioc_t *kioc)
{
	uint32_t	adapno;
	int		iterator;
	mraid_mmadp_t	*adapter;

	/*
	 * When the kioc returns from driver, make sure it still doesn't
	 * have ENODATA in status. Otherwise, driver will hang on wait_event
	 * forever
	 */
	if (kioc->status == -ENODATA) {
		con_log(CL_ANN, (KERN_WARNING
			"megaraid cmm: lld didn't change status!\n"));

		kioc->status = -EINVAL;
	}

	/*
	 * Check if this kioc was timedout before. If so, nobody is waiting
	 * on this kioc. We don't have to wake up anybody. Instead, we just
	 * have to free the kioc
	 */
	if (kioc->timedout) {
		iterator	= 0;
		adapter		= NULL;
		adapno		= kioc->adapno;

		con_log(CL_ANN, (KERN_WARNING "megaraid cmm: completed "
					"ioctl that was timedout before\n"));

		list_for_each_entry(adapter, &adapters_list_g, list) {
			if (iterator++ == adapno) break;
		}

		kioc->timedout = 0;

		if (adapter) {
			mraid_mm_dealloc_kioc(adapter, kioc);
		}
	}
	else {
		wake_up(&wait_q);
	}
}


/**
 * lld_timedout	- callback from the expired timer
 * @ptr		: ioctl packet that timed out
 */
static void
lld_timedout(unsigned long ptr)
{
	uioc_t *kioc	= (uioc_t *)ptr;

	kioc->status	= -ETIME;
	kioc->timedout	= 1;

	con_log(CL_ANN, (KERN_WARNING "megaraid cmm: ioctl timed out\n"));

	wake_up(&wait_q);
}


/**
 * kioc_to_mimd	- Converter from new back to old format
 * @kioc	: Kernel space IOCTL packet (successfully issued)
 * @mimd	: User space MIMD packet
 */
static int
kioc_to_mimd(uioc_t *kioc, mimd_t __user *mimd)
{
	mimd_t			kmimd;
	uint8_t			opcode;
	uint8_t			subopcode;

	mbox64_t		*mbox64;
	mraid_passthru_t	__user *upthru32;
	mraid_passthru_t	*kpthru32;
	mcontroller_t		cinfo;
	mraid_hba_info_t	*hinfo;


	if (copy_from_user(&kmimd, mimd, sizeof(mimd_t)))
		return (-EFAULT);

	opcode		= kmimd.ui.fcs.opcode;
	subopcode	= kmimd.ui.fcs.subopcode;

	if (opcode == 0x82) {
		switch (subopcode) {

		case MEGAIOC_QADAPINFO:

			hinfo = (mraid_hba_info_t *)(unsigned long)
					kioc->buf_vaddr;

			hinfo_to_cinfo(hinfo, &cinfo);

			if (copy_to_user(kmimd.data, &cinfo, sizeof(cinfo)))
				return (-EFAULT);

			return 0;

		default:
			return (-EINVAL);
		}

		return 0;
	}

	mbox64 = (mbox64_t *)(unsigned long)kioc->cmdbuf;

	if (kioc->user_pthru) {

		upthru32 = kioc->user_pthru;
		kpthru32 = kioc->pthru32;

		if (copy_to_user(&upthru32->scsistatus,
					&kpthru32->scsistatus,
					sizeof(uint8_t))) {
			return (-EFAULT);
		}
	}

	if (kioc->user_data) {
		if (copy_to_user(kioc->user_data, kioc->buf_vaddr,
					kioc->user_data_len)) {
			return (-EFAULT);
		}
	}

	if (copy_to_user(&mimd->mbox[17],
			&mbox64->mbox32.status, sizeof(uint8_t))) {
		return (-EFAULT);
	}

	return 0;
}


/**
 * hinfo_to_cinfo - Convert new format hba info into old format
 * @hinfo	: New format, more comprehensive adapter info
 * @cinfo	: Old format adapter info to support mimd_t apps
 */
static void
hinfo_to_cinfo(mraid_hba_info_t *hinfo, mcontroller_t *cinfo)
{
	if (!hinfo || !cinfo)
		return;

	cinfo->base		= hinfo->baseport;
	cinfo->irq		= hinfo->irq;
	cinfo->numldrv		= hinfo->num_ldrv;
	cinfo->pcibus		= hinfo->pci_bus;
	cinfo->pcidev		= hinfo->pci_slot;
	cinfo->pcifun		= PCI_FUNC(hinfo->pci_dev_fn);
	cinfo->pciid		= hinfo->pci_device_id;
	cinfo->pcivendor	= hinfo->pci_vendor_id;
	cinfo->pcislot		= hinfo->pci_slot;
	cinfo->uid		= hinfo->unique_id;
}


/**
 * mraid_mm_register_adp - Registration routine for low level drivers
 * @lld_adp	: Adapter object
 */
int
mraid_mm_register_adp(mraid_mmadp_t *lld_adp)
{
	mraid_mmadp_t	*adapter;
	mbox64_t	*mbox_list;
	uioc_t		*kioc;
	int		rval;
	int		i;


	if (lld_adp->drvr_type != DRVRTYPE_MBOX)
		return (-EINVAL);

	adapter = kzalloc(sizeof(mraid_mmadp_t), GFP_KERNEL);

	if (!adapter)
		return -ENOMEM;


	adapter->unique_id	= lld_adp->unique_id;
	adapter->drvr_type	= lld_adp->drvr_type;
	adapter->drvr_data	= lld_adp->drvr_data;
	adapter->pdev		= lld_adp->pdev;
	adapter->issue_uioc	= lld_adp->issue_uioc;
	adapter->timeout	= lld_adp->timeout;
	adapter->max_kioc	= lld_adp->max_kioc;
	adapter->quiescent	= 1;

	/*
	 * Allocate single blocks of memory for all required kiocs,
	 * mailboxes and passthru structures. The kioc list is zeroed so
	 * that the error path below can safely check kioc->pthru32.
	 */
	adapter->kioc_list	= kzalloc(sizeof(uioc_t) * lld_adp->max_kioc,
						GFP_KERNEL);
	adapter->mbox_list	= kmalloc(sizeof(mbox64_t) * lld_adp->max_kioc,
						GFP_KERNEL);
	adapter->pthru_dma_pool = pci_pool_create("megaraid mm pthru pool",
						adapter->pdev,
						sizeof(mraid_passthru_t),
						16, 0);

	if (!adapter->kioc_list || !adapter->mbox_list ||
			!adapter->pthru_dma_pool) {

		con_log(CL_ANN, (KERN_WARNING
			"megaraid cmm: out of memory, %s %d\n", __FUNCTION__,
			__LINE__));

		rval = (-ENOMEM);

		goto memalloc_error;
	}

	/*
	 * Slice kioc_list and make a kioc_pool with the individual kiocs
	 */
	INIT_LIST_HEAD(&adapter->kioc_pool);
	spin_lock_init(&adapter->kioc_pool_lock);
	sema_init(&adapter->kioc_semaphore, lld_adp->max_kioc);

	mbox_list	= (mbox64_t *)adapter->mbox_list;

	for (i = 0; i < lld_adp->max_kioc; i++) {

		kioc		= adapter->kioc_list + i;
		kioc->cmdbuf	= (uint64_t)(unsigned long)(mbox_list + i);
		kioc->pthru32	= pci_pool_alloc(adapter->pthru_dma_pool,
						GFP_KERNEL, &kioc->pthru32_h);

		if (!kioc->pthru32) {

			con_log(CL_ANN, (KERN_WARNING
				"megaraid cmm: out of memory, %s %d\n",
					__FUNCTION__, __LINE__));

			rval = (-ENOMEM);

			goto pthru_dma_pool_error;
		}

		list_add_tail(&kioc->list, &adapter->kioc_pool);
	}

	// Setup the dma pools for data buffers
	if ((rval = mraid_mm_setup_dma_pools(adapter)) != 0) {
		goto dma_pool_error;
	}

	list_add_tail(&adapter->list, &adapters_list_g);

	adapters_count_g++;

	return 0;

dma_pool_error:
	/* Do nothing */

pthru_dma_pool_error:

	for (i = 0; i < lld_adp->max_kioc; i++) {
		kioc = adapter->kioc_list + i;
		if (kioc->pthru32) {
			pci_pool_free(adapter->pthru_dma_pool, kioc->pthru32,
				kioc->pthru32_h);
		}
	}

memalloc_error:

	kfree(adapter->kioc_list);
	kfree(adapter->mbox_list);

	if (adapter->pthru_dma_pool)
		pci_pool_destroy(adapter->pthru_dma_pool);

	kfree(adapter);

	return rval;
}

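/*
 * Illustration only (not part of the driver): the shape of the registration
 * call a low level driver (e.g. megaraid_mbox) is expected to make. The
 * field names are exactly the ones copied by mraid_mm_register_adp() above;
 * the values shown are arbitrary examples and the softstate/callback names
 * are hypothetical.
 *
 *	mraid_mmadp_t	adp;
 *
 *	adp.unique_id	= my_unique_id;		// later passed to mraid_mm_unregister_adp()
 *	adp.drvr_type	= DRVRTYPE_MBOX;	// anything else is rejected with -EINVAL
 *	adp.drvr_data	= my_softstate_handle;	// handed back as first argument of issue_uioc()
 *	adp.pdev	= pdev;			// used for the pci_pool allocations
 *	adp.issue_uioc	= my_issue_uioc;	// called with IOCTL_ISSUE for every kioc
 *	adp.timeout	= 300;			// seconds before lld_timedout() fires
 *	adp.max_kioc	= 32;			// number of preallocated kiocs
 *
 *	rval = mraid_mm_register_adp(&adp);
 */
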

/**
 * mraid_mm_adapter_app_handle - return the application handle for this adapter
 * @unique_id	: adapter unique identifier
 *
 * For the given driver data, locate the adapter in our global list and
 * return the corresponding handle, which is also used by applications to
 * uniquely identify an adapter.
 *
 * Return adapter handle if found in the list.
 * Return 0 if adapter could not be located, should never happen though.
 */
uint32_t
mraid_mm_adapter_app_handle(uint32_t unique_id)
{
	mraid_mmadp_t	*adapter;
	mraid_mmadp_t	*tmp;
	int		index = 0;

	list_for_each_entry_safe(adapter, tmp, &adapters_list_g, list) {

		if (adapter->unique_id == unique_id) {

			return MKADAP(index);
		}

		index++;
	}

	return 0;
}

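/*
 * Illustration only (not part of the driver): the handle returned above is
 * simply the adapter's position in adapters_list_g wrapped by MKADAP(). A
 * management application stores it in mimd.ui.fcs.adapno, and
 * mraid_mm_get_adapter()/mimd_to_kioc() recover the index with GETADAP()
 * to walk the same list. MKADAP() and GETADAP() are defined in the megaraid
 * headers; their exact bit layout is not assumed here.
 *
 *	handle = mraid_mm_adapter_app_handle(unique_id);	// MKADAP(index)
 *	mimd.ui.fcs.adapno = handle;				// sent back by the app
 *	adapno = GETADAP(mimd.ui.fcs.adapno);			// index into adapters_list_g
 */
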

/**
 * mraid_mm_setup_dma_pools - Set up dma buffer pools per adapter
 * @adp	: Adapter softstate
 *
 * We maintain a pool of dma buffers per each adapter. Each pool has one
 * buffer. E.g, we may have 5 dma pools - one each for 4k, 8k ... 64k buffers.
 * We have just one 4k buffer in 4k pool, one 8k buffer in 8k pool etc. We
 * don't want to waste too much memory by allocating more buffers per each
 * pool.
 */
static int
mraid_mm_setup_dma_pools(mraid_mmadp_t *adp)
{
	mm_dmapool_t	*pool;
	int		bufsize;
	int		i;

	/*
	 * Create MAX_DMA_POOLS number of pools
	 */
	bufsize = MRAID_MM_INIT_BUFF_SIZE;

	for (i = 0; i < MAX_DMA_POOLS; i++) {

		pool = &adp->dma_pool_list[i];

		pool->buf_size = bufsize;
		spin_lock_init(&pool->lock);

		pool->handle = pci_pool_create("megaraid mm data buffer",
						adp->pdev, bufsize, 16, 0);

		if (!pool->handle) {
			goto dma_pool_setup_error;
		}

		pool->vaddr = pci_pool_alloc(pool->handle, GFP_KERNEL,
							&pool->paddr);

		if (!pool->vaddr)
			goto dma_pool_setup_error;

		bufsize = bufsize * 2;
	}

	return 0;

dma_pool_setup_error:

	mraid_mm_teardown_dma_pools(adp);
	return (-ENOMEM);
}


/**
 * mraid_mm_unregister_adp - Unregister routine for low level drivers
 * @unique_id	: UID of the adapter
 *
 * Assumes no outstanding ioctls to llds.
 */
int
mraid_mm_unregister_adp(uint32_t unique_id)
{
	mraid_mmadp_t	*adapter;
	mraid_mmadp_t	*tmp;

	list_for_each_entry_safe(adapter, tmp, &adapters_list_g, list) {


		if (adapter->unique_id == unique_id) {

			adapters_count_g--;

			list_del_init(&adapter->list);

			mraid_mm_free_adp_resources(adapter);

			kfree(adapter);

			con_log(CL_ANN, (
				"megaraid cmm: Unregistered one adapter:%#x\n",
				unique_id));

			return 0;
		}
	}

	return (-ENODEV);
}

/**
 * mraid_mm_free_adp_resources - Free adapter softstate
 * @adp	: Adapter softstate
 */
static void
mraid_mm_free_adp_resources(mraid_mmadp_t *adp)
{
	uioc_t	*kioc;
	int	i;

	mraid_mm_teardown_dma_pools(adp);

	for (i = 0; i < adp->max_kioc; i++) {

		kioc = adp->kioc_list + i;

		pci_pool_free(adp->pthru_dma_pool, kioc->pthru32,
				kioc->pthru32_h);
	}

	kfree(adp->kioc_list);
	kfree(adp->mbox_list);

	pci_pool_destroy(adp->pthru_dma_pool);


	return;
}


/**
 * mraid_mm_teardown_dma_pools - Free all per adapter dma buffers
 * @adp	: Adapter softstate
 */
static void
mraid_mm_teardown_dma_pools(mraid_mmadp_t *adp)
{
	int		i;
	mm_dmapool_t	*pool;

	for (i = 0; i < MAX_DMA_POOLS; i++) {

		pool = &adp->dma_pool_list[i];

		if (pool->handle) {

			if (pool->vaddr)
				pci_pool_free(pool->handle, pool->vaddr,
							pool->paddr);

			pci_pool_destroy(pool->handle);
			pool->handle = NULL;
		}
	}

	return;
}

/**
 * mraid_mm_init	- Module entry point
 */
static int __init
mraid_mm_init(void)
{
	// Announce the driver version
	con_log(CL_ANN, (KERN_INFO "megaraid cmm: %s %s\n",
		LSI_COMMON_MOD_VERSION, LSI_COMMON_MOD_EXT_VERSION));

	majorno = register_chrdev(0, "megadev", &lsi_fops);

	if (majorno < 0) {
		con_log(CL_ANN, ("megaraid cmm: cannot get major\n"));
		return majorno;
	}

	init_waitqueue_head(&wait_q);

	INIT_LIST_HEAD(&adapters_list_g);

	return 0;
}


#ifdef CONFIG_COMPAT
/**
 * mraid_mm_compat_ioctl	- 32bit to 64bit ioctl conversion routine
 * @filep	: file operations pointer (ignored)
 * @cmd		: ioctl command
 * @arg		: user ioctl packet
 */
static long
mraid_mm_compat_ioctl(struct file *filep, unsigned int cmd,
		      unsigned long arg)
{
	int err;

	err = mraid_mm_ioctl(NULL, filep, cmd, arg);

	return err;
}
#endif

/**
 * mraid_mm_exit	- Module exit point
 */
static void __exit
mraid_mm_exit(void)
{
	con_log(CL_DLEVEL1 , ("exiting common mod\n"));

	unregister_chrdev(majorno, "megadev");
}

module_init(mraid_mm_init);
module_exit(mraid_mm_exit);

/* vi: set ts=8 sw=8 tw=78: */