// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *
 *			Linux MegaRAID device driver
 *
 * Copyright (c) 2003-2004  LSI Logic Corporation.
 *
 * FILE		: megaraid_mm.c
 * Version	: v2.20.2.7 (Jul 16 2006)
 *
 * Common management module
 */
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include "megaraid_mm.h"


// Entry points for char node driver
static DEFINE_MUTEX(mraid_mm_mutex);
static int mraid_mm_open(struct inode *, struct file *);
static long mraid_mm_unlocked_ioctl(struct file *, uint, unsigned long);


// routines to convert to and from the old format
static int mimd_to_kioc(mimd_t __user *, mraid_mmadp_t *, uioc_t *);
static int kioc_to_mimd(uioc_t *, mimd_t __user *);


// Helper functions
static int handle_drvrcmd(void __user *, uint8_t, int *);
static int lld_ioctl(mraid_mmadp_t *, uioc_t *);
static void ioctl_done(uioc_t *);
static void lld_timedout(struct timer_list *);
static void hinfo_to_cinfo(mraid_hba_info_t *, mcontroller_t *);
static mraid_mmadp_t *mraid_mm_get_adapter(mimd_t __user *, int *);
static uioc_t *mraid_mm_alloc_kioc(mraid_mmadp_t *);
static void mraid_mm_dealloc_kioc(mraid_mmadp_t *, uioc_t *);
static int mraid_mm_attach_buf(mraid_mmadp_t *, uioc_t *, int);
static int mraid_mm_setup_dma_pools(mraid_mmadp_t *);
static void mraid_mm_free_adp_resources(mraid_mmadp_t *);
static void mraid_mm_teardown_dma_pools(mraid_mmadp_t *);

#ifdef CONFIG_COMPAT
static long mraid_mm_compat_ioctl(struct file *, unsigned int, unsigned long);
#endif

MODULE_AUTHOR("LSI Logic Corporation");
MODULE_DESCRIPTION("LSI Logic Management Module");
MODULE_LICENSE("GPL");
MODULE_VERSION(LSI_COMMON_MOD_VERSION);

static int dbglevel = CL_ANN;
module_param_named(dlevel, dbglevel, int, 0);
MODULE_PARM_DESC(dlevel, "Debug level (default=0)");

EXPORT_SYMBOL(mraid_mm_register_adp);
EXPORT_SYMBOL(mraid_mm_unregister_adp);
EXPORT_SYMBOL(mraid_mm_adapter_app_handle);

static uint32_t drvr_ver = 0x02200207;

static int adapters_count_g;
static struct list_head adapters_list_g;

static wait_queue_head_t wait_q;

static const struct file_operations lsi_fops = {
	.open	= mraid_mm_open,
	.unlocked_ioctl = mraid_mm_unlocked_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = mraid_mm_compat_ioctl,
#endif
	.owner	= THIS_MODULE,
	.llseek = noop_llseek,
};

static struct miscdevice megaraid_mm_dev = {
	.minor	= MISC_DYNAMIC_MINOR,
	.name	= "megadev0",
	.fops	= &lsi_fops,
};
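
/*
 * Usage note (summary of the registration interface implemented below):
 * a low level driver fills an mraid_mmadp_t with its unique_id, drvr_type
 * (only DRVRTYPE_MBOX is accepted), drvr_data, pdev, issue_uioc callback,
 * timeout and max_kioc, and calls mraid_mm_register_adp(). On teardown it
 * calls mraid_mm_unregister_adp() with the same unique_id.
 */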

/**
 * mraid_mm_open - open routine for char node interface
 * @inode	: unused
 * @filep	: unused
 *
 * Allow ioctl operations by apps only if they have superuser privilege.
 */
static int
mraid_mm_open(struct inode *inode, struct file *filep)
{
	/*
	 * Only allow superuser to access private ioctl interface
	 */
	if (!capable(CAP_SYS_ADMIN)) return (-EACCES);

	return 0;
}

/**
 * mraid_mm_ioctl - module entry-point for ioctls
 * @filep	: file operations pointer (ignored)
 * @cmd		: ioctl command
 * @arg		: user ioctl packet
 */
static int
mraid_mm_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
{
	uioc_t *kioc;
	char signature[EXT_IOCTL_SIGN_SZ] = {0};
	int rval;
	mraid_mmadp_t *adp;
	uint8_t old_ioctl;
	int drvrcmd_rval;
	void __user *argp = (void __user *)arg;

	/*
	 * Make sure only USCSICMD are issued through this interface;
	 * MIMD applications may still issue other commands.
	 */
	if ((_IOC_TYPE(cmd) != MEGAIOC_MAGIC) && (cmd != USCSICMD)) {
		return (-EINVAL);
	}

	/*
	 * Look for signature to see if this is the new or old ioctl format.
	 */
	if (copy_from_user(signature, argp, EXT_IOCTL_SIGN_SZ)) {
		con_log(CL_ANN, (KERN_WARNING
			"megaraid cmm: copy from usr addr failed\n"));
		return (-EFAULT);
	}

	if (memcmp(signature, EXT_IOCTL_SIGN, EXT_IOCTL_SIGN_SZ) == 0)
		old_ioctl = 0;
	else
		old_ioctl = 1;

	/*
	 * At present, we don't support the new ioctl packet
	 */
	if (!old_ioctl)
		return (-EINVAL);

	/*
	 * If it is a driver ioctl (as opposed to fw ioctls), then we can
	 * handle the command locally. rval > 0 means it is not a drvr cmd
	 */
	rval = handle_drvrcmd(argp, old_ioctl, &drvrcmd_rval);

	if (rval < 0)
		return rval;
	else if (rval == 0)
		return drvrcmd_rval;

	rval = 0;
	if ((adp = mraid_mm_get_adapter(argp, &rval)) == NULL) {
		return rval;
	}

	/*
	 * Check if adapter can accept ioctl. We may have marked it offline
	 * if any previous kioc had timedout on this controller.
	 */
	if (!adp->quiescent) {
		con_log(CL_ANN, (KERN_WARNING
			"megaraid cmm: controller cannot accept cmds due to "
			"earlier errors\n"));
		return -EFAULT;
	}

	/*
	 * The following call will block till a kioc is available, or return
	 * NULL if the kioc list for this adapter (mraid_mmadp_t) is
	 * unexpectedly empty.
	 */
	kioc = mraid_mm_alloc_kioc(adp);
	if (!kioc)
		return -ENXIO;

	/*
	 * User sent the old mimd_t ioctl packet. Convert it to uioc_t.
	 */
	if ((rval = mimd_to_kioc(argp, adp, kioc))) {
		mraid_mm_dealloc_kioc(adp, kioc);
		return rval;
	}

	kioc->done = ioctl_done;

	/*
	 * Issue the IOCTL to the low level driver. After the IOCTL completes
	 * release the kioc if and only if it was _not_ timedout. If it was
	 * timedout, that means that resources are still with low level driver.
	 */
	if ((rval = lld_ioctl(adp, kioc))) {

		if (!kioc->timedout)
			mraid_mm_dealloc_kioc(adp, kioc);

		return rval;
	}

	/*
	 * Convert the kioc back to user space
	 */
	rval = kioc_to_mimd(kioc, argp);

	/*
	 * Return the kioc to free pool
	 */
	mraid_mm_dealloc_kioc(adp, kioc);

	return rval;
}

static long
mraid_mm_unlocked_ioctl(struct file *filep, unsigned int cmd,
			unsigned long arg)
{
	int err;

	/* inconsistent: mraid_mm_compat_ioctl doesn't take this mutex */
	mutex_lock(&mraid_mm_mutex);
	err = mraid_mm_ioctl(filep, cmd, arg);
	mutex_unlock(&mraid_mm_mutex);

	return err;
}

/**
 * mraid_mm_get_adapter - Returns the corresponding adapter for the mimd packet
 * @umimd	: User space mimd_t ioctl packet
 * @rval	: returned success/error status
 *
 * The function return value is a pointer to the located adapter.
 */
static mraid_mmadp_t *
mraid_mm_get_adapter(mimd_t __user *umimd, int *rval)
{
	mraid_mmadp_t *adapter;
	mimd_t mimd;
	uint32_t adapno;
	int iterator;


	if (copy_from_user(&mimd, umimd, sizeof(mimd_t))) {
		*rval = -EFAULT;
		return NULL;
	}

	adapno = GETADAP(mimd.ui.fcs.adapno);

	if (adapno >= adapters_count_g) {
		*rval = -ENODEV;
		return NULL;
	}

	adapter = NULL;
	iterator = 0;

	list_for_each_entry(adapter, &adapters_list_g, list) {
		if (iterator++ == adapno) break;
	}

	if (!adapter) {
		*rval = -ENODEV;
		return NULL;
	}

	return adapter;
}

/**
 * handle_drvrcmd - Checks if the opcode is a driver cmd and if it is, handles it.
 * @arg		: packet sent by the user app
 * @old_ioctl	: mimd if 1; uioc otherwise
 * @rval	: pointer for command's returned value (not function status)
 */
static int
handle_drvrcmd(void __user *arg, uint8_t old_ioctl, int *rval)
{
	mimd_t __user *umimd;
	mimd_t kmimd;
	uint8_t opcode;
	uint8_t subopcode;

	if (old_ioctl)
		goto old_packet;
	else
		goto new_packet;

new_packet:
	return (-ENOTSUPP);

old_packet:
	*rval = 0;
	umimd = arg;

	if (copy_from_user(&kmimd, umimd, sizeof(mimd_t)))
		return (-EFAULT);

	opcode = kmimd.ui.fcs.opcode;
	subopcode = kmimd.ui.fcs.subopcode;

	/*
	 * If the opcode is 0x82 and the subopcode is either GET_DRVRVER or
	 * GET_NUMADP, then we can handle. Otherwise we should return 1 to
	 * indicate that we cannot handle this.
	 */
	if (opcode != 0x82)
		return 1;

	switch (subopcode) {

	case MEGAIOC_QDRVRVER:

		if (copy_to_user(kmimd.data, &drvr_ver, sizeof(uint32_t)))
			return (-EFAULT);

		return 0;

	case MEGAIOC_QNADAP:

		*rval = adapters_count_g;

		if (copy_to_user(kmimd.data, &adapters_count_g,
				sizeof(uint32_t)))
			return (-EFAULT);

		return 0;

	default:
		/* cannot handle */
		return 1;
	}

	return 0;
}

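/*
 * Note on the old MIMD opcodes converted below: 0x82 carries driver-level
 * queries (e.g. MEGAIOC_QADAPINFO), while 0x80 and 0x81 carry firmware
 * mailbox commands. The two mailbox variants differ only in where the old
 * packet keeps its data buffer and transfer length (ui.fcs.buffer/length
 * for 0x81, data/inlen/outlen for 0x80).
 */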

/**
 * mimd_to_kioc	- Converter from old to new ioctl format
 * @umimd	: user space old MIMD IOCTL
 * @adp		: adapter softstate
 * @kioc	: kernel space new format IOCTL
 *
 * Routine to convert MIMD interface IOCTL to new interface IOCTL packet. The
 * new packet is in kernel space so that driver can perform operations on it
 * freely.
 */

static int
mimd_to_kioc(mimd_t __user *umimd, mraid_mmadp_t *adp, uioc_t *kioc)
{
	mbox64_t *mbox64;
	mbox_t *mbox;
	mraid_passthru_t *pthru32;
	uint32_t adapno;
	uint8_t opcode;
	uint8_t subopcode;
	mimd_t mimd;

	if (copy_from_user(&mimd, umimd, sizeof(mimd_t)))
		return (-EFAULT);

	/*
	 * Applications are not allowed to send extd pthru
	 */
	if ((mimd.mbox[0] == MBOXCMD_PASSTHRU64) ||
			(mimd.mbox[0] == MBOXCMD_EXTPTHRU))
		return (-EINVAL);

	opcode = mimd.ui.fcs.opcode;
	subopcode = mimd.ui.fcs.subopcode;
	adapno = GETADAP(mimd.ui.fcs.adapno);

	if (adapno >= adapters_count_g)
		return (-ENODEV);

	kioc->adapno = adapno;
	kioc->mb_type = MBOX_LEGACY;
	kioc->app_type = APPTYPE_MIMD;

	switch (opcode) {

	case 0x82:

		if (subopcode == MEGAIOC_QADAPINFO) {

			kioc->opcode = GET_ADAP_INFO;
			kioc->data_dir = UIOC_RD;
			kioc->xferlen = sizeof(mraid_hba_info_t);

			if (mraid_mm_attach_buf(adp, kioc, kioc->xferlen))
				return (-ENOMEM);
		}
		else {
			con_log(CL_ANN, (KERN_WARNING
					"megaraid cmm: Invalid subop\n"));
			return (-EINVAL);
		}

		break;

	case 0x81:

		kioc->opcode = MBOX_CMD;
		kioc->xferlen = mimd.ui.fcs.length;
		kioc->user_data_len = kioc->xferlen;
		kioc->user_data = mimd.ui.fcs.buffer;

		if (mraid_mm_attach_buf(adp, kioc, kioc->xferlen))
			return (-ENOMEM);

		if (mimd.outlen) kioc->data_dir = UIOC_RD;
		if (mimd.inlen) kioc->data_dir |= UIOC_WR;

		break;

	case 0x80:

		kioc->opcode = MBOX_CMD;
		kioc->xferlen = (mimd.outlen > mimd.inlen) ?
				mimd.outlen : mimd.inlen;
		kioc->user_data_len = kioc->xferlen;
		kioc->user_data = mimd.data;

		if (mraid_mm_attach_buf(adp, kioc, kioc->xferlen))
			return (-ENOMEM);

		if (mimd.outlen) kioc->data_dir = UIOC_RD;
		if (mimd.inlen) kioc->data_dir |= UIOC_WR;

		break;

	default:
		return (-EINVAL);
	}

	/*
	 * If driver command, nothing else to do
	 */
	if (opcode == 0x82)
		return 0;

	/*
	 * This is a mailbox cmd; copy the mailbox from mimd
	 */
	mbox64 = (mbox64_t *)((unsigned long)kioc->cmdbuf);
	mbox = &mbox64->mbox32;
	memcpy(mbox, mimd.mbox, 14);

	if (mbox->cmd != MBOXCMD_PASSTHRU) {	// regular DCMD

		mbox->xferaddr = (uint32_t)kioc->buf_paddr;

		if (kioc->data_dir & UIOC_WR) {
			if (copy_from_user(kioc->buf_vaddr, kioc->user_data,
							kioc->xferlen)) {
				return (-EFAULT);
			}
		}

		return 0;
	}

	/*
	 * This is a regular 32-bit pthru cmd; mbox points to pthru struct.
	 * Just like in the above case, the beginning of the memory block is
	 * treated as a mailbox. The passthru will begin at the next 1K
	 * boundary, and the data will start 1K after that.
	 */
	pthru32 = kioc->pthru32;
	kioc->user_pthru = &umimd->pthru;
	mbox->xferaddr = (uint32_t)kioc->pthru32_h;

	if (copy_from_user(pthru32, kioc->user_pthru,
			sizeof(mraid_passthru_t))) {
		return (-EFAULT);
	}

	pthru32->dataxferaddr = kioc->buf_paddr;
	if (kioc->data_dir & UIOC_WR) {
		if (pthru32->dataxferlen > kioc->xferlen)
			return -EINVAL;
		if (copy_from_user(kioc->buf_vaddr, kioc->user_data,
					pthru32->dataxferlen)) {
			return (-EFAULT);
		}
	}

	return 0;
}

/**
 * mraid_mm_attach_buf - Attach a free dma buffer for required size
 * @adp		: Adapter softstate
 * @kioc	: kioc that the buffer needs to be attached to
 * @xferlen	: required length for buffer
 *
 * First we search for a pool with the smallest buffer that is >= @xferlen.
 * If that pool has no free buffer, we will try for the next bigger size. If
 * none is available, we will try to allocate a new buffer from the smallest
 * pool that is >= @xferlen and attach it to the kioc.
 */
static int
mraid_mm_attach_buf(mraid_mmadp_t *adp, uioc_t *kioc, int xferlen)
{
	mm_dmapool_t *pool;
	int right_pool = -1;
	unsigned long flags;
	int i;

	kioc->pool_index = -1;
	kioc->buf_vaddr = NULL;
	kioc->buf_paddr = 0;
	kioc->free_buf = 0;

	/*
	 * We need xferlen amount of memory. See if we can get it from our
	 * dma pools. If we don't get exact size, we will try bigger buffer
	 */

	for (i = 0; i < MAX_DMA_POOLS; i++) {

		pool = &adp->dma_pool_list[i];

		if (xferlen > pool->buf_size)
			continue;

		if (right_pool == -1)
			right_pool = i;

		spin_lock_irqsave(&pool->lock, flags);

		if (!pool->in_use) {

			pool->in_use = 1;
			kioc->pool_index = i;
			kioc->buf_vaddr = pool->vaddr;
			kioc->buf_paddr = pool->paddr;

			spin_unlock_irqrestore(&pool->lock, flags);
			return 0;
		}
		else {
			spin_unlock_irqrestore(&pool->lock, flags);
			continue;
		}
	}

	/*
	 * If xferlen doesn't match any of our pools, return error
	 */
	if (right_pool == -1)
		return -EINVAL;

	/*
	 * We did not get any buffer from the preallocated pool. Let us try
	 * to allocate one new buffer. NOTE: this allocation uses GFP_ATOMIC
	 * while the pool lock is held, so it will not block.
	 */
	pool = &adp->dma_pool_list[right_pool];

	spin_lock_irqsave(&pool->lock, flags);

	kioc->pool_index = right_pool;
	kioc->free_buf = 1;
	kioc->buf_vaddr = dma_pool_alloc(pool->handle, GFP_ATOMIC,
						&kioc->buf_paddr);
	spin_unlock_irqrestore(&pool->lock, flags);

	if (!kioc->buf_vaddr)
		return -ENOMEM;

	return 0;
}

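/*
 * kioc allocation pairs a counting semaphore (kioc_semaphore) with the
 * kioc_pool list: down() blocks until a kioc is free, while the spinlock
 * protects the list manipulation itself; dealloc reverses both steps.
 */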
/**
 * mraid_mm_alloc_kioc - Returns a uioc_t from free list
 * @adp	: Adapter softstate for this module
 *
 * The kioc_semaphore is initialized with the number of kioc nodes in the
 * free kioc pool. If the kioc pool is empty, this function blocks till
 * a kioc becomes free.
 */
static uioc_t *
mraid_mm_alloc_kioc(mraid_mmadp_t *adp)
{
	uioc_t *kioc;
	struct list_head *head;
	unsigned long flags;

	down(&adp->kioc_semaphore);

	spin_lock_irqsave(&adp->kioc_pool_lock, flags);

	head = &adp->kioc_pool;

	if (list_empty(head)) {
		up(&adp->kioc_semaphore);
		spin_unlock_irqrestore(&adp->kioc_pool_lock, flags);

		con_log(CL_ANN, ("megaraid cmm: kioc list empty!\n"));
		return NULL;
	}

	kioc = list_entry(head->next, uioc_t, list);
	list_del_init(&kioc->list);

	spin_unlock_irqrestore(&adp->kioc_pool_lock, flags);

	memset((caddr_t)(unsigned long)kioc->cmdbuf, 0, sizeof(mbox64_t));
	memset((caddr_t) kioc->pthru32, 0, sizeof(mraid_passthru_t));

	kioc->buf_vaddr = NULL;
	kioc->buf_paddr = 0;
	kioc->pool_index = -1;
	kioc->free_buf = 0;
	kioc->user_data = NULL;
	kioc->user_data_len = 0;
	kioc->user_pthru = NULL;
	kioc->timedout = 0;

	return kioc;
}

/**
 * mraid_mm_dealloc_kioc - Return kioc to free pool
 * @adp		: Adapter softstate
 * @kioc	: uioc_t node to be returned to free pool
 */
static void
mraid_mm_dealloc_kioc(mraid_mmadp_t *adp, uioc_t *kioc)
{
	mm_dmapool_t *pool;
	unsigned long flags;

	if (kioc->pool_index != -1) {
		pool = &adp->dma_pool_list[kioc->pool_index];

		/* This routine may be called in non-isr context also */
		spin_lock_irqsave(&pool->lock, flags);

		/*
		 * While attaching the dma buffer, if we didn't get the
		 * required buffer from the pool, we would have allocated
		 * it at the run time and set the free_buf flag. We must
		 * free that buffer. Otherwise, just mark that the buffer is
		 * not in use
		 */
		if (kioc->free_buf == 1)
			dma_pool_free(pool->handle, kioc->buf_vaddr,
							kioc->buf_paddr);
		else
			pool->in_use = 0;

		spin_unlock_irqrestore(&pool->lock, flags);
	}

	/* Return the kioc to the free pool */
	spin_lock_irqsave(&adp->kioc_pool_lock, flags);
	list_add(&kioc->list, &adp->kioc_pool);
	spin_unlock_irqrestore(&adp->kioc_pool_lock, flags);

	/* increment the free kioc count */
	up(&adp->kioc_semaphore);

	return;
}

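/*
 * Note: lld_ioctl() arms an on-stack timer only when the adapter was
 * registered with a non-zero timeout; timeout.timer.function is checked
 * after wait_event() so the timer is deleted and destroyed only if it
 * was actually set up.
 */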
/**
 * lld_ioctl - Routine to issue ioctl to low level drvr
 * @adp		: The adapter handle
 * @kioc	: The ioctl packet with kernel addresses
 */
static int
lld_ioctl(mraid_mmadp_t *adp, uioc_t *kioc)
{
	int rval;
	struct uioc_timeout timeout = { };

	kioc->status = -ENODATA;
	rval = adp->issue_uioc(adp->drvr_data, kioc, IOCTL_ISSUE);

	if (rval) return rval;

	/*
	 * Start the timer
	 */
	if (adp->timeout > 0) {
		timeout.uioc = kioc;
		timer_setup_on_stack(&timeout.timer, lld_timedout, 0);

		timeout.timer.expires = jiffies + adp->timeout * HZ;

		add_timer(&timeout.timer);
	}

	/*
	 * Wait till the low level driver completes the ioctl. After this
	 * call, the ioctl either completed successfully or timedout.
	 */
	wait_event(wait_q, (kioc->status != -ENODATA));
	if (timeout.timer.function) {
		del_timer_sync(&timeout.timer);
		destroy_timer_on_stack(&timeout.timer);
	}

	/*
	 * If the command had timedout, we mark the controller offline
	 * before returning
	 */
	if (kioc->timedout) {
		adp->quiescent = 0;
	}

	return kioc->status;
}


/**
 * ioctl_done - callback from the low level driver
 * @kioc	: completed ioctl packet
 */
static void
ioctl_done(uioc_t *kioc)
{
	uint32_t adapno;
	int iterator;
	mraid_mmadp_t *adapter;

	/*
	 * When the kioc returns from the driver, make sure its status is no
	 * longer -ENODATA. Otherwise, the driver will hang on wait_event
	 * forever
	 */
	if (kioc->status == -ENODATA) {
		con_log(CL_ANN, (KERN_WARNING
			"megaraid cmm: lld didn't change status!\n"));

		kioc->status = -EINVAL;
	}

	/*
	 * Check if this kioc was timedout before. If so, nobody is waiting
	 * on this kioc. We don't have to wake up anybody. Instead, we just
	 * have to free the kioc
	 */
	if (kioc->timedout) {
		iterator = 0;
		adapter = NULL;
		adapno = kioc->adapno;

		con_log(CL_ANN, (KERN_WARNING "megaraid cmm: completed "
					"ioctl that was timedout before\n"));

		list_for_each_entry(adapter, &adapters_list_g, list) {
			if (iterator++ == adapno) break;
		}

		kioc->timedout = 0;

		if (adapter) {
			mraid_mm_dealloc_kioc(adapter, kioc);
		}
	}
	else {
		wake_up(&wait_q);
	}
}


/**
 * lld_timedout	- callback from the expired timer
 * @t		: timer that timed out
 */
static void
lld_timedout(struct timer_list *t)
{
	struct uioc_timeout *timeout = from_timer(timeout, t, timer);
	uioc_t *kioc = timeout->uioc;

	kioc->status = -ETIME;
	kioc->timedout = 1;

	con_log(CL_ANN, (KERN_WARNING "megaraid cmm: ioctl timed out\n"));

	wake_up(&wait_q);
}


/**
 * kioc_to_mimd	- Converter from new back to old format
 * @kioc	: Kernel space IOCTL packet (successfully issued)
 * @mimd	: User space MIMD packet
 */
static int
kioc_to_mimd(uioc_t *kioc, mimd_t __user *mimd)
{
	mimd_t kmimd;
	uint8_t opcode;
	uint8_t subopcode;

	mbox64_t *mbox64;
	mraid_passthru_t __user *upthru32;
	mraid_passthru_t *kpthru32;
	mcontroller_t cinfo;
	mraid_hba_info_t *hinfo;


	if (copy_from_user(&kmimd, mimd, sizeof(mimd_t)))
		return (-EFAULT);

	opcode = kmimd.ui.fcs.opcode;
	subopcode = kmimd.ui.fcs.subopcode;

	if (opcode == 0x82) {
		switch (subopcode) {

		case MEGAIOC_QADAPINFO:

			hinfo = (mraid_hba_info_t *)(unsigned long)
					kioc->buf_vaddr;

			hinfo_to_cinfo(hinfo, &cinfo);

			if (copy_to_user(kmimd.data, &cinfo, sizeof(cinfo)))
				return (-EFAULT);

			return 0;

		default:
			return (-EINVAL);
		}

		return 0;
	}

	mbox64 = (mbox64_t *)(unsigned long)kioc->cmdbuf;

	if (kioc->user_pthru) {

		upthru32 = kioc->user_pthru;
		kpthru32 = kioc->pthru32;

		if (copy_to_user(&upthru32->scsistatus,
					&kpthru32->scsistatus,
					sizeof(uint8_t))) {
			return (-EFAULT);
		}
	}

	if (kioc->user_data) {
		if (copy_to_user(kioc->user_data, kioc->buf_vaddr,
					kioc->user_data_len)) {
			return (-EFAULT);
		}
	}

	if (copy_to_user(&mimd->mbox[17],
			&mbox64->mbox32.status, sizeof(uint8_t))) {
		return (-EFAULT);
	}

	return 0;
}


/**
 * hinfo_to_cinfo - Convert new format hba info into old format
 * @hinfo	: New format, more comprehensive adapter info
 * @cinfo	: Old format adapter info to support mimd_t apps
 */
static void
hinfo_to_cinfo(mraid_hba_info_t *hinfo, mcontroller_t *cinfo)
{
	if (!hinfo || !cinfo)
		return;

	cinfo->base = hinfo->baseport;
	cinfo->irq = hinfo->irq;
	cinfo->numldrv = hinfo->num_ldrv;
	cinfo->pcibus = hinfo->pci_bus;
	cinfo->pcidev = hinfo->pci_slot;
	cinfo->pcifun = PCI_FUNC(hinfo->pci_dev_fn);
	cinfo->pciid = hinfo->pci_device_id;
	cinfo->pcivendor = hinfo->pci_vendor_id;
	cinfo->pcislot = hinfo->pci_slot;
	cinfo->uid = hinfo->unique_id;
}

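/*
 * Resources set up per adapter at registration time: an array of max_kioc
 * kiocs, one mbox64_t per kioc, and a dma pool from which each kioc gets
 * its 32-bit passthru structure; the data buffer dma pools are created
 * separately by mraid_mm_setup_dma_pools().
 */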

/**
 * mraid_mm_register_adp - Registration routine for low level drivers
 * @lld_adp	: Adapter object
 */
int
mraid_mm_register_adp(mraid_mmadp_t *lld_adp)
{
	mraid_mmadp_t *adapter;
	mbox64_t *mbox_list;
	uioc_t *kioc;
	uint32_t rval;
	int i;


	if (lld_adp->drvr_type != DRVRTYPE_MBOX)
		return (-EINVAL);

	adapter = kzalloc(sizeof(mraid_mmadp_t), GFP_KERNEL);

	if (!adapter)
		return -ENOMEM;


	adapter->unique_id = lld_adp->unique_id;
	adapter->drvr_type = lld_adp->drvr_type;
	adapter->drvr_data = lld_adp->drvr_data;
	adapter->pdev = lld_adp->pdev;
	adapter->issue_uioc = lld_adp->issue_uioc;
	adapter->timeout = lld_adp->timeout;
	adapter->max_kioc = lld_adp->max_kioc;
	adapter->quiescent = 1;

	/*
	 * Allocate single blocks of memory for all required kiocs,
	 * mailboxes and passthru structures.
	 */
	adapter->kioc_list = kmalloc_array(lld_adp->max_kioc,
					   sizeof(uioc_t),
					   GFP_KERNEL);
	adapter->mbox_list = kmalloc_array(lld_adp->max_kioc,
					   sizeof(mbox64_t),
					   GFP_KERNEL);
	adapter->pthru_dma_pool = dma_pool_create("megaraid mm pthru pool",
						&adapter->pdev->dev,
						sizeof(mraid_passthru_t),
						16, 0);

	if (!adapter->kioc_list || !adapter->mbox_list ||
			!adapter->pthru_dma_pool) {

		con_log(CL_ANN, (KERN_WARNING
			"megaraid cmm: out of memory, %s %d\n", __func__,
			__LINE__));

		rval = (-ENOMEM);

		goto memalloc_error;
	}

	/*
	 * Slice kioc_list and make a kioc_pool with the individual kiocs
	 */
	INIT_LIST_HEAD(&adapter->kioc_pool);
	spin_lock_init(&adapter->kioc_pool_lock);
	sema_init(&adapter->kioc_semaphore, lld_adp->max_kioc);

	mbox_list = (mbox64_t *)adapter->mbox_list;

	for (i = 0; i < lld_adp->max_kioc; i++) {

		kioc = adapter->kioc_list + i;
		kioc->cmdbuf = (uint64_t)(unsigned long)(mbox_list + i);
		kioc->pthru32 = dma_pool_alloc(adapter->pthru_dma_pool,
						GFP_KERNEL, &kioc->pthru32_h);

		if (!kioc->pthru32) {

			con_log(CL_ANN, (KERN_WARNING
				"megaraid cmm: out of memory, %s %d\n",
					__func__, __LINE__));

			rval = (-ENOMEM);

			goto pthru_dma_pool_error;
		}

		list_add_tail(&kioc->list, &adapter->kioc_pool);
	}

	// Setup the dma pools for data buffers
	if ((rval = mraid_mm_setup_dma_pools(adapter)) != 0) {
		goto dma_pool_error;
	}

	list_add_tail(&adapter->list, &adapters_list_g);

	adapters_count_g++;

	return 0;

dma_pool_error:
	/* Do nothing */

pthru_dma_pool_error:

	for (i = 0; i < lld_adp->max_kioc; i++) {
		kioc = adapter->kioc_list + i;
		if (kioc->pthru32) {
			dma_pool_free(adapter->pthru_dma_pool, kioc->pthru32,
				kioc->pthru32_h);
		}
	}

memalloc_error:

	kfree(adapter->kioc_list);
	kfree(adapter->mbox_list);

	dma_pool_destroy(adapter->pthru_dma_pool);

	kfree(adapter);

	return rval;
}


/**
 * mraid_mm_adapter_app_handle - return the application handle for this adapter
 * @unique_id	: adapter unique identifier
 *
 * For the given unique id, locate the adapter in our global list and return
 * the corresponding handle, which is also used by applications to uniquely
 * identify an adapter.
 *
 * Return adapter handle if found in the list.
 * Return 0 if adapter could not be located, should never happen though.
 */
uint32_t
mraid_mm_adapter_app_handle(uint32_t unique_id)
{
	mraid_mmadp_t *adapter;
	mraid_mmadp_t *tmp;
	int index = 0;

	list_for_each_entry_safe(adapter, tmp, &adapters_list_g, list) {

		if (adapter->unique_id == unique_id) {

			return MKADAP(index);
		}

		index++;
	}

	return 0;
}


/**
 * mraid_mm_setup_dma_pools - Set up dma buffer pools per adapter
 * @adp	: Adapter softstate
 *
 * We maintain a pool of dma buffers per adapter. Each pool has one buffer.
 * E.g., we may have 5 dma pools - one each for 4k, 8k ... 64k buffers. We
 * have just one 4k buffer in the 4k pool, one 8k buffer in the 8k pool etc.
 * We don't want to waste too much memory by allocating more buffers per
 * pool.
 */
static int
mraid_mm_setup_dma_pools(mraid_mmadp_t *adp)
{
	mm_dmapool_t *pool;
	int bufsize;
	int i;

	/*
	 * Create MAX_DMA_POOLS number of pools
	 */
	bufsize = MRAID_MM_INIT_BUFF_SIZE;

	for (i = 0; i < MAX_DMA_POOLS; i++) {

		pool = &adp->dma_pool_list[i];

		pool->buf_size = bufsize;
		spin_lock_init(&pool->lock);

		pool->handle = dma_pool_create("megaraid mm data buffer",
						&adp->pdev->dev, bufsize,
						16, 0);

		if (!pool->handle) {
			goto dma_pool_setup_error;
		}

		pool->vaddr = dma_pool_alloc(pool->handle, GFP_KERNEL,
							&pool->paddr);

		if (!pool->vaddr)
			goto dma_pool_setup_error;

		bufsize = bufsize * 2;
	}

	return 0;

dma_pool_setup_error:

	mraid_mm_teardown_dma_pools(adp);
	return (-ENOMEM);
}


/**
 * mraid_mm_unregister_adp - Unregister routine for low level drivers
 * @unique_id	: UID of the adapter
 *
 * Assumes no outstanding ioctls to llds.
 */
int
mraid_mm_unregister_adp(uint32_t unique_id)
{
	mraid_mmadp_t *adapter;
	mraid_mmadp_t *tmp;

	list_for_each_entry_safe(adapter, tmp, &adapters_list_g, list) {


		if (adapter->unique_id == unique_id) {

			adapters_count_g--;

			list_del_init(&adapter->list);

			mraid_mm_free_adp_resources(adapter);

			kfree(adapter);

			con_log(CL_ANN, (
				"megaraid cmm: Unregistered one adapter:%#x\n",
				unique_id));

			return 0;
		}
	}

	return (-ENODEV);
}

/**
 * mraid_mm_free_adp_resources - Free adapter softstate
 * @adp	: Adapter softstate
 */
static void
mraid_mm_free_adp_resources(mraid_mmadp_t *adp)
{
	uioc_t *kioc;
	int i;

	mraid_mm_teardown_dma_pools(adp);

	for (i = 0; i < adp->max_kioc; i++) {

		kioc = adp->kioc_list + i;

		dma_pool_free(adp->pthru_dma_pool, kioc->pthru32,
				kioc->pthru32_h);
	}

	kfree(adp->kioc_list);
	kfree(adp->mbox_list);

	dma_pool_destroy(adp->pthru_dma_pool);


	return;
}


/**
 * mraid_mm_teardown_dma_pools - Free all per adapter dma buffers
 * @adp	: Adapter softstate
 */
static void
mraid_mm_teardown_dma_pools(mraid_mmadp_t *adp)
{
	int i;
	mm_dmapool_t *pool;

	for (i = 0; i < MAX_DMA_POOLS; i++) {

		pool = &adp->dma_pool_list[i];

		if (pool->handle) {

			if (pool->vaddr)
				dma_pool_free(pool->handle, pool->vaddr,
							pool->paddr);

			dma_pool_destroy(pool->handle);
			pool->handle = NULL;
		}
	}

	return;
}

/**
 * mraid_mm_init	- Module entry point
 */
static int __init
mraid_mm_init(void)
{
	int err;

	// Announce the driver version
	con_log(CL_ANN, (KERN_INFO "megaraid cmm: %s %s\n",
		LSI_COMMON_MOD_VERSION, LSI_COMMON_MOD_EXT_VERSION));

	err = misc_register(&megaraid_mm_dev);
	if (err < 0) {
		con_log(CL_ANN, ("megaraid cmm: cannot register misc device\n"));
		return err;
	}

	init_waitqueue_head(&wait_q);

	INIT_LIST_HEAD(&adapters_list_g);

	return 0;
}


#ifdef CONFIG_COMPAT
/**
 * mraid_mm_compat_ioctl	- 32bit to 64bit ioctl conversion routine
 * @filep	: file operations pointer (ignored)
 * @cmd		: ioctl command
 * @arg		: user ioctl packet
 */
static long
mraid_mm_compat_ioctl(struct file *filep, unsigned int cmd,
		      unsigned long arg)
{
	int err;

	err = mraid_mm_ioctl(filep, cmd, arg);

	return err;
}
#endif

/**
 * mraid_mm_exit	- Module exit point
 */
static void __exit
mraid_mm_exit(void)
{
	con_log(CL_DLEVEL1, ("exiting common mod\n"));

	misc_deregister(&megaraid_mm_dev);
}

module_init(mraid_mm_init);
module_exit(mraid_mm_exit);

/* vi: set ts=8 sw=8 tw=78: */