/*
 *
 * Linux MegaRAID device driver
 *
 * Copyright (c) 2003-2004 LSI Logic Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * FILE    : megaraid_mbox.c
 * Version : v2.20.5.1 (Nov 16 2006)
 *
 * Authors:
 *	Atul Mukker		<Atul.Mukker@lsi.com>
 *	Sreenivas Bagalkote	<Sreenivas.Bagalkote@lsi.com>
 *	Manoj Jose		<Manoj.Jose@lsi.com>
 *	Seokmann Ju
 *
 * List of supported controllers
 *
 * OEM    Product Name                       VID     DID     SSVID   SSID
 * ---    ------------                       ---     ---     ----    ----
 * Dell   PERC3/QC                           101E    1960    1028    0471
 * Dell   PERC3/DC                           101E    1960    1028    0493
 * Dell   PERC3/SC                           101E    1960    1028    0475
 * Dell   PERC3/Di                           1028    1960    1028    0123
 * Dell   PERC4/SC                           1000    1960    1028    0520
 * Dell   PERC4/DC                           1000    1960    1028    0518
 * Dell   PERC4/QC                           1000    0407    1028    0531
 * Dell   PERC4/Di                           1028    000F    1028    014A
 * Dell   PERC 4e/Si                         1028    0013    1028    016c
 * Dell   PERC 4e/Di                         1028    0013    1028    016d
 * Dell   PERC 4e/Di                         1028    0013    1028    016e
 * Dell   PERC 4e/Di                         1028    0013    1028    016f
 * Dell   PERC 4e/Di                         1028    0013    1028    0170
 * Dell   PERC 4e/DC                         1000    0408    1028    0002
 * Dell   PERC 4e/SC                         1000    0408    1028    0001
 *
 *
 * LSI    MegaRAID SCSI 320-0                1000    1960    1000    A520
 * LSI    MegaRAID SCSI 320-1                1000    1960    1000    0520
 * LSI    MegaRAID SCSI 320-2                1000    1960    1000    0518
 * LSI    MegaRAID SCSI 320-0X               1000    0407    1000    0530
 * LSI    MegaRAID SCSI 320-2X               1000    0407    1000    0532
 * LSI    MegaRAID SCSI 320-4X               1000    0407    1000    0531
 * LSI    MegaRAID SCSI 320-1E               1000    0408    1000    0001
 * LSI    MegaRAID SCSI 320-2E               1000    0408    1000    0002
 * LSI    MegaRAID SATA 150-4                1000    1960    1000    4523
 * LSI    MegaRAID SATA 150-6                1000    1960    1000    0523
 * LSI    MegaRAID SATA 300-4X               1000    0409    1000    3004
 * LSI    MegaRAID SATA 300-8X               1000    0409    1000    3008
 *
 * INTEL  RAID Controller SRCU42X            1000    0407    8086    0532
 * INTEL  RAID Controller SRCS16             1000    1960    8086    0523
 * INTEL  RAID Controller SRCU42E            1000    0408    8086    0002
 * INTEL  RAID Controller SRCZCRX            1000    0407    8086    0530
 * INTEL  RAID Controller SRCS28X            1000    0409    8086    3008
 * INTEL  RAID Controller SROMBU42E          1000    0408    8086    3431
 * INTEL  RAID Controller SROMBU42E          1000    0408    8086    3499
 * INTEL  RAID Controller SRCU51L            1000    1960    8086    0520
 *
 * FSC    MegaRAID PCI Express ROMB          1000    0408    1734    1065
 *
 * ACER   MegaRAID ROMB-2E                   1000    0408    1025    004D
 *
 * NEC    MegaRAID PCI Express ROMB          1000    0408    1033    8287
 *
 * For history of changes, see Documentation/ChangeLog.megaraid
 */

#include "megaraid_mbox.h"

static int megaraid_init(void);
static void megaraid_exit(void);

static int megaraid_probe_one(struct pci_dev*, const struct pci_device_id *);
static void megaraid_detach_one(struct pci_dev *);
static void megaraid_mbox_shutdown(struct pci_dev *);

static int megaraid_io_attach(adapter_t *);
static void megaraid_io_detach(adapter_t *);

static int megaraid_init_mbox(adapter_t *);
static void megaraid_fini_mbox(adapter_t *);

static int megaraid_alloc_cmd_packets(adapter_t *);
static void megaraid_free_cmd_packets(adapter_t *);

static int megaraid_mbox_setup_dma_pools(adapter_t *);
static void megaraid_mbox_teardown_dma_pools(adapter_t *);

static int megaraid_sysfs_alloc_resources(adapter_t *);
static void megaraid_sysfs_free_resources(adapter_t *);

static int megaraid_abort_handler(struct scsi_cmnd *);
static int
megaraid_reset_handler(struct scsi_cmnd *); 99 100 static int mbox_post_sync_cmd(adapter_t *, uint8_t []); 101 static int mbox_post_sync_cmd_fast(adapter_t *, uint8_t []); 102 static int megaraid_busywait_mbox(mraid_device_t *); 103 static int megaraid_mbox_product_info(adapter_t *); 104 static int megaraid_mbox_extended_cdb(adapter_t *); 105 static int megaraid_mbox_support_ha(adapter_t *, uint16_t *); 106 static int megaraid_mbox_support_random_del(adapter_t *); 107 static int megaraid_mbox_get_max_sg(adapter_t *); 108 static void megaraid_mbox_enum_raid_scsi(adapter_t *); 109 static void megaraid_mbox_flush_cache(adapter_t *); 110 static int megaraid_mbox_fire_sync_cmd(adapter_t *); 111 112 static void megaraid_mbox_display_scb(adapter_t *, scb_t *); 113 static void megaraid_mbox_setup_device_map(adapter_t *); 114 115 static int megaraid_queue_command(struct scsi_cmnd *, 116 void (*)(struct scsi_cmnd *)); 117 static scb_t *megaraid_mbox_build_cmd(adapter_t *, struct scsi_cmnd *, int *); 118 static void megaraid_mbox_runpendq(adapter_t *, scb_t *); 119 static void megaraid_mbox_prepare_pthru(adapter_t *, scb_t *, 120 struct scsi_cmnd *); 121 static void megaraid_mbox_prepare_epthru(adapter_t *, scb_t *, 122 struct scsi_cmnd *); 123 124 static irqreturn_t megaraid_isr(int, void *); 125 126 static void megaraid_mbox_dpc(unsigned long); 127 128 static ssize_t megaraid_sysfs_show_app_hndl(struct class_device *, char *); 129 static ssize_t megaraid_sysfs_show_ldnum(struct device *, struct device_attribute *attr, char *); 130 131 static int megaraid_cmm_register(adapter_t *); 132 static int megaraid_cmm_unregister(adapter_t *); 133 static int megaraid_mbox_mm_handler(unsigned long, uioc_t *, uint32_t); 134 static int megaraid_mbox_mm_command(adapter_t *, uioc_t *); 135 static void megaraid_mbox_mm_done(adapter_t *, scb_t *); 136 static int gather_hbainfo(adapter_t *, mraid_hba_info_t *); 137 static int wait_till_fw_empty(adapter_t *); 138 139 140 141 MODULE_AUTHOR("megaraidlinux@lsi.com"); 142 MODULE_DESCRIPTION("LSI Logic MegaRAID Mailbox Driver"); 143 MODULE_LICENSE("GPL"); 144 MODULE_VERSION(MEGARAID_VERSION); 145 146 /* 147 * ### modules parameters for driver ### 148 */ 149 150 /* 151 * Set to enable driver to expose unconfigured disk to kernel 152 */ 153 static int megaraid_expose_unconf_disks = 0; 154 module_param_named(unconf_disks, megaraid_expose_unconf_disks, int, 0); 155 MODULE_PARM_DESC(unconf_disks, 156 "Set to expose unconfigured disks to kernel (default=0)"); 157 158 /* 159 * driver wait time if the adapter's mailbox is busy 160 */ 161 static unsigned int max_mbox_busy_wait = MBOX_BUSY_WAIT; 162 module_param_named(busy_wait, max_mbox_busy_wait, int, 0); 163 MODULE_PARM_DESC(busy_wait, 164 "Max wait for mailbox in microseconds if busy (default=10)"); 165 166 /* 167 * number of sectors per IO command 168 */ 169 static unsigned int megaraid_max_sectors = MBOX_MAX_SECTORS; 170 module_param_named(max_sectors, megaraid_max_sectors, int, 0); 171 MODULE_PARM_DESC(max_sectors, 172 "Maximum number of sectors per IO command (default=128)"); 173 174 /* 175 * number of commands per logical unit 176 */ 177 static unsigned int megaraid_cmd_per_lun = MBOX_DEF_CMD_PER_LUN; 178 module_param_named(cmd_per_lun, megaraid_cmd_per_lun, int, 0); 179 MODULE_PARM_DESC(cmd_per_lun, 180 "Maximum number of commands per logical unit (default=64)"); 181 182 183 /* 184 * Fast driver load option, skip scanning for physical devices during load. 
185 * This would result in non-disk devices being skipped during driver load 186 * time. These can be later added though, using /proc/scsi/scsi 187 */ 188 static unsigned int megaraid_fast_load = 0; 189 module_param_named(fast_load, megaraid_fast_load, int, 0); 190 MODULE_PARM_DESC(fast_load, 191 "Faster loading of the driver, skips physical devices! (default=0)"); 192 193 194 /* 195 * mraid_debug level - threshold for amount of information to be displayed by 196 * the driver. This level can be changed through modules parameters, ioctl or 197 * sysfs/proc interface. By default, print the announcement messages only. 198 */ 199 int mraid_debug_level = CL_ANN; 200 module_param_named(debug_level, mraid_debug_level, int, 0); 201 MODULE_PARM_DESC(debug_level, "Debug level for driver (default=0)"); 202 203 /* 204 * ### global data ### 205 */ 206 static uint8_t megaraid_mbox_version[8] = 207 { 0x02, 0x20, 0x04, 0x06, 3, 7, 20, 5 }; 208 209 210 /* 211 * PCI table for all supported controllers. 212 */ 213 static struct pci_device_id pci_id_table_g[] = { 214 { 215 PCI_VENDOR_ID_DELL, 216 PCI_DEVICE_ID_PERC4_DI_DISCOVERY, 217 PCI_VENDOR_ID_DELL, 218 PCI_SUBSYS_ID_PERC4_DI_DISCOVERY, 219 }, 220 { 221 PCI_VENDOR_ID_LSI_LOGIC, 222 PCI_DEVICE_ID_PERC4_SC, 223 PCI_VENDOR_ID_DELL, 224 PCI_SUBSYS_ID_PERC4_SC, 225 }, 226 { 227 PCI_VENDOR_ID_LSI_LOGIC, 228 PCI_DEVICE_ID_PERC4_DC, 229 PCI_VENDOR_ID_DELL, 230 PCI_SUBSYS_ID_PERC4_DC, 231 }, 232 { 233 PCI_VENDOR_ID_LSI_LOGIC, 234 PCI_DEVICE_ID_VERDE, 235 PCI_ANY_ID, 236 PCI_ANY_ID, 237 }, 238 { 239 PCI_VENDOR_ID_DELL, 240 PCI_DEVICE_ID_PERC4_DI_EVERGLADES, 241 PCI_VENDOR_ID_DELL, 242 PCI_SUBSYS_ID_PERC4_DI_EVERGLADES, 243 }, 244 { 245 PCI_VENDOR_ID_DELL, 246 PCI_DEVICE_ID_PERC4E_SI_BIGBEND, 247 PCI_VENDOR_ID_DELL, 248 PCI_SUBSYS_ID_PERC4E_SI_BIGBEND, 249 }, 250 { 251 PCI_VENDOR_ID_DELL, 252 PCI_DEVICE_ID_PERC4E_DI_KOBUK, 253 PCI_VENDOR_ID_DELL, 254 PCI_SUBSYS_ID_PERC4E_DI_KOBUK, 255 }, 256 { 257 PCI_VENDOR_ID_DELL, 258 PCI_DEVICE_ID_PERC4E_DI_CORVETTE, 259 PCI_VENDOR_ID_DELL, 260 PCI_SUBSYS_ID_PERC4E_DI_CORVETTE, 261 }, 262 { 263 PCI_VENDOR_ID_DELL, 264 PCI_DEVICE_ID_PERC4E_DI_EXPEDITION, 265 PCI_VENDOR_ID_DELL, 266 PCI_SUBSYS_ID_PERC4E_DI_EXPEDITION, 267 }, 268 { 269 PCI_VENDOR_ID_DELL, 270 PCI_DEVICE_ID_PERC4E_DI_GUADALUPE, 271 PCI_VENDOR_ID_DELL, 272 PCI_SUBSYS_ID_PERC4E_DI_GUADALUPE, 273 }, 274 { 275 PCI_VENDOR_ID_LSI_LOGIC, 276 PCI_DEVICE_ID_DOBSON, 277 PCI_ANY_ID, 278 PCI_ANY_ID, 279 }, 280 { 281 PCI_VENDOR_ID_AMI, 282 PCI_DEVICE_ID_AMI_MEGARAID3, 283 PCI_ANY_ID, 284 PCI_ANY_ID, 285 }, 286 { 287 PCI_VENDOR_ID_LSI_LOGIC, 288 PCI_DEVICE_ID_AMI_MEGARAID3, 289 PCI_ANY_ID, 290 PCI_ANY_ID, 291 }, 292 { 293 PCI_VENDOR_ID_LSI_LOGIC, 294 PCI_DEVICE_ID_LINDSAY, 295 PCI_ANY_ID, 296 PCI_ANY_ID, 297 }, 298 {0} /* Terminating entry */ 299 }; 300 MODULE_DEVICE_TABLE(pci, pci_id_table_g); 301 302 303 static struct pci_driver megaraid_pci_driver_g = { 304 .name = "megaraid", 305 .id_table = pci_id_table_g, 306 .probe = megaraid_probe_one, 307 .remove = __devexit_p(megaraid_detach_one), 308 .shutdown = megaraid_mbox_shutdown, 309 }; 310 311 312 313 // definitions for the device attributes for exporting logical drive number 314 // for a scsi address (Host, Channel, Id, Lun) 315 316 CLASS_DEVICE_ATTR(megaraid_mbox_app_hndl, S_IRUSR, megaraid_sysfs_show_app_hndl, 317 NULL); 318 319 // Host template initializer for megaraid mbox sysfs device attributes 320 static struct class_device_attribute *megaraid_shost_attrs[] = { 321 &class_device_attr_megaraid_mbox_app_hndl, 322 NULL, 323 
};


DEVICE_ATTR(megaraid_mbox_ld, S_IRUSR, megaraid_sysfs_show_ldnum, NULL);

// Host template initializer for megaraid mbox sysfs device attributes
static struct device_attribute *megaraid_sdev_attrs[] = {
	&dev_attr_megaraid_mbox_ld,
	NULL,
};

/**
 * megaraid_change_queue_depth - Change the device's queue depth
 * @sdev:	scsi device struct
 * @qdepth:	depth to set
 *
 * Return value:
 *	actual depth set
 */
static int megaraid_change_queue_depth(struct scsi_device *sdev, int qdepth)
{
	if (qdepth > MBOX_MAX_SCSI_CMDS)
		qdepth = MBOX_MAX_SCSI_CMDS;
	scsi_adjust_queue_depth(sdev, 0, qdepth);
	return sdev->queue_depth;
}

/*
 * Scsi host template for megaraid unified driver
 */
static struct scsi_host_template megaraid_template_g = {
	.module				= THIS_MODULE,
	.name				= "LSI Logic MegaRAID driver",
	.proc_name			= "megaraid",
	.queuecommand			= megaraid_queue_command,
	.eh_abort_handler		= megaraid_abort_handler,
	.eh_device_reset_handler	= megaraid_reset_handler,
	.eh_bus_reset_handler		= megaraid_reset_handler,
	.eh_host_reset_handler		= megaraid_reset_handler,
	.change_queue_depth		= megaraid_change_queue_depth,
	.use_clustering			= ENABLE_CLUSTERING,
	.sdev_attrs			= megaraid_sdev_attrs,
	.shost_attrs			= megaraid_shost_attrs,
};


/**
 * megaraid_init - module load hook
 *
 * We register ourselves as a hotplug-enabled module and let the PCI
 * subsystem discover our adapters.
 */
static int __init
megaraid_init(void)
{
	int	rval;

	// Announce the driver version
	con_log(CL_ANN, (KERN_INFO "megaraid: %s %s\n", MEGARAID_VERSION,
		MEGARAID_EXT_VERSION));

	// check validity of module parameters
	if (megaraid_cmd_per_lun > MBOX_MAX_SCSI_CMDS) {

		con_log(CL_ANN, (KERN_WARNING
			"megaraid mailbox: max commands per lun reset to %d\n",
			MBOX_MAX_SCSI_CMDS));

		megaraid_cmd_per_lun = MBOX_MAX_SCSI_CMDS;
	}


	// register as a PCI hot-plug driver module
	rval = pci_register_driver(&megaraid_pci_driver_g);
	if (rval < 0) {
		con_log(CL_ANN, (KERN_WARNING
			"megaraid: could not register hotplug support.\n"));
	}

	return rval;
}


/**
 * megaraid_exit - driver unload entry point
 *
 * We simply undo what megaraid_init() did here.
 */
static void __exit
megaraid_exit(void)
{
	con_log(CL_DLEVEL1, (KERN_NOTICE "megaraid: unloading framework\n"));

	// unregister as PCI hotplug driver
	pci_unregister_driver(&megaraid_pci_driver_g);

	return;
}


/**
 * megaraid_probe_one - PCI hotplug entry point
 * @pdev	: handle to this controller's PCI configuration space
 * @id		: pci device id of the class of controllers
 *
 * This routine should be called whenever a new adapter is detected by the
 * PCI hotplug subsystem.
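 *
 * The setup sequence below is: enable the PCI device, turn on bus-mastering,
 * allocate and zero the per-adapter soft state, set a default 32-bit DMA
 * mask, initialize the free/pending/completed SCB lists and their locks,
 * bring up the mailbox controller, register with the common management
 * module and finally attach to the SCSI mid-layer. Each error path unwinds
 * only the steps that had already completed, in reverse order.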
 */
static int __devinit
megaraid_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
	adapter_t	*adapter;


	// detected a new controller
	con_log(CL_ANN, (KERN_INFO
		"megaraid: probe new device %#4.04x:%#4.04x:%#4.04x:%#4.04x: ",
		pdev->vendor, pdev->device, pdev->subsystem_vendor,
		pdev->subsystem_device));

	con_log(CL_ANN, ("bus %d:slot %d:func %d\n", pdev->bus->number,
		PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn)));

	if (pci_enable_device(pdev)) {
		con_log(CL_ANN, (KERN_WARNING
				"megaraid: pci_enable_device failed\n"));

		return -ENODEV;
	}

	// Enable bus-mastering on this controller
	pci_set_master(pdev);

	// Allocate the per driver initialization structure
	adapter = kmalloc(sizeof(adapter_t), GFP_KERNEL);

	if (adapter == NULL) {
		con_log(CL_ANN, (KERN_WARNING
		"megaraid: out of memory, %s %d.\n", __FUNCTION__, __LINE__));

		goto out_probe_one;
	}
	memset(adapter, 0, sizeof(adapter_t));


	// set up PCI related soft state and other pre-known parameters
	adapter->unique_id	= pdev->bus->number << 8 | pdev->devfn;
	adapter->irq		= pdev->irq;
	adapter->pdev		= pdev;

	atomic_set(&adapter->being_detached, 0);

	// Setup the default DMA mask. This would be changed later on
	// depending on hardware capabilities
	if (pci_set_dma_mask(adapter->pdev, DMA_32BIT_MASK) != 0) {

		con_log(CL_ANN, (KERN_WARNING
			"megaraid: pci_set_dma_mask failed:%d\n", __LINE__));

		goto out_free_adapter;
	}


	// Initialize the synchronization lock for kernel and LLD
	spin_lock_init(&adapter->lock);

	// Initialize the command queues: the list of free SCBs and the list
	// of pending SCBs.
	INIT_LIST_HEAD(&adapter->kscb_pool);
	spin_lock_init(SCSI_FREE_LIST_LOCK(adapter));

	INIT_LIST_HEAD(&adapter->pend_list);
	spin_lock_init(PENDING_LIST_LOCK(adapter));

	INIT_LIST_HEAD(&adapter->completed_list);
	spin_lock_init(COMPLETED_LIST_LOCK(adapter));


	// Start the mailbox based controller
	if (megaraid_init_mbox(adapter) != 0) {
		con_log(CL_ANN, (KERN_WARNING
			"megaraid: mailbox adapter did not initialize\n"));

		goto out_free_adapter;
	}

	// Register with LSI Common Management Module
	if (megaraid_cmm_register(adapter) != 0) {

		con_log(CL_ANN, (KERN_WARNING
		"megaraid: could not register with management module\n"));

		goto out_fini_mbox;
	}

	// setup adapter handle in PCI soft state
	pci_set_drvdata(pdev, adapter);

	// attach with scsi mid-layer
	if (megaraid_io_attach(adapter) != 0) {

		con_log(CL_ANN, (KERN_WARNING "megaraid: io attach failed\n"));

		goto out_cmm_unreg;
	}

	return 0;

out_cmm_unreg:
	pci_set_drvdata(pdev, NULL);
	megaraid_cmm_unregister(adapter);
out_fini_mbox:
	megaraid_fini_mbox(adapter);
out_free_adapter:
	kfree(adapter);
out_probe_one:
	pci_disable_device(pdev);

	return -ENODEV;
}


/**
 * megaraid_detach_one - release framework resources and call LLD release routine
 * @pdev	: handle for our PCI configuration space
 *
 * This routine is called during driver unload. We free all the allocated
 * resources and call the corresponding LLD so that it can also release all
 * its resources.
 *
 * This routine is also called from the PCI hotplug system.
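 *
 * Teardown mirrors megaraid_probe_one() in reverse: the adapter is marked as
 * being detached so the management module sends no new requests, we detach
 * from the SCSI mid-layer, clear the PCI driver data, unregister from the
 * common management module, shut down the mailbox controller, free the soft
 * state, drop the host reference and finally disable the PCI device.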
554 */ 555 static void 556 megaraid_detach_one(struct pci_dev *pdev) 557 { 558 adapter_t *adapter; 559 struct Scsi_Host *host; 560 561 562 // Start a rollback on this adapter 563 adapter = pci_get_drvdata(pdev); 564 565 if (!adapter) { 566 con_log(CL_ANN, (KERN_CRIT 567 "megaraid: Invalid detach on %#4.04x:%#4.04x:%#4.04x:%#4.04x\n", 568 pdev->vendor, pdev->device, pdev->subsystem_vendor, 569 pdev->subsystem_device)); 570 571 return; 572 } 573 else { 574 con_log(CL_ANN, (KERN_NOTICE 575 "megaraid: detaching device %#4.04x:%#4.04x:%#4.04x:%#4.04x\n", 576 pdev->vendor, pdev->device, pdev->subsystem_vendor, 577 pdev->subsystem_device)); 578 } 579 580 581 host = adapter->host; 582 583 // do not allow any more requests from the management module for this 584 // adapter. 585 // FIXME: How do we account for the request which might still be 586 // pending with us? 587 atomic_set(&adapter->being_detached, 1); 588 589 // detach from the IO sub-system 590 megaraid_io_detach(adapter); 591 592 // reset the device state in the PCI structure. We check this 593 // condition when we enter here. If the device state is NULL, 594 // that would mean the device has already been removed 595 pci_set_drvdata(pdev, NULL); 596 597 // Unregister from common management module 598 // 599 // FIXME: this must return success or failure for conditions if there 600 // is a command pending with LLD or not. 601 megaraid_cmm_unregister(adapter); 602 603 // finalize the mailbox based controller and release all resources 604 megaraid_fini_mbox(adapter); 605 606 kfree(adapter); 607 608 scsi_host_put(host); 609 610 pci_disable_device(pdev); 611 612 return; 613 } 614 615 616 /** 617 * megaraid_mbox_shutdown - PCI shutdown for megaraid HBA 618 * @pdev : generic driver model device 619 * 620 * Shutdown notification, perform flush cache. 621 */ 622 static void 623 megaraid_mbox_shutdown(struct pci_dev *pdev) 624 { 625 adapter_t *adapter = pci_get_drvdata(pdev); 626 static int counter; 627 628 if (!adapter) { 629 con_log(CL_ANN, (KERN_WARNING 630 "megaraid: null device in shutdown\n")); 631 return; 632 } 633 634 // flush caches now 635 con_log(CL_ANN, (KERN_INFO "megaraid: flushing adapter %d...", 636 counter++)); 637 638 megaraid_mbox_flush_cache(adapter); 639 640 con_log(CL_ANN, ("done\n")); 641 } 642 643 644 /** 645 * megaraid_io_attach - attach a device with the IO subsystem 646 * @adapter : controller's soft state 647 * 648 * Attach this device with the IO subsystem. 
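 *
 * A Scsi_Host is allocated from the driver template and populated with the
 * limits discovered during controller initialization (can_queue,
 * sg_tablesize, max_sectors, cmd_per_lun, max_channel, max_id, max_lun)
 * before scsi_add_host() and scsi_scan_host() hand the controller over to
 * the mid-layer.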
649 */ 650 static int 651 megaraid_io_attach(adapter_t *adapter) 652 { 653 struct Scsi_Host *host; 654 655 // Initialize SCSI Host structure 656 host = scsi_host_alloc(&megaraid_template_g, 8); 657 if (!host) { 658 con_log(CL_ANN, (KERN_WARNING 659 "megaraid mbox: scsi_register failed\n")); 660 661 return -1; 662 } 663 664 SCSIHOST2ADAP(host) = (caddr_t)adapter; 665 adapter->host = host; 666 667 host->irq = adapter->irq; 668 host->unique_id = adapter->unique_id; 669 host->can_queue = adapter->max_cmds; 670 host->this_id = adapter->init_id; 671 host->sg_tablesize = adapter->sglen; 672 host->max_sectors = adapter->max_sectors; 673 host->cmd_per_lun = adapter->cmd_per_lun; 674 host->max_channel = adapter->max_channel; 675 host->max_id = adapter->max_target; 676 host->max_lun = adapter->max_lun; 677 678 679 // notify mid-layer about the new controller 680 if (scsi_add_host(host, &adapter->pdev->dev)) { 681 682 con_log(CL_ANN, (KERN_WARNING 683 "megaraid mbox: scsi_add_host failed\n")); 684 685 scsi_host_put(host); 686 687 return -1; 688 } 689 690 scsi_scan_host(host); 691 692 return 0; 693 } 694 695 696 /** 697 * megaraid_io_detach - detach a device from the IO subsystem 698 * @adapter : controller's soft state 699 * 700 * Detach this device from the IO subsystem. 701 */ 702 static void 703 megaraid_io_detach(adapter_t *adapter) 704 { 705 struct Scsi_Host *host; 706 707 con_log(CL_DLEVEL1, (KERN_INFO "megaraid: io detach\n")); 708 709 host = adapter->host; 710 711 scsi_remove_host(host); 712 713 return; 714 } 715 716 717 /* 718 * START: Mailbox Low Level Driver 719 * 720 * This is section specific to the single mailbox based controllers 721 */ 722 723 /** 724 * megaraid_init_mbox - initialize controller 725 * @adapter : our soft state 726 * 727 * - Allocate 16-byte aligned mailbox memory for firmware handshake 728 * - Allocate controller's memory resources 729 * - Find out all initialization data 730 * - Allocate memory required for all the commands 731 * - Use internal library of FW routines, build up complete soft state 732 */ 733 static int __devinit 734 megaraid_init_mbox(adapter_t *adapter) 735 { 736 struct pci_dev *pdev; 737 mraid_device_t *raid_dev; 738 int i; 739 uint32_t magic64; 740 741 742 adapter->ito = MBOX_TIMEOUT; 743 pdev = adapter->pdev; 744 745 /* 746 * Allocate and initialize the init data structure for mailbox 747 * controllers 748 */ 749 raid_dev = kmalloc(sizeof(mraid_device_t), GFP_KERNEL); 750 if (raid_dev == NULL) return -1; 751 752 memset(raid_dev, 0, sizeof(mraid_device_t)); 753 754 /* 755 * Attach the adapter soft state to raid device soft state 756 */ 757 adapter->raid_device = (caddr_t)raid_dev; 758 raid_dev->fast_load = megaraid_fast_load; 759 760 761 // our baseport 762 raid_dev->baseport = pci_resource_start(pdev, 0); 763 764 if (pci_request_regions(pdev, "MegaRAID: LSI Logic Corporation") != 0) { 765 766 con_log(CL_ANN, (KERN_WARNING 767 "megaraid: mem region busy\n")); 768 769 goto out_free_raid_dev; 770 } 771 772 raid_dev->baseaddr = ioremap_nocache(raid_dev->baseport, 128); 773 774 if (!raid_dev->baseaddr) { 775 776 con_log(CL_ANN, (KERN_WARNING 777 "megaraid: could not map hba memory\n") ); 778 779 goto out_release_regions; 780 } 781 782 /* initialize the mutual exclusion lock for the mailbox */ 783 spin_lock_init(&raid_dev->mailbox_lock); 784 785 /* allocate memory required for commands */ 786 if (megaraid_alloc_cmd_packets(adapter) != 0) 787 goto out_iounmap; 788 789 /* 790 * Issue SYNC cmd to flush the pending cmds in the adapter 791 * and initialize 
its internal state 792 */ 793 794 if (megaraid_mbox_fire_sync_cmd(adapter)) 795 con_log(CL_ANN, ("megaraid: sync cmd failed\n")); 796 797 /* 798 * Setup the rest of the soft state using the library of 799 * FW routines 800 */ 801 802 /* request IRQ and register the interrupt service routine */ 803 if (request_irq(adapter->irq, megaraid_isr, IRQF_SHARED, "megaraid", 804 adapter)) { 805 806 con_log(CL_ANN, (KERN_WARNING 807 "megaraid: Couldn't register IRQ %d!\n", adapter->irq)); 808 goto out_alloc_cmds; 809 810 } 811 812 // Product info 813 if (megaraid_mbox_product_info(adapter) != 0) 814 goto out_free_irq; 815 816 // Do we support extended CDBs 817 adapter->max_cdb_sz = 10; 818 if (megaraid_mbox_extended_cdb(adapter) == 0) { 819 adapter->max_cdb_sz = 16; 820 } 821 822 /* 823 * Do we support cluster environment, if we do, what is the initiator 824 * id. 825 * NOTE: In a non-cluster aware firmware environment, the LLD should 826 * return 7 as initiator id. 827 */ 828 adapter->ha = 0; 829 adapter->init_id = -1; 830 if (megaraid_mbox_support_ha(adapter, &adapter->init_id) == 0) { 831 adapter->ha = 1; 832 } 833 834 /* 835 * Prepare the device ids array to have the mapping between the kernel 836 * device address and megaraid device address. 837 * We export the physical devices on their actual addresses. The 838 * logical drives are exported on a virtual SCSI channel 839 */ 840 megaraid_mbox_setup_device_map(adapter); 841 842 // If the firmware supports random deletion, update the device id map 843 if (megaraid_mbox_support_random_del(adapter)) { 844 845 // Change the logical drives numbers in device_ids array one 846 // slot in device_ids is reserved for target id, that's why 847 // "<=" below 848 for (i = 0; i <= MAX_LOGICAL_DRIVES_40LD; i++) { 849 adapter->device_ids[adapter->max_channel][i] += 0x80; 850 } 851 adapter->device_ids[adapter->max_channel][adapter->init_id] = 852 0xFF; 853 854 raid_dev->random_del_supported = 1; 855 } 856 857 /* 858 * find out the maximum number of scatter-gather elements supported by 859 * this firmware 860 */ 861 adapter->sglen = megaraid_mbox_get_max_sg(adapter); 862 863 // enumerate RAID and SCSI channels so that all devices on SCSI 864 // channels can later be exported, including disk devices 865 megaraid_mbox_enum_raid_scsi(adapter); 866 867 /* 868 * Other parameters required by upper layer 869 * 870 * maximum number of sectors per IO command 871 */ 872 adapter->max_sectors = megaraid_max_sectors; 873 874 /* 875 * number of queued commands per LUN. 876 */ 877 adapter->cmd_per_lun = megaraid_cmd_per_lun; 878 879 /* 880 * Allocate resources required to issue FW calls, when sysfs is 881 * accessed 882 */ 883 if (megaraid_sysfs_alloc_resources(adapter) != 0) 884 goto out_free_irq; 885 886 // Set the DMA mask to 64-bit. 
All supported controllers as capable of 887 // DMA in this range 888 pci_read_config_dword(adapter->pdev, PCI_CONF_AMISIG64, &magic64); 889 890 if (((magic64 == HBA_SIGNATURE_64_BIT) && 891 ((adapter->pdev->subsystem_device != 892 PCI_SUBSYS_ID_MEGARAID_SATA_150_6) && 893 (adapter->pdev->subsystem_device != 894 PCI_SUBSYS_ID_MEGARAID_SATA_150_4))) || 895 (adapter->pdev->vendor == PCI_VENDOR_ID_LSI_LOGIC && 896 adapter->pdev->device == PCI_DEVICE_ID_VERDE) || 897 (adapter->pdev->vendor == PCI_VENDOR_ID_LSI_LOGIC && 898 adapter->pdev->device == PCI_DEVICE_ID_DOBSON) || 899 (adapter->pdev->vendor == PCI_VENDOR_ID_LSI_LOGIC && 900 adapter->pdev->device == PCI_DEVICE_ID_LINDSAY) || 901 (adapter->pdev->vendor == PCI_VENDOR_ID_DELL && 902 adapter->pdev->device == PCI_DEVICE_ID_PERC4_DI_EVERGLADES) || 903 (adapter->pdev->vendor == PCI_VENDOR_ID_DELL && 904 adapter->pdev->device == PCI_DEVICE_ID_PERC4E_DI_KOBUK)) { 905 if (pci_set_dma_mask(adapter->pdev, DMA_64BIT_MASK)) { 906 con_log(CL_ANN, (KERN_WARNING 907 "megaraid: DMA mask for 64-bit failed\n")); 908 909 if (pci_set_dma_mask (adapter->pdev, DMA_32BIT_MASK)) { 910 con_log(CL_ANN, (KERN_WARNING 911 "megaraid: 32-bit DMA mask failed\n")); 912 goto out_free_sysfs_res; 913 } 914 } 915 } 916 917 // setup tasklet for DPC 918 tasklet_init(&adapter->dpc_h, megaraid_mbox_dpc, 919 (unsigned long)adapter); 920 921 con_log(CL_DLEVEL1, (KERN_INFO 922 "megaraid mbox hba successfully initialized\n")); 923 924 return 0; 925 926 out_free_sysfs_res: 927 megaraid_sysfs_free_resources(adapter); 928 out_free_irq: 929 free_irq(adapter->irq, adapter); 930 out_alloc_cmds: 931 megaraid_free_cmd_packets(adapter); 932 out_iounmap: 933 iounmap(raid_dev->baseaddr); 934 out_release_regions: 935 pci_release_regions(pdev); 936 out_free_raid_dev: 937 kfree(raid_dev); 938 939 return -1; 940 } 941 942 943 /** 944 * megaraid_fini_mbox - undo controller initialization 945 * @adapter : our soft state 946 */ 947 static void 948 megaraid_fini_mbox(adapter_t *adapter) 949 { 950 mraid_device_t *raid_dev = ADAP2RAIDDEV(adapter); 951 952 // flush all caches 953 megaraid_mbox_flush_cache(adapter); 954 955 tasklet_kill(&adapter->dpc_h); 956 957 megaraid_sysfs_free_resources(adapter); 958 959 megaraid_free_cmd_packets(adapter); 960 961 free_irq(adapter->irq, adapter); 962 963 iounmap(raid_dev->baseaddr); 964 965 pci_release_regions(adapter->pdev); 966 967 kfree(raid_dev); 968 969 return; 970 } 971 972 973 /** 974 * megaraid_alloc_cmd_packets - allocate shared mailbox 975 * @adapter : soft state of the raid controller 976 * 977 * Allocate and align the shared mailbox. This maibox is used to issue 978 * all the commands. For IO based controllers, the mailbox is also regsitered 979 * with the FW. Allocate memory for all commands as well. 980 * This is our big allocator. 981 */ 982 static int 983 megaraid_alloc_cmd_packets(adapter_t *adapter) 984 { 985 mraid_device_t *raid_dev = ADAP2RAIDDEV(adapter); 986 struct pci_dev *pdev; 987 unsigned long align; 988 scb_t *scb; 989 mbox_ccb_t *ccb; 990 struct mraid_pci_blk *epthru_pci_blk; 991 struct mraid_pci_blk *sg_pci_blk; 992 struct mraid_pci_blk *mbox_pci_blk; 993 int i; 994 995 pdev = adapter->pdev; 996 997 /* 998 * Setup the mailbox 999 * Allocate the common 16-byte aligned memory for the handshake 1000 * mailbox. 
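	 *
	 * Layout note: the mailbox handed to the firmware must start on a
	 * 16-byte boundary, with the two 32-bit extended-address words of
	 * the 64-bit wrapper sitting immediately before it.  Hence the code
	 * below rounds the virtual address up with ((addr + 15) & ~0xFUL),
	 * points mbox64 eight bytes before the aligned mailbox, and applies
	 * the same offset to the DMA address.  Illustrative (made-up)
	 * numbers: if &una_mbox64->mbox32 falls at ...0x1008, it is rounded
	 * up by align = 8 to ...0x1010, and mbox_dma becomes
	 * una_mbox64_dma + 8 + 8.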
1001 */ 1002 raid_dev->una_mbox64 = pci_alloc_consistent(adapter->pdev, 1003 sizeof(mbox64_t), &raid_dev->una_mbox64_dma); 1004 1005 if (!raid_dev->una_mbox64) { 1006 con_log(CL_ANN, (KERN_WARNING 1007 "megaraid: out of memory, %s %d\n", __FUNCTION__, 1008 __LINE__)); 1009 return -1; 1010 } 1011 memset(raid_dev->una_mbox64, 0, sizeof(mbox64_t)); 1012 1013 /* 1014 * Align the mailbox at 16-byte boundary 1015 */ 1016 raid_dev->mbox = &raid_dev->una_mbox64->mbox32; 1017 1018 raid_dev->mbox = (mbox_t *)((((unsigned long)raid_dev->mbox) + 15) & 1019 (~0UL ^ 0xFUL)); 1020 1021 raid_dev->mbox64 = (mbox64_t *)(((unsigned long)raid_dev->mbox) - 8); 1022 1023 align = ((void *)raid_dev->mbox - 1024 ((void *)&raid_dev->una_mbox64->mbox32)); 1025 1026 raid_dev->mbox_dma = (unsigned long)raid_dev->una_mbox64_dma + 8 + 1027 align; 1028 1029 // Allocate memory for commands issued internally 1030 adapter->ibuf = pci_alloc_consistent(pdev, MBOX_IBUF_SIZE, 1031 &adapter->ibuf_dma_h); 1032 if (!adapter->ibuf) { 1033 1034 con_log(CL_ANN, (KERN_WARNING 1035 "megaraid: out of memory, %s %d\n", __FUNCTION__, 1036 __LINE__)); 1037 1038 goto out_free_common_mbox; 1039 } 1040 memset(adapter->ibuf, 0, MBOX_IBUF_SIZE); 1041 1042 // Allocate memory for our SCSI Command Blocks and their associated 1043 // memory 1044 1045 /* 1046 * Allocate memory for the base list of scb. Later allocate memory for 1047 * CCBs and embedded components of each CCB and point the pointers in 1048 * scb to the allocated components 1049 * NOTE: The code to allocate SCB will be duplicated in all the LLD 1050 * since the calling routine does not yet know the number of available 1051 * commands. 1052 */ 1053 adapter->kscb_list = kmalloc(sizeof(scb_t) * MBOX_MAX_SCSI_CMDS, 1054 GFP_KERNEL); 1055 1056 if (adapter->kscb_list == NULL) { 1057 con_log(CL_ANN, (KERN_WARNING 1058 "megaraid: out of memory, %s %d\n", __FUNCTION__, 1059 __LINE__)); 1060 goto out_free_ibuf; 1061 } 1062 memset(adapter->kscb_list, 0, sizeof(scb_t) * MBOX_MAX_SCSI_CMDS); 1063 1064 // memory allocation for our command packets 1065 if (megaraid_mbox_setup_dma_pools(adapter) != 0) { 1066 con_log(CL_ANN, (KERN_WARNING 1067 "megaraid: out of memory, %s %d\n", __FUNCTION__, 1068 __LINE__)); 1069 goto out_free_scb_list; 1070 } 1071 1072 // Adjust the scb pointers and link in the free pool 1073 epthru_pci_blk = raid_dev->epthru_pool; 1074 sg_pci_blk = raid_dev->sg_pool; 1075 mbox_pci_blk = raid_dev->mbox_pool; 1076 1077 for (i = 0; i < MBOX_MAX_SCSI_CMDS; i++) { 1078 scb = adapter->kscb_list + i; 1079 ccb = raid_dev->ccb_list + i; 1080 1081 ccb->mbox = (mbox_t *)(mbox_pci_blk[i].vaddr + 16); 1082 ccb->raw_mbox = (uint8_t *)ccb->mbox; 1083 ccb->mbox64 = (mbox64_t *)(mbox_pci_blk[i].vaddr + 8); 1084 ccb->mbox_dma_h = (unsigned long)mbox_pci_blk[i].dma_addr + 16; 1085 1086 // make sure the mailbox is aligned properly 1087 if (ccb->mbox_dma_h & 0x0F) { 1088 con_log(CL_ANN, (KERN_CRIT 1089 "megaraid mbox: not aligned on 16-bytes\n")); 1090 1091 goto out_teardown_dma_pools; 1092 } 1093 1094 ccb->epthru = (mraid_epassthru_t *) 1095 epthru_pci_blk[i].vaddr; 1096 ccb->epthru_dma_h = epthru_pci_blk[i].dma_addr; 1097 ccb->pthru = (mraid_passthru_t *)ccb->epthru; 1098 ccb->pthru_dma_h = ccb->epthru_dma_h; 1099 1100 1101 ccb->sgl64 = (mbox_sgl64 *)sg_pci_blk[i].vaddr; 1102 ccb->sgl_dma_h = sg_pci_blk[i].dma_addr; 1103 ccb->sgl32 = (mbox_sgl32 *)ccb->sgl64; 1104 1105 scb->ccb = (caddr_t)ccb; 1106 scb->gp = 0; 1107 1108 scb->sno = i; // command index 1109 1110 scb->scp = NULL; 1111 scb->state = 
SCB_FREE; 1112 scb->dma_direction = PCI_DMA_NONE; 1113 scb->dma_type = MRAID_DMA_NONE; 1114 scb->dev_channel = -1; 1115 scb->dev_target = -1; 1116 1117 // put scb in the free pool 1118 list_add_tail(&scb->list, &adapter->kscb_pool); 1119 } 1120 1121 return 0; 1122 1123 out_teardown_dma_pools: 1124 megaraid_mbox_teardown_dma_pools(adapter); 1125 out_free_scb_list: 1126 kfree(adapter->kscb_list); 1127 out_free_ibuf: 1128 pci_free_consistent(pdev, MBOX_IBUF_SIZE, (void *)adapter->ibuf, 1129 adapter->ibuf_dma_h); 1130 out_free_common_mbox: 1131 pci_free_consistent(adapter->pdev, sizeof(mbox64_t), 1132 (caddr_t)raid_dev->una_mbox64, raid_dev->una_mbox64_dma); 1133 1134 return -1; 1135 } 1136 1137 1138 /** 1139 * megaraid_free_cmd_packets - free memory 1140 * @adapter : soft state of the raid controller 1141 * 1142 * Release memory resources allocated for commands. 1143 */ 1144 static void 1145 megaraid_free_cmd_packets(adapter_t *adapter) 1146 { 1147 mraid_device_t *raid_dev = ADAP2RAIDDEV(adapter); 1148 1149 megaraid_mbox_teardown_dma_pools(adapter); 1150 1151 kfree(adapter->kscb_list); 1152 1153 pci_free_consistent(adapter->pdev, MBOX_IBUF_SIZE, 1154 (void *)adapter->ibuf, adapter->ibuf_dma_h); 1155 1156 pci_free_consistent(adapter->pdev, sizeof(mbox64_t), 1157 (caddr_t)raid_dev->una_mbox64, raid_dev->una_mbox64_dma); 1158 return; 1159 } 1160 1161 1162 /** 1163 * megaraid_mbox_setup_dma_pools - setup dma pool for command packets 1164 * @adapter : HBA soft state 1165 * 1166 * Setup the dma pools for mailbox, passthru and extended passthru structures, 1167 * and scatter-gather lists. 1168 */ 1169 static int 1170 megaraid_mbox_setup_dma_pools(adapter_t *adapter) 1171 { 1172 mraid_device_t *raid_dev = ADAP2RAIDDEV(adapter); 1173 struct mraid_pci_blk *epthru_pci_blk; 1174 struct mraid_pci_blk *sg_pci_blk; 1175 struct mraid_pci_blk *mbox_pci_blk; 1176 int i; 1177 1178 1179 1180 // Allocate memory for 16-bytes aligned mailboxes 1181 raid_dev->mbox_pool_handle = pci_pool_create("megaraid mbox pool", 1182 adapter->pdev, 1183 sizeof(mbox64_t) + 16, 1184 16, 0); 1185 1186 if (raid_dev->mbox_pool_handle == NULL) { 1187 goto fail_setup_dma_pool; 1188 } 1189 1190 mbox_pci_blk = raid_dev->mbox_pool; 1191 for (i = 0; i < MBOX_MAX_SCSI_CMDS; i++) { 1192 mbox_pci_blk[i].vaddr = pci_pool_alloc( 1193 raid_dev->mbox_pool_handle, 1194 GFP_KERNEL, 1195 &mbox_pci_blk[i].dma_addr); 1196 if (!mbox_pci_blk[i].vaddr) { 1197 goto fail_setup_dma_pool; 1198 } 1199 } 1200 1201 /* 1202 * Allocate memory for each embedded passthru strucuture pointer 1203 * Request for a 128 bytes aligned structure for each passthru command 1204 * structure 1205 * Since passthru and extended passthru commands are exclusive, they 1206 * share common memory pool. Passthru structures piggyback on memory 1207 * allocted to extended passthru since passthru is smaller of the two 1208 */ 1209 raid_dev->epthru_pool_handle = pci_pool_create("megaraid mbox pthru", 1210 adapter->pdev, sizeof(mraid_epassthru_t), 128, 0); 1211 1212 if (raid_dev->epthru_pool_handle == NULL) { 1213 goto fail_setup_dma_pool; 1214 } 1215 1216 epthru_pci_blk = raid_dev->epthru_pool; 1217 for (i = 0; i < MBOX_MAX_SCSI_CMDS; i++) { 1218 epthru_pci_blk[i].vaddr = pci_pool_alloc( 1219 raid_dev->epthru_pool_handle, 1220 GFP_KERNEL, 1221 &epthru_pci_blk[i].dma_addr); 1222 if (!epthru_pci_blk[i].vaddr) { 1223 goto fail_setup_dma_pool; 1224 } 1225 } 1226 1227 1228 // Allocate memory for each scatter-gather list. 
Request for 512 bytes 1229 // alignment for each sg list 1230 raid_dev->sg_pool_handle = pci_pool_create("megaraid mbox sg", 1231 adapter->pdev, 1232 sizeof(mbox_sgl64) * MBOX_MAX_SG_SIZE, 1233 512, 0); 1234 1235 if (raid_dev->sg_pool_handle == NULL) { 1236 goto fail_setup_dma_pool; 1237 } 1238 1239 sg_pci_blk = raid_dev->sg_pool; 1240 for (i = 0; i < MBOX_MAX_SCSI_CMDS; i++) { 1241 sg_pci_blk[i].vaddr = pci_pool_alloc( 1242 raid_dev->sg_pool_handle, 1243 GFP_KERNEL, 1244 &sg_pci_blk[i].dma_addr); 1245 if (!sg_pci_blk[i].vaddr) { 1246 goto fail_setup_dma_pool; 1247 } 1248 } 1249 1250 return 0; 1251 1252 fail_setup_dma_pool: 1253 megaraid_mbox_teardown_dma_pools(adapter); 1254 return -1; 1255 } 1256 1257 1258 /** 1259 * megaraid_mbox_teardown_dma_pools - teardown dma pools for command packets 1260 * @adapter : HBA soft state 1261 * 1262 * Teardown the dma pool for mailbox, passthru and extended passthru 1263 * structures, and scatter-gather lists. 1264 */ 1265 static void 1266 megaraid_mbox_teardown_dma_pools(adapter_t *adapter) 1267 { 1268 mraid_device_t *raid_dev = ADAP2RAIDDEV(adapter); 1269 struct mraid_pci_blk *epthru_pci_blk; 1270 struct mraid_pci_blk *sg_pci_blk; 1271 struct mraid_pci_blk *mbox_pci_blk; 1272 int i; 1273 1274 1275 sg_pci_blk = raid_dev->sg_pool; 1276 for (i = 0; i < MBOX_MAX_SCSI_CMDS && sg_pci_blk[i].vaddr; i++) { 1277 pci_pool_free(raid_dev->sg_pool_handle, sg_pci_blk[i].vaddr, 1278 sg_pci_blk[i].dma_addr); 1279 } 1280 if (raid_dev->sg_pool_handle) 1281 pci_pool_destroy(raid_dev->sg_pool_handle); 1282 1283 1284 epthru_pci_blk = raid_dev->epthru_pool; 1285 for (i = 0; i < MBOX_MAX_SCSI_CMDS && epthru_pci_blk[i].vaddr; i++) { 1286 pci_pool_free(raid_dev->epthru_pool_handle, 1287 epthru_pci_blk[i].vaddr, epthru_pci_blk[i].dma_addr); 1288 } 1289 if (raid_dev->epthru_pool_handle) 1290 pci_pool_destroy(raid_dev->epthru_pool_handle); 1291 1292 1293 mbox_pci_blk = raid_dev->mbox_pool; 1294 for (i = 0; i < MBOX_MAX_SCSI_CMDS && mbox_pci_blk[i].vaddr; i++) { 1295 pci_pool_free(raid_dev->mbox_pool_handle, 1296 mbox_pci_blk[i].vaddr, mbox_pci_blk[i].dma_addr); 1297 } 1298 if (raid_dev->mbox_pool_handle) 1299 pci_pool_destroy(raid_dev->mbox_pool_handle); 1300 1301 return; 1302 } 1303 1304 1305 /** 1306 * megaraid_alloc_scb - detach and return a scb from the free list 1307 * @adapter : controller's soft state 1308 * @scp : pointer to the scsi command to be executed 1309 * 1310 * Return the scb from the head of the free list. %NULL if there are none 1311 * available. 1312 */ 1313 static scb_t * 1314 megaraid_alloc_scb(adapter_t *adapter, struct scsi_cmnd *scp) 1315 { 1316 struct list_head *head = &adapter->kscb_pool; 1317 scb_t *scb = NULL; 1318 unsigned long flags; 1319 1320 // detach scb from free pool 1321 spin_lock_irqsave(SCSI_FREE_LIST_LOCK(adapter), flags); 1322 1323 if (list_empty(head)) { 1324 spin_unlock_irqrestore(SCSI_FREE_LIST_LOCK(adapter), flags); 1325 return NULL; 1326 } 1327 1328 scb = list_entry(head->next, scb_t, list); 1329 list_del_init(&scb->list); 1330 1331 spin_unlock_irqrestore(SCSI_FREE_LIST_LOCK(adapter), flags); 1332 1333 scb->state = SCB_ACTIVE; 1334 scb->scp = scp; 1335 scb->dma_type = MRAID_DMA_NONE; 1336 1337 return scb; 1338 } 1339 1340 1341 /** 1342 * megaraid_dealloc_scb - return the scb to the free pool 1343 * @adapter : controller's soft state 1344 * @scb : scb to be freed 1345 * 1346 * Return the scb back to the free list of scbs. The caller must 'flush' the 1347 * SCB before calling us. E.g., performing pci_unamp and/or pci_sync etc. 
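 *
 * This is the counterpart of megaraid_alloc_scb(): the SCB is marked
 * SCB_FREE, its scsi command pointer is cleared, and it is linked back into
 * kscb_pool under SCSI_FREE_LIST_LOCK.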
1348 * NOTE NOTE: Make sure the scb is not on any list before calling this 1349 * routine. 1350 */ 1351 static inline void 1352 megaraid_dealloc_scb(adapter_t *adapter, scb_t *scb) 1353 { 1354 unsigned long flags; 1355 1356 // put scb in the free pool 1357 scb->state = SCB_FREE; 1358 scb->scp = NULL; 1359 spin_lock_irqsave(SCSI_FREE_LIST_LOCK(adapter), flags); 1360 1361 list_add(&scb->list, &adapter->kscb_pool); 1362 1363 spin_unlock_irqrestore(SCSI_FREE_LIST_LOCK(adapter), flags); 1364 1365 return; 1366 } 1367 1368 1369 /** 1370 * megaraid_mbox_mksgl - make the scatter-gather list 1371 * @adapter : controller's soft state 1372 * @scb : scsi control block 1373 * 1374 * Prepare the scatter-gather list. 1375 */ 1376 static int 1377 megaraid_mbox_mksgl(adapter_t *adapter, scb_t *scb) 1378 { 1379 struct scatterlist *sgl; 1380 mbox_ccb_t *ccb; 1381 struct page *page; 1382 unsigned long offset; 1383 struct scsi_cmnd *scp; 1384 int sgcnt; 1385 int i; 1386 1387 1388 scp = scb->scp; 1389 ccb = (mbox_ccb_t *)scb->ccb; 1390 1391 // no mapping required if no data to be transferred 1392 if (!scp->request_buffer || !scp->request_bufflen) 1393 return 0; 1394 1395 if (!scp->use_sg) { /* scatter-gather list not used */ 1396 1397 page = virt_to_page(scp->request_buffer); 1398 1399 offset = ((unsigned long)scp->request_buffer & ~PAGE_MASK); 1400 1401 ccb->buf_dma_h = pci_map_page(adapter->pdev, page, offset, 1402 scp->request_bufflen, 1403 scb->dma_direction); 1404 scb->dma_type = MRAID_DMA_WBUF; 1405 1406 /* 1407 * We need to handle special 64-bit commands that need a 1408 * minimum of 1 SG 1409 */ 1410 sgcnt = 1; 1411 ccb->sgl64[0].address = ccb->buf_dma_h; 1412 ccb->sgl64[0].length = scp->request_bufflen; 1413 1414 return sgcnt; 1415 } 1416 1417 sgl = (struct scatterlist *)scp->request_buffer; 1418 1419 // The number of sg elements returned must not exceed our limit 1420 sgcnt = pci_map_sg(adapter->pdev, sgl, scp->use_sg, 1421 scb->dma_direction); 1422 1423 if (sgcnt > adapter->sglen) { 1424 con_log(CL_ANN, (KERN_CRIT 1425 "megaraid critical: too many sg elements:%d\n", 1426 sgcnt)); 1427 BUG(); 1428 } 1429 1430 scb->dma_type = MRAID_DMA_WSG; 1431 1432 for (i = 0; i < sgcnt; i++, sgl++) { 1433 ccb->sgl64[i].address = sg_dma_address(sgl); 1434 ccb->sgl64[i].length = sg_dma_len(sgl); 1435 } 1436 1437 // Return count of SG nodes 1438 return sgcnt; 1439 } 1440 1441 1442 /** 1443 * mbox_post_cmd - issue a mailbox command 1444 * @adapter : controller's soft state 1445 * @scb : command to be issued 1446 * 1447 * Post the command to the controller if mailbox is available. 1448 */ 1449 static int 1450 mbox_post_cmd(adapter_t *adapter, scb_t *scb) 1451 { 1452 mraid_device_t *raid_dev = ADAP2RAIDDEV(adapter); 1453 mbox64_t *mbox64; 1454 mbox_t *mbox; 1455 mbox_ccb_t *ccb; 1456 unsigned long flags; 1457 unsigned int i = 0; 1458 1459 1460 ccb = (mbox_ccb_t *)scb->ccb; 1461 mbox = raid_dev->mbox; 1462 mbox64 = raid_dev->mbox64; 1463 1464 /* 1465 * Check for busy mailbox. If it is, return failure - the caller 1466 * should retry later. 
1467 */ 1468 spin_lock_irqsave(MAILBOX_LOCK(raid_dev), flags); 1469 1470 if (unlikely(mbox->busy)) { 1471 do { 1472 udelay(1); 1473 i++; 1474 rmb(); 1475 } while(mbox->busy && (i < max_mbox_busy_wait)); 1476 1477 if (mbox->busy) { 1478 1479 spin_unlock_irqrestore(MAILBOX_LOCK(raid_dev), flags); 1480 1481 return -1; 1482 } 1483 } 1484 1485 1486 // Copy this command's mailbox data into "adapter's" mailbox 1487 memcpy((caddr_t)mbox64, (caddr_t)ccb->mbox64, 22); 1488 mbox->cmdid = scb->sno; 1489 1490 adapter->outstanding_cmds++; 1491 1492 if (scb->dma_direction == PCI_DMA_TODEVICE) { 1493 if (!scb->scp->use_sg) { // sg list not used 1494 pci_dma_sync_single_for_device(adapter->pdev, 1495 ccb->buf_dma_h, 1496 scb->scp->request_bufflen, 1497 PCI_DMA_TODEVICE); 1498 } 1499 else { 1500 pci_dma_sync_sg_for_device(adapter->pdev, 1501 scb->scp->request_buffer, 1502 scb->scp->use_sg, PCI_DMA_TODEVICE); 1503 } 1504 } 1505 1506 mbox->busy = 1; // Set busy 1507 mbox->poll = 0; 1508 mbox->ack = 0; 1509 wmb(); 1510 1511 WRINDOOR(raid_dev, raid_dev->mbox_dma | 0x1); 1512 1513 spin_unlock_irqrestore(MAILBOX_LOCK(raid_dev), flags); 1514 1515 return 0; 1516 } 1517 1518 1519 /** 1520 * megaraid_queue_command - generic queue entry point for all LLDs 1521 * @scp : pointer to the scsi command to be executed 1522 * @done : callback routine to be called after the cmd has be completed 1523 * 1524 * Queue entry point for mailbox based controllers. 1525 */ 1526 static int 1527 megaraid_queue_command(struct scsi_cmnd *scp, void (*done)(struct scsi_cmnd *)) 1528 { 1529 adapter_t *adapter; 1530 scb_t *scb; 1531 int if_busy; 1532 1533 adapter = SCP2ADAPTER(scp); 1534 scp->scsi_done = done; 1535 scp->result = 0; 1536 1537 /* 1538 * Allocate and build a SCB request 1539 * if_busy flag will be set if megaraid_mbox_build_cmd() command could 1540 * not allocate scb. We will return non-zero status in that case. 1541 * NOTE: scb can be null even though certain commands completed 1542 * successfully, e.g., MODE_SENSE and TEST_UNIT_READY, it would 1543 * return 0 in that case, and we would do the callback right away. 1544 */ 1545 if_busy = 0; 1546 scb = megaraid_mbox_build_cmd(adapter, scp, &if_busy); 1547 if (!scb) { // command already completed 1548 done(scp); 1549 return 0; 1550 } 1551 1552 megaraid_mbox_runpendq(adapter, scb); 1553 return if_busy; 1554 } 1555 1556 /** 1557 * megaraid_mbox_build_cmd - transform the mid-layer scsi commands 1558 * @adapter : controller's soft state 1559 * @scp : mid-layer scsi command pointer 1560 * @busy : set if request could not be completed because of lack of 1561 * resources 1562 * 1563 * Transform the mid-layer scsi command to megaraid firmware lingua. 1564 * Convert the command issued by mid-layer to format understood by megaraid 1565 * firmware. We also complete certain commands without sending them to firmware. 
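 *
 * For logical drives: TEST_UNIT_READY (with clustering disabled),
 * MODE_SENSE, and INQUIRY with the EVPD bit set are completed inline;
 * INQUIRY and READ_CAPACITY become 64-bit passthru commands; READ/WRITE
 * with 6-, 10- and 12-byte CDBs become MBOXCMD_LREAD64/MBOXCMD_LWRITE64;
 * RESERVE/RELEASE become cluster reservation commands.  Physical device
 * commands are wrapped in (extended) passthru frames; targets beyond id 15
 * or LUN 7, and channels skipped because of fast_load, are failed with
 * DID_BAD_TARGET.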
1566 */ 1567 static scb_t * 1568 megaraid_mbox_build_cmd(adapter_t *adapter, struct scsi_cmnd *scp, int *busy) 1569 { 1570 mraid_device_t *rdev = ADAP2RAIDDEV(adapter); 1571 int channel; 1572 int target; 1573 int islogical; 1574 mbox_ccb_t *ccb; 1575 mraid_passthru_t *pthru; 1576 mbox64_t *mbox64; 1577 mbox_t *mbox; 1578 scb_t *scb; 1579 char skip[] = "skipping"; 1580 char scan[] = "scanning"; 1581 char *ss; 1582 1583 1584 /* 1585 * Get the appropriate device map for the device this command is 1586 * intended for 1587 */ 1588 MRAID_GET_DEVICE_MAP(adapter, scp, channel, target, islogical); 1589 1590 /* 1591 * Logical drive commands 1592 */ 1593 if (islogical) { 1594 switch (scp->cmnd[0]) { 1595 case TEST_UNIT_READY: 1596 /* 1597 * Do we support clustering and is the support enabled 1598 * If no, return success always 1599 */ 1600 if (!adapter->ha) { 1601 scp->result = (DID_OK << 16); 1602 return NULL; 1603 } 1604 1605 if (!(scb = megaraid_alloc_scb(adapter, scp))) { 1606 scp->result = (DID_ERROR << 16); 1607 *busy = 1; 1608 return NULL; 1609 } 1610 1611 scb->dma_direction = scp->sc_data_direction; 1612 scb->dev_channel = 0xFF; 1613 scb->dev_target = target; 1614 ccb = (mbox_ccb_t *)scb->ccb; 1615 1616 /* 1617 * The command id will be provided by the command 1618 * issuance routine 1619 */ 1620 ccb->raw_mbox[0] = CLUSTER_CMD; 1621 ccb->raw_mbox[2] = RESERVATION_STATUS; 1622 ccb->raw_mbox[3] = target; 1623 1624 return scb; 1625 1626 case MODE_SENSE: 1627 if (scp->use_sg) { 1628 struct scatterlist *sgl; 1629 caddr_t vaddr; 1630 1631 sgl = (struct scatterlist *)scp->request_buffer; 1632 if (sgl->page) { 1633 vaddr = (caddr_t) 1634 (page_address((&sgl[0])->page) 1635 + (&sgl[0])->offset); 1636 1637 memset(vaddr, 0, scp->cmnd[4]); 1638 } 1639 else { 1640 con_log(CL_ANN, (KERN_WARNING 1641 "megaraid mailbox: invalid sg:%d\n", 1642 __LINE__)); 1643 } 1644 } 1645 else { 1646 memset(scp->request_buffer, 0, scp->cmnd[4]); 1647 } 1648 scp->result = (DID_OK << 16); 1649 return NULL; 1650 1651 case INQUIRY: 1652 /* 1653 * Display the channel scan for logical drives 1654 * Do not display scan for a channel if already done. 
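			 *
			 * rdev->last_disp keeps one bit per SCSI channel, so
			 * the "scanning" message below is emitted only for
			 * the first INQUIRY seen on a given channel.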
1655 */ 1656 if (!(rdev->last_disp & (1L << SCP2CHANNEL(scp)))) { 1657 1658 con_log(CL_ANN, (KERN_INFO 1659 "scsi[%d]: scanning scsi channel %d", 1660 adapter->host->host_no, 1661 SCP2CHANNEL(scp))); 1662 1663 con_log(CL_ANN, ( 1664 " [virtual] for logical drives\n")); 1665 1666 rdev->last_disp |= (1L << SCP2CHANNEL(scp)); 1667 } 1668 1669 if (scp->cmnd[1] & MEGA_SCSI_INQ_EVPD) { 1670 scp->sense_buffer[0] = 0x70; 1671 scp->sense_buffer[2] = ILLEGAL_REQUEST; 1672 scp->sense_buffer[12] = MEGA_INVALID_FIELD_IN_CDB; 1673 scp->result = CHECK_CONDITION << 1; 1674 return NULL; 1675 } 1676 1677 /* Fall through */ 1678 1679 case READ_CAPACITY: 1680 /* 1681 * Do not allow LUN > 0 for logical drives and 1682 * requests for more than 40 logical drives 1683 */ 1684 if (SCP2LUN(scp)) { 1685 scp->result = (DID_BAD_TARGET << 16); 1686 return NULL; 1687 } 1688 if ((target % 0x80) >= MAX_LOGICAL_DRIVES_40LD) { 1689 scp->result = (DID_BAD_TARGET << 16); 1690 return NULL; 1691 } 1692 1693 1694 /* Allocate a SCB and initialize passthru */ 1695 if (!(scb = megaraid_alloc_scb(adapter, scp))) { 1696 scp->result = (DID_ERROR << 16); 1697 *busy = 1; 1698 return NULL; 1699 } 1700 1701 ccb = (mbox_ccb_t *)scb->ccb; 1702 scb->dev_channel = 0xFF; 1703 scb->dev_target = target; 1704 pthru = ccb->pthru; 1705 mbox = ccb->mbox; 1706 mbox64 = ccb->mbox64; 1707 1708 pthru->timeout = 0; 1709 pthru->ars = 1; 1710 pthru->reqsenselen = 14; 1711 pthru->islogical = 1; 1712 pthru->logdrv = target; 1713 pthru->cdblen = scp->cmd_len; 1714 memcpy(pthru->cdb, scp->cmnd, scp->cmd_len); 1715 1716 mbox->cmd = MBOXCMD_PASSTHRU64; 1717 scb->dma_direction = scp->sc_data_direction; 1718 1719 pthru->dataxferlen = scp->request_bufflen; 1720 pthru->dataxferaddr = ccb->sgl_dma_h; 1721 pthru->numsge = megaraid_mbox_mksgl(adapter, 1722 scb); 1723 1724 mbox->xferaddr = 0xFFFFFFFF; 1725 mbox64->xferaddr_lo = (uint32_t )ccb->pthru_dma_h; 1726 mbox64->xferaddr_hi = 0; 1727 1728 return scb; 1729 1730 case READ_6: 1731 case WRITE_6: 1732 case READ_10: 1733 case WRITE_10: 1734 case READ_12: 1735 case WRITE_12: 1736 1737 /* 1738 * Allocate a SCB and initialize mailbox 1739 */ 1740 if (!(scb = megaraid_alloc_scb(adapter, scp))) { 1741 scp->result = (DID_ERROR << 16); 1742 *busy = 1; 1743 return NULL; 1744 } 1745 ccb = (mbox_ccb_t *)scb->ccb; 1746 scb->dev_channel = 0xFF; 1747 scb->dev_target = target; 1748 mbox = ccb->mbox; 1749 mbox64 = ccb->mbox64; 1750 mbox->logdrv = target; 1751 1752 /* 1753 * A little HACK: 2nd bit is zero for all scsi read 1754 * commands and is set for all scsi write commands 1755 */ 1756 mbox->cmd = (scp->cmnd[0] & 0x02) ? 
MBOXCMD_LWRITE64: 1757 MBOXCMD_LREAD64 ; 1758 1759 /* 1760 * 6-byte READ(0x08) or WRITE(0x0A) cdb 1761 */ 1762 if (scp->cmd_len == 6) { 1763 mbox->numsectors = (uint32_t)scp->cmnd[4]; 1764 mbox->lba = 1765 ((uint32_t)scp->cmnd[1] << 16) | 1766 ((uint32_t)scp->cmnd[2] << 8) | 1767 (uint32_t)scp->cmnd[3]; 1768 1769 mbox->lba &= 0x1FFFFF; 1770 } 1771 1772 /* 1773 * 10-byte READ(0x28) or WRITE(0x2A) cdb 1774 */ 1775 else if (scp->cmd_len == 10) { 1776 mbox->numsectors = 1777 (uint32_t)scp->cmnd[8] | 1778 ((uint32_t)scp->cmnd[7] << 8); 1779 mbox->lba = 1780 ((uint32_t)scp->cmnd[2] << 24) | 1781 ((uint32_t)scp->cmnd[3] << 16) | 1782 ((uint32_t)scp->cmnd[4] << 8) | 1783 (uint32_t)scp->cmnd[5]; 1784 } 1785 1786 /* 1787 * 12-byte READ(0xA8) or WRITE(0xAA) cdb 1788 */ 1789 else if (scp->cmd_len == 12) { 1790 mbox->lba = 1791 ((uint32_t)scp->cmnd[2] << 24) | 1792 ((uint32_t)scp->cmnd[3] << 16) | 1793 ((uint32_t)scp->cmnd[4] << 8) | 1794 (uint32_t)scp->cmnd[5]; 1795 1796 mbox->numsectors = 1797 ((uint32_t)scp->cmnd[6] << 24) | 1798 ((uint32_t)scp->cmnd[7] << 16) | 1799 ((uint32_t)scp->cmnd[8] << 8) | 1800 (uint32_t)scp->cmnd[9]; 1801 } 1802 else { 1803 con_log(CL_ANN, (KERN_WARNING 1804 "megaraid: unsupported CDB length\n")); 1805 1806 megaraid_dealloc_scb(adapter, scb); 1807 1808 scp->result = (DID_ERROR << 16); 1809 return NULL; 1810 } 1811 1812 scb->dma_direction = scp->sc_data_direction; 1813 1814 // Calculate Scatter-Gather info 1815 mbox64->xferaddr_lo = (uint32_t )ccb->sgl_dma_h; 1816 mbox->numsge = megaraid_mbox_mksgl(adapter, 1817 scb); 1818 mbox->xferaddr = 0xFFFFFFFF; 1819 mbox64->xferaddr_hi = 0; 1820 1821 return scb; 1822 1823 case RESERVE: 1824 case RELEASE: 1825 /* 1826 * Do we support clustering and is the support enabled 1827 */ 1828 if (!adapter->ha) { 1829 scp->result = (DID_BAD_TARGET << 16); 1830 return NULL; 1831 } 1832 1833 /* 1834 * Allocate a SCB and initialize mailbox 1835 */ 1836 if (!(scb = megaraid_alloc_scb(adapter, scp))) { 1837 scp->result = (DID_ERROR << 16); 1838 *busy = 1; 1839 return NULL; 1840 } 1841 1842 ccb = (mbox_ccb_t *)scb->ccb; 1843 scb->dev_channel = 0xFF; 1844 scb->dev_target = target; 1845 ccb->raw_mbox[0] = CLUSTER_CMD; 1846 ccb->raw_mbox[2] = (scp->cmnd[0] == RESERVE) ? 1847 RESERVE_LD : RELEASE_LD; 1848 1849 ccb->raw_mbox[3] = target; 1850 scb->dma_direction = scp->sc_data_direction; 1851 1852 return scb; 1853 1854 default: 1855 scp->result = (DID_BAD_TARGET << 16); 1856 return NULL; 1857 } 1858 } 1859 else { // Passthru device commands 1860 1861 // Do not allow access to target id > 15 or LUN > 7 1862 if (target > 15 || SCP2LUN(scp) > 7) { 1863 scp->result = (DID_BAD_TARGET << 16); 1864 return NULL; 1865 } 1866 1867 // if fast load option was set and scan for last device is 1868 // over, reset the fast_load flag so that during a possible 1869 // next scan, devices can be made available 1870 if (rdev->fast_load && (target == 15) && 1871 (SCP2CHANNEL(scp) == adapter->max_channel -1)) { 1872 1873 con_log(CL_ANN, (KERN_INFO 1874 "megaraid[%d]: physical device scan re-enabled\n", 1875 adapter->host->host_no)); 1876 rdev->fast_load = 0; 1877 } 1878 1879 /* 1880 * Display the channel scan for physical devices 1881 */ 1882 if (!(rdev->last_disp & (1L << SCP2CHANNEL(scp)))) { 1883 1884 ss = rdev->fast_load ? 
skip : scan; 1885 1886 con_log(CL_ANN, (KERN_INFO 1887 "scsi[%d]: %s scsi channel %d [Phy %d]", 1888 adapter->host->host_no, ss, SCP2CHANNEL(scp), 1889 channel)); 1890 1891 con_log(CL_ANN, ( 1892 " for non-raid devices\n")); 1893 1894 rdev->last_disp |= (1L << SCP2CHANNEL(scp)); 1895 } 1896 1897 // disable channel sweep if fast load option given 1898 if (rdev->fast_load) { 1899 scp->result = (DID_BAD_TARGET << 16); 1900 return NULL; 1901 } 1902 1903 // Allocate a SCB and initialize passthru 1904 if (!(scb = megaraid_alloc_scb(adapter, scp))) { 1905 scp->result = (DID_ERROR << 16); 1906 *busy = 1; 1907 return NULL; 1908 } 1909 1910 ccb = (mbox_ccb_t *)scb->ccb; 1911 scb->dev_channel = channel; 1912 scb->dev_target = target; 1913 scb->dma_direction = scp->sc_data_direction; 1914 mbox = ccb->mbox; 1915 mbox64 = ccb->mbox64; 1916 1917 // Does this firmware support extended CDBs 1918 if (adapter->max_cdb_sz == 16) { 1919 mbox->cmd = MBOXCMD_EXTPTHRU; 1920 1921 megaraid_mbox_prepare_epthru(adapter, scb, scp); 1922 1923 mbox64->xferaddr_lo = (uint32_t)ccb->epthru_dma_h; 1924 mbox64->xferaddr_hi = 0; 1925 mbox->xferaddr = 0xFFFFFFFF; 1926 } 1927 else { 1928 mbox->cmd = MBOXCMD_PASSTHRU64; 1929 1930 megaraid_mbox_prepare_pthru(adapter, scb, scp); 1931 1932 mbox64->xferaddr_lo = (uint32_t)ccb->pthru_dma_h; 1933 mbox64->xferaddr_hi = 0; 1934 mbox->xferaddr = 0xFFFFFFFF; 1935 } 1936 return scb; 1937 } 1938 1939 // NOT REACHED 1940 } 1941 1942 1943 /** 1944 * megaraid_mbox_runpendq - execute commands queued in the pending queue 1945 * @adapter : controller's soft state 1946 * @scb_q : SCB to be queued in the pending list 1947 * 1948 * Scan the pending list for commands which are not yet issued and try to 1949 * post to the controller. The SCB can be a null pointer, which would indicate 1950 * no SCB to be queue, just try to execute the ones in the pending list. 1951 * 1952 * NOTE: We do not actually traverse the pending list. The SCBs are plucked 1953 * out from the head of the pending list. If it is successfully issued, the 1954 * next SCB is at the head now. 1955 */ 1956 static void 1957 megaraid_mbox_runpendq(adapter_t *adapter, scb_t *scb_q) 1958 { 1959 scb_t *scb; 1960 unsigned long flags; 1961 1962 spin_lock_irqsave(PENDING_LIST_LOCK(adapter), flags); 1963 1964 if (scb_q) { 1965 scb_q->state = SCB_PENDQ; 1966 list_add_tail(&scb_q->list, &adapter->pend_list); 1967 } 1968 1969 // if the adapter in not in quiescent mode, post the commands to FW 1970 if (adapter->quiescent) { 1971 spin_unlock_irqrestore(PENDING_LIST_LOCK(adapter), flags); 1972 return; 1973 } 1974 1975 while (!list_empty(&adapter->pend_list)) { 1976 1977 assert_spin_locked(PENDING_LIST_LOCK(adapter)); 1978 1979 scb = list_entry(adapter->pend_list.next, scb_t, list); 1980 1981 // remove the scb from the pending list and try to 1982 // issue. If we are unable to issue it, put back in 1983 // the pending list and return 1984 1985 list_del_init(&scb->list); 1986 1987 spin_unlock_irqrestore(PENDING_LIST_LOCK(adapter), flags); 1988 1989 // if mailbox was busy, return SCB back to pending 1990 // list. 
Make sure to add at the head, since that's 1991 // where it would have been removed from 1992 1993 scb->state = SCB_ISSUED; 1994 1995 if (mbox_post_cmd(adapter, scb) != 0) { 1996 1997 spin_lock_irqsave(PENDING_LIST_LOCK(adapter), flags); 1998 1999 scb->state = SCB_PENDQ; 2000 2001 list_add(&scb->list, &adapter->pend_list); 2002 2003 spin_unlock_irqrestore(PENDING_LIST_LOCK(adapter), 2004 flags); 2005 2006 return; 2007 } 2008 2009 spin_lock_irqsave(PENDING_LIST_LOCK(adapter), flags); 2010 } 2011 2012 spin_unlock_irqrestore(PENDING_LIST_LOCK(adapter), flags); 2013 2014 2015 return; 2016 } 2017 2018 2019 /** 2020 * megaraid_mbox_prepare_pthru - prepare a command for physical devices 2021 * @adapter : pointer to controller's soft state 2022 * @scb : scsi control block 2023 * @scp : scsi command from the mid-layer 2024 * 2025 * Prepare a command for the scsi physical devices. 2026 */ 2027 static void 2028 megaraid_mbox_prepare_pthru(adapter_t *adapter, scb_t *scb, 2029 struct scsi_cmnd *scp) 2030 { 2031 mbox_ccb_t *ccb; 2032 mraid_passthru_t *pthru; 2033 uint8_t channel; 2034 uint8_t target; 2035 2036 ccb = (mbox_ccb_t *)scb->ccb; 2037 pthru = ccb->pthru; 2038 channel = scb->dev_channel; 2039 target = scb->dev_target; 2040 2041 // 0=6sec, 1=60sec, 2=10min, 3=3hrs, 4=NO timeout 2042 pthru->timeout = 4; 2043 pthru->ars = 1; 2044 pthru->islogical = 0; 2045 pthru->channel = 0; 2046 pthru->target = (channel << 4) | target; 2047 pthru->logdrv = SCP2LUN(scp); 2048 pthru->reqsenselen = 14; 2049 pthru->cdblen = scp->cmd_len; 2050 2051 memcpy(pthru->cdb, scp->cmnd, scp->cmd_len); 2052 2053 if (scp->request_bufflen) { 2054 pthru->dataxferlen = scp->request_bufflen; 2055 pthru->dataxferaddr = ccb->sgl_dma_h; 2056 pthru->numsge = megaraid_mbox_mksgl(adapter, scb); 2057 } 2058 else { 2059 pthru->dataxferaddr = 0; 2060 pthru->dataxferlen = 0; 2061 pthru->numsge = 0; 2062 } 2063 return; 2064 } 2065 2066 2067 /** 2068 * megaraid_mbox_prepare_epthru - prepare a command for physical devices 2069 * @adapter : pointer to controller's soft state 2070 * @scb : scsi control block 2071 * @scp : scsi command from the mid-layer 2072 * 2073 * Prepare a command for the scsi physical devices. This rountine prepares 2074 * commands for devices which can take extended CDBs (>10 bytes). 2075 */ 2076 static void 2077 megaraid_mbox_prepare_epthru(adapter_t *adapter, scb_t *scb, 2078 struct scsi_cmnd *scp) 2079 { 2080 mbox_ccb_t *ccb; 2081 mraid_epassthru_t *epthru; 2082 uint8_t channel; 2083 uint8_t target; 2084 2085 ccb = (mbox_ccb_t *)scb->ccb; 2086 epthru = ccb->epthru; 2087 channel = scb->dev_channel; 2088 target = scb->dev_target; 2089 2090 // 0=6sec, 1=60sec, 2=10min, 3=3hrs, 4=NO timeout 2091 epthru->timeout = 4; 2092 epthru->ars = 1; 2093 epthru->islogical = 0; 2094 epthru->channel = 0; 2095 epthru->target = (channel << 4) | target; 2096 epthru->logdrv = SCP2LUN(scp); 2097 epthru->reqsenselen = 14; 2098 epthru->cdblen = scp->cmd_len; 2099 2100 memcpy(epthru->cdb, scp->cmnd, scp->cmd_len); 2101 2102 if (scp->request_bufflen) { 2103 epthru->dataxferlen = scp->request_bufflen; 2104 epthru->dataxferaddr = ccb->sgl_dma_h; 2105 epthru->numsge = megaraid_mbox_mksgl(adapter, scb); 2106 } 2107 else { 2108 epthru->dataxferaddr = 0; 2109 epthru->dataxferlen = 0; 2110 epthru->numsge = 0; 2111 } 2112 return; 2113 } 2114 2115 2116 /** 2117 * megaraid_ack_sequence - interrupt ack sequence for memory mapped HBAs 2118 * @adapter : controller's soft state 2119 * 2120 * Interrupt acknowledgement sequence for memory mapped HBAs. 
Find out the 2121 * completed command and put them on the completed list for later processing. 2122 * 2123 * Returns: 1 if the interrupt is valid, 0 otherwise 2124 */ 2125 static int 2126 megaraid_ack_sequence(adapter_t *adapter) 2127 { 2128 mraid_device_t *raid_dev = ADAP2RAIDDEV(adapter); 2129 mbox_t *mbox; 2130 scb_t *scb; 2131 uint8_t nstatus; 2132 uint8_t completed[MBOX_MAX_FIRMWARE_STATUS]; 2133 struct list_head clist; 2134 int handled; 2135 uint32_t dword; 2136 unsigned long flags; 2137 int i, j; 2138 2139 2140 mbox = raid_dev->mbox; 2141 2142 // move the SCBs from the firmware completed array to our local list 2143 INIT_LIST_HEAD(&clist); 2144 2145 // loop till F/W has more commands for us to complete 2146 handled = 0; 2147 spin_lock_irqsave(MAILBOX_LOCK(raid_dev), flags); 2148 do { 2149 /* 2150 * Check if a valid interrupt is pending. If found, force the 2151 * interrupt line low. 2152 */ 2153 dword = RDOUTDOOR(raid_dev); 2154 if (dword != 0x10001234) break; 2155 2156 handled = 1; 2157 2158 WROUTDOOR(raid_dev, 0x10001234); 2159 2160 nstatus = 0; 2161 // wait for valid numstatus to post 2162 for (i = 0; i < 0xFFFFF; i++) { 2163 if (mbox->numstatus != 0xFF) { 2164 nstatus = mbox->numstatus; 2165 break; 2166 } 2167 rmb(); 2168 } 2169 mbox->numstatus = 0xFF; 2170 2171 adapter->outstanding_cmds -= nstatus; 2172 2173 for (i = 0; i < nstatus; i++) { 2174 2175 // wait for valid command index to post 2176 for (j = 0; j < 0xFFFFF; j++) { 2177 if (mbox->completed[i] != 0xFF) break; 2178 rmb(); 2179 } 2180 completed[i] = mbox->completed[i]; 2181 mbox->completed[i] = 0xFF; 2182 2183 if (completed[i] == 0xFF) { 2184 con_log(CL_ANN, (KERN_CRIT 2185 "megaraid: command posting timed out\n")); 2186 2187 BUG(); 2188 continue; 2189 } 2190 2191 // Get SCB associated with this command id 2192 if (completed[i] >= MBOX_MAX_SCSI_CMDS) { 2193 // a cmm command 2194 scb = adapter->uscb_list + (completed[i] - 2195 MBOX_MAX_SCSI_CMDS); 2196 } 2197 else { 2198 // an os command 2199 scb = adapter->kscb_list + completed[i]; 2200 } 2201 2202 scb->status = mbox->status; 2203 list_add_tail(&scb->list, &clist); 2204 } 2205 2206 // Acknowledge interrupt 2207 WRINDOOR(raid_dev, 0x02); 2208 2209 } while(1); 2210 2211 spin_unlock_irqrestore(MAILBOX_LOCK(raid_dev), flags); 2212 2213 2214 // put the completed commands in the completed list. DPC would 2215 // complete these commands later 2216 spin_lock_irqsave(COMPLETED_LIST_LOCK(adapter), flags); 2217 2218 list_splice(&clist, &adapter->completed_list); 2219 2220 spin_unlock_irqrestore(COMPLETED_LIST_LOCK(adapter), flags); 2221 2222 2223 // schedule the DPC if there is some work for it 2224 if (handled) 2225 tasklet_schedule(&adapter->dpc_h); 2226 2227 return handled; 2228 } 2229 2230 2231 /** 2232 * megaraid_isr - isr for memory based mailbox based controllers 2233 * @irq : irq 2234 * @devp : pointer to our soft state 2235 * 2236 * Interrupt service routine for memory-mapped mailbox controllers. 2237 */ 2238 static irqreturn_t 2239 megaraid_isr(int irq, void *devp) 2240 { 2241 adapter_t *adapter = devp; 2242 int handled; 2243 2244 handled = megaraid_ack_sequence(adapter); 2245 2246 /* Loop through any pending requests */ 2247 if (!adapter->quiescent) { 2248 megaraid_mbox_runpendq(adapter, NULL); 2249 } 2250 2251 return IRQ_RETVAL(handled); 2252 } 2253 2254 2255 /** 2256 * megaraid_mbox_sync_scb - sync kernel buffers 2257 * @adapter : controller's soft state 2258 * @scb : pointer to the resource packet 2259 * 2260 * DMA sync if required. 
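 *
 * For data coming from the device, the mapping must be synced for the CPU
 * before it is torn down. A rough sketch of the single-buffer case handled
 * below (the scatter-gather case uses the _sg variants analogously):
 *
 *	if (scb->dma_direction == PCI_DMA_FROMDEVICE)
 *		pci_dma_sync_single_for_cpu(adapter->pdev, ccb->buf_dma_h,
 *				scb->scp->request_bufflen,
 *				PCI_DMA_FROMDEVICE);
 *	pci_unmap_page(adapter->pdev, ccb->buf_dma_h,
 *			scb->scp->request_bufflen, scb->dma_direction);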
2261 */ 2262 static void 2263 megaraid_mbox_sync_scb(adapter_t *adapter, scb_t *scb) 2264 { 2265 mbox_ccb_t *ccb; 2266 2267 ccb = (mbox_ccb_t *)scb->ccb; 2268 2269 switch (scb->dma_type) { 2270 2271 case MRAID_DMA_WBUF: 2272 if (scb->dma_direction == PCI_DMA_FROMDEVICE) { 2273 pci_dma_sync_single_for_cpu(adapter->pdev, 2274 ccb->buf_dma_h, 2275 scb->scp->request_bufflen, 2276 PCI_DMA_FROMDEVICE); 2277 } 2278 2279 pci_unmap_page(adapter->pdev, ccb->buf_dma_h, 2280 scb->scp->request_bufflen, scb->dma_direction); 2281 2282 break; 2283 2284 case MRAID_DMA_WSG: 2285 if (scb->dma_direction == PCI_DMA_FROMDEVICE) { 2286 pci_dma_sync_sg_for_cpu(adapter->pdev, 2287 scb->scp->request_buffer, 2288 scb->scp->use_sg, PCI_DMA_FROMDEVICE); 2289 } 2290 2291 pci_unmap_sg(adapter->pdev, scb->scp->request_buffer, 2292 scb->scp->use_sg, scb->dma_direction); 2293 2294 break; 2295 2296 default: 2297 break; 2298 } 2299 2300 return; 2301 } 2302 2303 2304 /** 2305 * megaraid_mbox_dpc - the tasklet to complete the commands from completed list 2306 * @devp : pointer to HBA soft state 2307 * 2308 * Pick up the commands from the completed list and send back to the owners. 2309 * This is a reentrant function and does not assume any locks are held while 2310 * it is being called. 2311 */ 2312 static void 2313 megaraid_mbox_dpc(unsigned long devp) 2314 { 2315 adapter_t *adapter = (adapter_t *)devp; 2316 mraid_device_t *raid_dev; 2317 struct list_head clist; 2318 struct scatterlist *sgl; 2319 scb_t *scb; 2320 scb_t *tmp; 2321 struct scsi_cmnd *scp; 2322 mraid_passthru_t *pthru; 2323 mraid_epassthru_t *epthru; 2324 mbox_ccb_t *ccb; 2325 int islogical; 2326 int pdev_index; 2327 int pdev_state; 2328 mbox_t *mbox; 2329 unsigned long flags; 2330 uint8_t c; 2331 int status; 2332 uioc_t *kioc; 2333 2334 2335 if (!adapter) return; 2336 2337 raid_dev = ADAP2RAIDDEV(adapter); 2338 2339 // move the SCBs from the completed list to our local list 2340 INIT_LIST_HEAD(&clist); 2341 2342 spin_lock_irqsave(COMPLETED_LIST_LOCK(adapter), flags); 2343 2344 list_splice_init(&adapter->completed_list, &clist); 2345 2346 spin_unlock_irqrestore(COMPLETED_LIST_LOCK(adapter), flags); 2347 2348 2349 list_for_each_entry_safe(scb, tmp, &clist, list) { 2350 2351 status = scb->status; 2352 scp = scb->scp; 2353 ccb = (mbox_ccb_t *)scb->ccb; 2354 pthru = ccb->pthru; 2355 epthru = ccb->epthru; 2356 mbox = ccb->mbox; 2357 2358 // Make sure f/w has completed a valid command 2359 if (scb->state != SCB_ISSUED) { 2360 con_log(CL_ANN, (KERN_CRIT 2361 "megaraid critical err: invalid command %d:%d:%p\n", 2362 scb->sno, scb->state, scp)); 2363 BUG(); 2364 continue; // Must never happen! 2365 } 2366 2367 // check for the management command and complete it right away 2368 if (scb->sno >= MBOX_MAX_SCSI_CMDS) { 2369 scb->state = SCB_FREE; 2370 scb->status = status; 2371 2372 // remove from local clist 2373 list_del_init(&scb->list); 2374 2375 kioc = (uioc_t *)scb->gp; 2376 kioc->status = 0; 2377 2378 megaraid_mbox_mm_done(adapter, scb); 2379 2380 continue; 2381 } 2382 2383 // Was an abort issued for this command earlier 2384 if (scb->state & SCB_ABORT) { 2385 con_log(CL_ANN, (KERN_NOTICE 2386 "megaraid: aborted cmd %lx[%x] completed\n", 2387 scp->serial_number, scb->sno)); 2388 } 2389 2390 /* 2391 * If the inquiry came of a disk drive which is not part of 2392 * any RAID array, expose it to the kernel. For this to be 2393 * enabled, user must set the "megaraid_expose_unconf_disks" 2394 * flag to 1 by specifying it on module parameter list. 
2395 * This would enable data migration off drives from other 2396 * configurations. 2397 */ 2398 islogical = MRAID_IS_LOGICAL(adapter, scp); 2399 if (scp->cmnd[0] == INQUIRY && status == 0 && islogical == 0 2400 && IS_RAID_CH(raid_dev, scb->dev_channel)) { 2401 2402 if (scp->use_sg) { 2403 sgl = (struct scatterlist *) 2404 scp->request_buffer; 2405 2406 if (sgl->page) { 2407 c = *(unsigned char *) 2408 (page_address((&sgl[0])->page) + 2409 (&sgl[0])->offset); 2410 } 2411 else { 2412 con_log(CL_ANN, (KERN_WARNING 2413 "megaraid mailbox: invalid sg:%d\n", 2414 __LINE__)); 2415 c = 0; 2416 } 2417 } 2418 else { 2419 c = *(uint8_t *)scp->request_buffer; 2420 } 2421 2422 if ((c & 0x1F ) == TYPE_DISK) { 2423 pdev_index = (scb->dev_channel * 16) + 2424 scb->dev_target; 2425 pdev_state = 2426 raid_dev->pdrv_state[pdev_index] & 0x0F; 2427 2428 if (pdev_state == PDRV_ONLINE || 2429 pdev_state == PDRV_FAILED || 2430 pdev_state == PDRV_RBLD || 2431 pdev_state == PDRV_HOTSPARE || 2432 megaraid_expose_unconf_disks == 0) { 2433 2434 status = 0xF0; 2435 } 2436 } 2437 } 2438 2439 // Convert MegaRAID status to Linux error code 2440 switch (status) { 2441 2442 case 0x00: 2443 2444 scp->result = (DID_OK << 16); 2445 break; 2446 2447 case 0x02: 2448 2449 /* set sense_buffer and result fields */ 2450 if (mbox->cmd == MBOXCMD_PASSTHRU || 2451 mbox->cmd == MBOXCMD_PASSTHRU64) { 2452 2453 memcpy(scp->sense_buffer, pthru->reqsensearea, 2454 14); 2455 2456 scp->result = DRIVER_SENSE << 24 | 2457 DID_OK << 16 | CHECK_CONDITION << 1; 2458 } 2459 else { 2460 if (mbox->cmd == MBOXCMD_EXTPTHRU) { 2461 2462 memcpy(scp->sense_buffer, 2463 epthru->reqsensearea, 14); 2464 2465 scp->result = DRIVER_SENSE << 24 | 2466 DID_OK << 16 | 2467 CHECK_CONDITION << 1; 2468 } else { 2469 scp->sense_buffer[0] = 0x70; 2470 scp->sense_buffer[2] = ABORTED_COMMAND; 2471 scp->result = CHECK_CONDITION << 1; 2472 } 2473 } 2474 break; 2475 2476 case 0x08: 2477 2478 scp->result = DID_BUS_BUSY << 16 | status; 2479 break; 2480 2481 default: 2482 2483 /* 2484 * If TEST_UNIT_READY fails, we know RESERVATION_STATUS 2485 * failed 2486 */ 2487 if (scp->cmnd[0] == TEST_UNIT_READY) { 2488 scp->result = DID_ERROR << 16 | 2489 RESERVATION_CONFLICT << 1; 2490 } 2491 else 2492 /* 2493 * Error code returned is 1 if Reserve or Release 2494 * failed or the input parameter is invalid 2495 */ 2496 if (status == 1 && (scp->cmnd[0] == RESERVE || 2497 scp->cmnd[0] == RELEASE)) { 2498 2499 scp->result = DID_ERROR << 16 | 2500 RESERVATION_CONFLICT << 1; 2501 } 2502 else { 2503 scp->result = DID_BAD_TARGET << 16 | status; 2504 } 2505 } 2506 2507 // print a debug message for all failed commands 2508 if (status) { 2509 megaraid_mbox_display_scb(adapter, scb); 2510 } 2511 2512 // Free our internal resources and call the mid-layer callback 2513 // routine 2514 megaraid_mbox_sync_scb(adapter, scb); 2515 2516 // remove from local clist 2517 list_del_init(&scb->list); 2518 2519 // put back in free list 2520 megaraid_dealloc_scb(adapter, scb); 2521 2522 // send the scsi packet back to kernel 2523 scp->scsi_done(scp); 2524 } 2525 2526 return; 2527 } 2528 2529 2530 /** 2531 * megaraid_abort_handler - abort the scsi command 2532 * @scp : command to be aborted 2533 * 2534 * Abort a previous SCSI request. Only commands on the pending list can be 2535 * aborted. All the commands issued to the F/W must complete. 
2536 **/ 2537 static int 2538 megaraid_abort_handler(struct scsi_cmnd *scp) 2539 { 2540 adapter_t *adapter; 2541 mraid_device_t *raid_dev; 2542 scb_t *scb; 2543 scb_t *tmp; 2544 int found; 2545 unsigned long flags; 2546 int i; 2547 2548 2549 adapter = SCP2ADAPTER(scp); 2550 raid_dev = ADAP2RAIDDEV(adapter); 2551 2552 con_log(CL_ANN, (KERN_WARNING 2553 "megaraid: aborting-%ld cmd=%x <c=%d t=%d l=%d>\n", 2554 scp->serial_number, scp->cmnd[0], SCP2CHANNEL(scp), 2555 SCP2TARGET(scp), SCP2LUN(scp))); 2556 2557 // If FW has stopped responding, simply return failure 2558 if (raid_dev->hw_error) { 2559 con_log(CL_ANN, (KERN_NOTICE 2560 "megaraid: hw error, not aborting\n")); 2561 return FAILED; 2562 } 2563 2564 // There might a race here, where the command was completed by the 2565 // firmware and now it is on the completed list. Before we could 2566 // complete the command to the kernel in dpc, the abort came. 2567 // Find out if this is the case to avoid the race. 2568 scb = NULL; 2569 spin_lock_irqsave(COMPLETED_LIST_LOCK(adapter), flags); 2570 list_for_each_entry_safe(scb, tmp, &adapter->completed_list, list) { 2571 2572 if (scb->scp == scp) { // Found command 2573 2574 list_del_init(&scb->list); // from completed list 2575 2576 con_log(CL_ANN, (KERN_WARNING 2577 "megaraid: %ld:%d[%d:%d], abort from completed list\n", 2578 scp->serial_number, scb->sno, 2579 scb->dev_channel, scb->dev_target)); 2580 2581 scp->result = (DID_ABORT << 16); 2582 scp->scsi_done(scp); 2583 2584 megaraid_dealloc_scb(adapter, scb); 2585 2586 spin_unlock_irqrestore(COMPLETED_LIST_LOCK(adapter), 2587 flags); 2588 2589 return SUCCESS; 2590 } 2591 } 2592 spin_unlock_irqrestore(COMPLETED_LIST_LOCK(adapter), flags); 2593 2594 2595 // Find out if this command is still on the pending list. If it is and 2596 // was never issued, abort and return success. If the command is owned 2597 // by the firmware, we must wait for it to complete by the FW. 2598 spin_lock_irqsave(PENDING_LIST_LOCK(adapter), flags); 2599 list_for_each_entry_safe(scb, tmp, &adapter->pend_list, list) { 2600 2601 if (scb->scp == scp) { // Found command 2602 2603 list_del_init(&scb->list); // from pending list 2604 2605 ASSERT(!(scb->state & SCB_ISSUED)); 2606 2607 con_log(CL_ANN, (KERN_WARNING 2608 "megaraid abort: %ld[%d:%d], driver owner\n", 2609 scp->serial_number, scb->dev_channel, 2610 scb->dev_target)); 2611 2612 scp->result = (DID_ABORT << 16); 2613 scp->scsi_done(scp); 2614 2615 megaraid_dealloc_scb(adapter, scb); 2616 2617 spin_unlock_irqrestore(PENDING_LIST_LOCK(adapter), 2618 flags); 2619 2620 return SUCCESS; 2621 } 2622 } 2623 spin_unlock_irqrestore(PENDING_LIST_LOCK(adapter), flags); 2624 2625 2626 // Check do we even own this command, in which case this would be 2627 // owned by the firmware. 
	// The only way to locate the FW scb is to
	// traverse through the list of all SCB, since driver does not
	// maintain these SCBs on any list
	found = 0;
	spin_lock_irq(&adapter->lock);
	for (i = 0; i < MBOX_MAX_SCSI_CMDS; i++) {
		scb = adapter->kscb_list + i;

		if (scb->scp == scp) {

			found = 1;

			if (!(scb->state & SCB_ISSUED)) {
				con_log(CL_ANN, (KERN_WARNING
				"megaraid abort: %ld:%d[%d:%d], invalid state\n",
				scp->serial_number, scb->sno, scb->dev_channel,
				scb->dev_target));
				BUG();
			}
			else {
				con_log(CL_ANN, (KERN_WARNING
				"megaraid abort: %ld:%d[%d:%d], fw owner\n",
				scp->serial_number, scb->sno, scb->dev_channel,
				scb->dev_target));
			}
		}
	}
	spin_unlock_irq(&adapter->lock);

	if (!found) {
		con_log(CL_ANN, (KERN_WARNING
			"megaraid abort: scsi cmd:%ld, do not own\n",
			scp->serial_number));

		// FIXME: Should there be a callback for this command?
		return SUCCESS;
	}

	// We cannot actually abort a command owned by firmware, return
	// failure and wait for reset. In host reset handler, we will find out
	// if the HBA is still live
	return FAILED;
}

/**
 * megaraid_reset_handler - device reset handler for mailbox based driver
 * @scp	: reference command
 *
 * Reset handler for the mailbox based controller. First try to find out if
 * the FW is still live, in which case the outstanding commands counter must
 * go down to 0. If that happens, also issue the reservation reset command to
 * relinquish (possible) reservations on the logical drives connected to this
 * host.
 **/
static int
megaraid_reset_handler(struct scsi_cmnd *scp)
{
	adapter_t	*adapter;
	scb_t		*scb;
	scb_t		*tmp;
	mraid_device_t	*raid_dev;
	unsigned long	flags;
	uint8_t		raw_mbox[sizeof(mbox_t)];
	int		rval;
	int		recovery_window;
	int		recovering;
	int		i;
	uioc_t		*kioc;

	adapter		= SCP2ADAPTER(scp);
	raid_dev	= ADAP2RAIDDEV(adapter);

	// return failure if adapter is not responding
	if (raid_dev->hw_error) {
		con_log(CL_ANN, (KERN_NOTICE
			"megaraid: hw error, cannot reset\n"));
		return FAILED;
	}


	// Under exceptional conditions, FW can take up to 3 minutes to
	// complete command processing. Wait for additional 2 minutes for the
	// pending commands counter to go down to 0.
If it doesn't, let the 2710 // controller be marked offline 2711 // Also, reset all the commands currently owned by the driver 2712 spin_lock_irqsave(PENDING_LIST_LOCK(adapter), flags); 2713 list_for_each_entry_safe(scb, tmp, &adapter->pend_list, list) { 2714 list_del_init(&scb->list); // from pending list 2715 2716 if (scb->sno >= MBOX_MAX_SCSI_CMDS) { 2717 con_log(CL_ANN, (KERN_WARNING 2718 "megaraid: IOCTL packet with %d[%d:%d] being reset\n", 2719 scb->sno, scb->dev_channel, scb->dev_target)); 2720 2721 scb->status = -1; 2722 2723 kioc = (uioc_t *)scb->gp; 2724 kioc->status = -EFAULT; 2725 2726 megaraid_mbox_mm_done(adapter, scb); 2727 } else { 2728 if (scb->scp == scp) { // Found command 2729 con_log(CL_ANN, (KERN_WARNING 2730 "megaraid: %ld:%d[%d:%d], reset from pending list\n", 2731 scp->serial_number, scb->sno, 2732 scb->dev_channel, scb->dev_target)); 2733 } else { 2734 con_log(CL_ANN, (KERN_WARNING 2735 "megaraid: IO packet with %d[%d:%d] being reset\n", 2736 scb->sno, scb->dev_channel, scb->dev_target)); 2737 } 2738 2739 scb->scp->result = (DID_RESET << 16); 2740 scb->scp->scsi_done(scb->scp); 2741 2742 megaraid_dealloc_scb(adapter, scb); 2743 } 2744 } 2745 spin_unlock_irqrestore(PENDING_LIST_LOCK(adapter), flags); 2746 2747 if (adapter->outstanding_cmds) { 2748 con_log(CL_ANN, (KERN_NOTICE 2749 "megaraid: %d outstanding commands. Max wait %d sec\n", 2750 adapter->outstanding_cmds, 2751 (MBOX_RESET_WAIT + MBOX_RESET_EXT_WAIT))); 2752 } 2753 2754 recovery_window = MBOX_RESET_WAIT + MBOX_RESET_EXT_WAIT; 2755 2756 recovering = adapter->outstanding_cmds; 2757 2758 for (i = 0; i < recovery_window; i++) { 2759 2760 megaraid_ack_sequence(adapter); 2761 2762 // print a message once every 5 seconds only 2763 if (!(i % 5)) { 2764 con_log(CL_ANN, ( 2765 "megaraid mbox: Wait for %d commands to complete:%d\n", 2766 adapter->outstanding_cmds, 2767 (MBOX_RESET_WAIT + MBOX_RESET_EXT_WAIT) - i)); 2768 } 2769 2770 // bailout if no recovery happended in reset time 2771 if (adapter->outstanding_cmds == 0) { 2772 break; 2773 } 2774 2775 msleep(1000); 2776 } 2777 2778 spin_lock(&adapter->lock); 2779 2780 // If still outstanding commands, bail out 2781 if (adapter->outstanding_cmds) { 2782 con_log(CL_ANN, (KERN_WARNING 2783 "megaraid mbox: critical hardware error!\n")); 2784 2785 raid_dev->hw_error = 1; 2786 2787 rval = FAILED; 2788 goto out; 2789 } 2790 else { 2791 con_log(CL_ANN, (KERN_NOTICE 2792 "megaraid mbox: reset sequence completed sucessfully\n")); 2793 } 2794 2795 2796 // If the controller supports clustering, reset reservations 2797 if (!adapter->ha) { 2798 rval = SUCCESS; 2799 goto out; 2800 } 2801 2802 // clear reservations if any 2803 raw_mbox[0] = CLUSTER_CMD; 2804 raw_mbox[2] = RESET_RESERVATIONS; 2805 2806 rval = SUCCESS; 2807 if (mbox_post_sync_cmd_fast(adapter, raw_mbox) == 0) { 2808 con_log(CL_ANN, 2809 (KERN_INFO "megaraid: reservation reset\n")); 2810 } 2811 else { 2812 rval = FAILED; 2813 con_log(CL_ANN, (KERN_WARNING 2814 "megaraid: reservation reset failed\n")); 2815 } 2816 2817 out: 2818 spin_unlock_irq(&adapter->lock); 2819 return rval; 2820 } 2821 2822 /* 2823 * START: internal commands library 2824 * 2825 * This section of the driver has the common routine used by the driver and 2826 * also has all the FW routines 2827 */ 2828 2829 /** 2830 * mbox_post_sync_cmd() - blocking command to the mailbox based controllers 2831 * @adapter : controller's soft state 2832 * @raw_mbox : the mailbox 2833 * 2834 * Issue a scb in synchronous and non-interrupt mode for mailbox based 2835 * 
controllers. 2836 */ 2837 static int 2838 mbox_post_sync_cmd(adapter_t *adapter, uint8_t raw_mbox[]) 2839 { 2840 mraid_device_t *raid_dev = ADAP2RAIDDEV(adapter); 2841 mbox64_t *mbox64; 2842 mbox_t *mbox; 2843 uint8_t status; 2844 int i; 2845 2846 2847 mbox64 = raid_dev->mbox64; 2848 mbox = raid_dev->mbox; 2849 2850 /* 2851 * Wait until mailbox is free 2852 */ 2853 if (megaraid_busywait_mbox(raid_dev) != 0) 2854 goto blocked_mailbox; 2855 2856 /* 2857 * Copy mailbox data into host structure 2858 */ 2859 memcpy((caddr_t)mbox, (caddr_t)raw_mbox, 16); 2860 mbox->cmdid = 0xFE; 2861 mbox->busy = 1; 2862 mbox->poll = 0; 2863 mbox->ack = 0; 2864 mbox->numstatus = 0xFF; 2865 mbox->status = 0xFF; 2866 2867 wmb(); 2868 WRINDOOR(raid_dev, raid_dev->mbox_dma | 0x1); 2869 2870 // wait for maximum 1 second for status to post. If the status is not 2871 // available within 1 second, assume FW is initializing and wait 2872 // for an extended amount of time 2873 if (mbox->numstatus == 0xFF) { // status not yet available 2874 udelay(25); 2875 2876 for (i = 0; mbox->numstatus == 0xFF && i < 1000; i++) { 2877 rmb(); 2878 msleep(1); 2879 } 2880 2881 2882 if (i == 1000) { 2883 con_log(CL_ANN, (KERN_NOTICE 2884 "megaraid mailbox: wait for FW to boot ")); 2885 2886 for (i = 0; (mbox->numstatus == 0xFF) && 2887 (i < MBOX_RESET_WAIT); i++) { 2888 rmb(); 2889 con_log(CL_ANN, ("\b\b\b\b\b[%03d]", 2890 MBOX_RESET_WAIT - i)); 2891 msleep(1000); 2892 } 2893 2894 if (i == MBOX_RESET_WAIT) { 2895 2896 con_log(CL_ANN, ( 2897 "\nmegaraid mailbox: status not available\n")); 2898 2899 return -1; 2900 } 2901 con_log(CL_ANN, ("\b\b\b\b\b[ok] \n")); 2902 } 2903 } 2904 2905 // wait for maximum 1 second for poll semaphore 2906 if (mbox->poll != 0x77) { 2907 udelay(25); 2908 2909 for (i = 0; (mbox->poll != 0x77) && (i < 1000); i++) { 2910 rmb(); 2911 msleep(1); 2912 } 2913 2914 if (i == 1000) { 2915 con_log(CL_ANN, (KERN_WARNING 2916 "megaraid mailbox: could not get poll semaphore\n")); 2917 return -1; 2918 } 2919 } 2920 2921 WRINDOOR(raid_dev, raid_dev->mbox_dma | 0x2); 2922 wmb(); 2923 2924 // wait for maximum 1 second for acknowledgement 2925 if (RDINDOOR(raid_dev) & 0x2) { 2926 udelay(25); 2927 2928 for (i = 0; (RDINDOOR(raid_dev) & 0x2) && (i < 1000); i++) { 2929 rmb(); 2930 msleep(1); 2931 } 2932 2933 if (i == 1000) { 2934 con_log(CL_ANN, (KERN_WARNING 2935 "megaraid mailbox: could not acknowledge\n")); 2936 return -1; 2937 } 2938 } 2939 mbox->poll = 0; 2940 mbox->ack = 0x77; 2941 2942 status = mbox->status; 2943 2944 // invalidate the completed command id array. After command 2945 // completion, firmware would write the valid id. 2946 mbox->numstatus = 0xFF; 2947 mbox->status = 0xFF; 2948 for (i = 0; i < MBOX_MAX_FIRMWARE_STATUS; i++) { 2949 mbox->completed[i] = 0xFF; 2950 } 2951 2952 return status; 2953 2954 blocked_mailbox: 2955 2956 con_log(CL_ANN, (KERN_WARNING "megaraid: blocked mailbox\n") ); 2957 return -1; 2958 } 2959 2960 2961 /** 2962 * mbox_post_sync_cmd_fast - blocking command to the mailbox based controllers 2963 * @adapter : controller's soft state 2964 * @raw_mbox : the mailbox 2965 * 2966 * Issue a scb in synchronous and non-interrupt mode for mailbox based 2967 * controllers. This is a faster version of the synchronous command and 2968 * therefore can be called in interrupt-context as well. 
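 *
 * A typical caller builds a raw mailbox and checks the firmware status it
 * returns, e.g. (a sketch modelled on the reservation reset issued from
 * the reset handler above):
 *
 *	uint8_t raw_mbox[sizeof(mbox_t)];
 *
 *	memset(raw_mbox, 0, sizeof(raw_mbox));
 *	raw_mbox[0] = CLUSTER_CMD;
 *	raw_mbox[2] = RESET_RESERVATIONS;
 *	if (mbox_post_sync_cmd_fast(adapter, raw_mbox) != 0)
 *		con_log(CL_ANN, (KERN_WARNING
 *			"megaraid: reservation reset failed\n"));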
2969 */ 2970 static int 2971 mbox_post_sync_cmd_fast(adapter_t *adapter, uint8_t raw_mbox[]) 2972 { 2973 mraid_device_t *raid_dev = ADAP2RAIDDEV(adapter); 2974 mbox_t *mbox; 2975 long i; 2976 2977 2978 mbox = raid_dev->mbox; 2979 2980 // return immediately if the mailbox is busy 2981 if (mbox->busy) return -1; 2982 2983 // Copy mailbox data into host structure 2984 memcpy((caddr_t)mbox, (caddr_t)raw_mbox, 14); 2985 mbox->cmdid = 0xFE; 2986 mbox->busy = 1; 2987 mbox->poll = 0; 2988 mbox->ack = 0; 2989 mbox->numstatus = 0xFF; 2990 mbox->status = 0xFF; 2991 2992 wmb(); 2993 WRINDOOR(raid_dev, raid_dev->mbox_dma | 0x1); 2994 2995 for (i = 0; i < MBOX_SYNC_WAIT_CNT; i++) { 2996 if (mbox->numstatus != 0xFF) break; 2997 rmb(); 2998 udelay(MBOX_SYNC_DELAY_200); 2999 } 3000 3001 if (i == MBOX_SYNC_WAIT_CNT) { 3002 // We may need to re-calibrate the counter 3003 con_log(CL_ANN, (KERN_CRIT 3004 "megaraid: fast sync command timed out\n")); 3005 } 3006 3007 WRINDOOR(raid_dev, raid_dev->mbox_dma | 0x2); 3008 wmb(); 3009 3010 return mbox->status; 3011 } 3012 3013 3014 /** 3015 * megaraid_busywait_mbox() - Wait until the controller's mailbox is available 3016 * @raid_dev : RAID device (HBA) soft state 3017 * 3018 * Wait until the controller's mailbox is available to accept more commands. 3019 * Wait for at most 1 second. 3020 */ 3021 static int 3022 megaraid_busywait_mbox(mraid_device_t *raid_dev) 3023 { 3024 mbox_t *mbox = raid_dev->mbox; 3025 int i = 0; 3026 3027 if (mbox->busy) { 3028 udelay(25); 3029 for (i = 0; mbox->busy && i < 1000; i++) 3030 msleep(1); 3031 } 3032 3033 if (i < 1000) return 0; 3034 else return -1; 3035 } 3036 3037 3038 /** 3039 * megaraid_mbox_product_info - some static information about the controller 3040 * @adapter : our soft state 3041 * 3042 * Issue commands to the controller to grab some parameters required by our 3043 * caller. 3044 */ 3045 static int 3046 megaraid_mbox_product_info(adapter_t *adapter) 3047 { 3048 mraid_device_t *raid_dev = ADAP2RAIDDEV(adapter); 3049 mbox_t *mbox; 3050 uint8_t raw_mbox[sizeof(mbox_t)]; 3051 mraid_pinfo_t *pinfo; 3052 dma_addr_t pinfo_dma_h; 3053 mraid_inquiry3_t *mraid_inq3; 3054 int i; 3055 3056 3057 memset((caddr_t)raw_mbox, 0, sizeof(raw_mbox)); 3058 mbox = (mbox_t *)raw_mbox; 3059 3060 /* 3061 * Issue an ENQUIRY3 command to find out certain adapter parameters, 3062 * e.g., max channels, max commands etc. 3063 */ 3064 pinfo = pci_alloc_consistent(adapter->pdev, sizeof(mraid_pinfo_t), 3065 &pinfo_dma_h); 3066 3067 if (pinfo == NULL) { 3068 con_log(CL_ANN, (KERN_WARNING 3069 "megaraid: out of memory, %s %d\n", __FUNCTION__, 3070 __LINE__)); 3071 3072 return -1; 3073 } 3074 memset(pinfo, 0, sizeof(mraid_pinfo_t)); 3075 3076 mbox->xferaddr = (uint32_t)adapter->ibuf_dma_h; 3077 memset((void *)adapter->ibuf, 0, MBOX_IBUF_SIZE); 3078 3079 raw_mbox[0] = FC_NEW_CONFIG; 3080 raw_mbox[2] = NC_SUBOP_ENQUIRY3; 3081 raw_mbox[3] = ENQ3_GET_SOLICITED_FULL; 3082 3083 // Issue the command 3084 if (mbox_post_sync_cmd(adapter, raw_mbox) != 0) { 3085 3086 con_log(CL_ANN, (KERN_WARNING "megaraid: Inquiry3 failed\n")); 3087 3088 pci_free_consistent(adapter->pdev, sizeof(mraid_pinfo_t), 3089 pinfo, pinfo_dma_h); 3090 3091 return -1; 3092 } 3093 3094 /* 3095 * Collect information about state of each physical drive 3096 * attached to the controller. 
We will expose all the disks 3097 * which are not part of RAID 3098 */ 3099 mraid_inq3 = (mraid_inquiry3_t *)adapter->ibuf; 3100 for (i = 0; i < MBOX_MAX_PHYSICAL_DRIVES; i++) { 3101 raid_dev->pdrv_state[i] = mraid_inq3->pdrv_state[i]; 3102 } 3103 3104 /* 3105 * Get product info for information like number of channels, 3106 * maximum commands supported. 3107 */ 3108 memset((caddr_t)raw_mbox, 0, sizeof(raw_mbox)); 3109 mbox->xferaddr = (uint32_t)pinfo_dma_h; 3110 3111 raw_mbox[0] = FC_NEW_CONFIG; 3112 raw_mbox[2] = NC_SUBOP_PRODUCT_INFO; 3113 3114 if (mbox_post_sync_cmd(adapter, raw_mbox) != 0) { 3115 3116 con_log(CL_ANN, (KERN_WARNING 3117 "megaraid: product info failed\n")); 3118 3119 pci_free_consistent(adapter->pdev, sizeof(mraid_pinfo_t), 3120 pinfo, pinfo_dma_h); 3121 3122 return -1; 3123 } 3124 3125 /* 3126 * Setup some parameters for host, as required by our caller 3127 */ 3128 adapter->max_channel = pinfo->nchannels; 3129 3130 /* 3131 * we will export all the logical drives on a single channel. 3132 * Add 1 since inquires do not come for inititor ID 3133 */ 3134 adapter->max_target = MAX_LOGICAL_DRIVES_40LD + 1; 3135 adapter->max_lun = 8; // up to 8 LUNs for non-disk devices 3136 3137 /* 3138 * These are the maximum outstanding commands for the scsi-layer 3139 */ 3140 adapter->max_cmds = MBOX_MAX_SCSI_CMDS; 3141 3142 memset(adapter->fw_version, 0, VERSION_SIZE); 3143 memset(adapter->bios_version, 0, VERSION_SIZE); 3144 3145 memcpy(adapter->fw_version, pinfo->fw_version, 4); 3146 adapter->fw_version[4] = 0; 3147 3148 memcpy(adapter->bios_version, pinfo->bios_version, 4); 3149 adapter->bios_version[4] = 0; 3150 3151 con_log(CL_ANN, (KERN_NOTICE 3152 "megaraid: fw version:[%s] bios version:[%s]\n", 3153 adapter->fw_version, adapter->bios_version)); 3154 3155 pci_free_consistent(adapter->pdev, sizeof(mraid_pinfo_t), pinfo, 3156 pinfo_dma_h); 3157 3158 return 0; 3159 } 3160 3161 3162 3163 /** 3164 * megaraid_mbox_extended_cdb - check for support for extended CDBs 3165 * @adapter : soft state for the controller 3166 * 3167 * This routine check whether the controller in question supports extended 3168 * ( > 10 bytes ) CDBs. 3169 */ 3170 static int 3171 megaraid_mbox_extended_cdb(adapter_t *adapter) 3172 { 3173 mbox_t *mbox; 3174 uint8_t raw_mbox[sizeof(mbox_t)]; 3175 int rval; 3176 3177 mbox = (mbox_t *)raw_mbox; 3178 3179 memset((caddr_t)raw_mbox, 0, sizeof(raw_mbox)); 3180 mbox->xferaddr = (uint32_t)adapter->ibuf_dma_h; 3181 3182 memset((void *)adapter->ibuf, 0, MBOX_IBUF_SIZE); 3183 3184 raw_mbox[0] = MAIN_MISC_OPCODE; 3185 raw_mbox[2] = SUPPORT_EXT_CDB; 3186 3187 /* 3188 * Issue the command 3189 */ 3190 rval = 0; 3191 if (mbox_post_sync_cmd(adapter, raw_mbox) != 0) { 3192 rval = -1; 3193 } 3194 3195 return rval; 3196 } 3197 3198 3199 /** 3200 * megaraid_mbox_support_ha - Do we support clustering 3201 * @adapter : soft state for the controller 3202 * @init_id : ID of the initiator 3203 * 3204 * Determine if the firmware supports clustering and the ID of the initiator. 
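 *
 * Returns 0 and fills @init_id with the initiator ID when cluster firmware
 * is detected; otherwise returns -1 and @init_id defaults to 7. A caller
 * would typically do something like this (sketch only, not the literal
 * probe-time code):
 *
 *	uint16_t init_id;
 *
 *	if (megaraid_mbox_support_ha(adapter, &init_id) == 0)
 *		adapter->ha = 1;	// enables RESERVE/RELEASE handling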
3205 */ 3206 static int 3207 megaraid_mbox_support_ha(adapter_t *adapter, uint16_t *init_id) 3208 { 3209 mbox_t *mbox; 3210 uint8_t raw_mbox[sizeof(mbox_t)]; 3211 int rval; 3212 3213 3214 mbox = (mbox_t *)raw_mbox; 3215 3216 memset((caddr_t)raw_mbox, 0, sizeof(raw_mbox)); 3217 3218 mbox->xferaddr = (uint32_t)adapter->ibuf_dma_h; 3219 3220 memset((void *)adapter->ibuf, 0, MBOX_IBUF_SIZE); 3221 3222 raw_mbox[0] = GET_TARGET_ID; 3223 3224 // Issue the command 3225 *init_id = 7; 3226 rval = -1; 3227 if (mbox_post_sync_cmd(adapter, raw_mbox) == 0) { 3228 3229 *init_id = *(uint8_t *)adapter->ibuf; 3230 3231 con_log(CL_ANN, (KERN_INFO 3232 "megaraid: cluster firmware, initiator ID: %d\n", 3233 *init_id)); 3234 3235 rval = 0; 3236 } 3237 3238 return rval; 3239 } 3240 3241 3242 /** 3243 * megaraid_mbox_support_random_del - Do we support random deletion 3244 * @adapter : soft state for the controller 3245 * 3246 * Determine if the firmware supports random deletion. 3247 * Return: 1 is operation supported, 0 otherwise 3248 */ 3249 static int 3250 megaraid_mbox_support_random_del(adapter_t *adapter) 3251 { 3252 mbox_t *mbox; 3253 uint8_t raw_mbox[sizeof(mbox_t)]; 3254 int rval; 3255 3256 3257 mbox = (mbox_t *)raw_mbox; 3258 3259 memset((caddr_t)raw_mbox, 0, sizeof(mbox_t)); 3260 3261 raw_mbox[0] = FC_DEL_LOGDRV; 3262 raw_mbox[2] = OP_SUP_DEL_LOGDRV; 3263 3264 // Issue the command 3265 rval = 0; 3266 if (mbox_post_sync_cmd(adapter, raw_mbox) == 0) { 3267 3268 con_log(CL_DLEVEL1, ("megaraid: supports random deletion\n")); 3269 3270 rval = 1; 3271 } 3272 3273 return rval; 3274 } 3275 3276 3277 /** 3278 * megaraid_mbox_get_max_sg - maximum sg elements supported by the firmware 3279 * @adapter : soft state for the controller 3280 * 3281 * Find out the maximum number of scatter-gather elements supported by the 3282 * firmware. 3283 */ 3284 static int 3285 megaraid_mbox_get_max_sg(adapter_t *adapter) 3286 { 3287 mbox_t *mbox; 3288 uint8_t raw_mbox[sizeof(mbox_t)]; 3289 int nsg; 3290 3291 3292 mbox = (mbox_t *)raw_mbox; 3293 3294 memset((caddr_t)raw_mbox, 0, sizeof(mbox_t)); 3295 3296 mbox->xferaddr = (uint32_t)adapter->ibuf_dma_h; 3297 3298 memset((void *)adapter->ibuf, 0, MBOX_IBUF_SIZE); 3299 3300 raw_mbox[0] = MAIN_MISC_OPCODE; 3301 raw_mbox[2] = GET_MAX_SG_SUPPORT; 3302 3303 // Issue the command 3304 if (mbox_post_sync_cmd(adapter, raw_mbox) == 0) { 3305 nsg = *(uint8_t *)adapter->ibuf; 3306 } 3307 else { 3308 nsg = MBOX_DEFAULT_SG_SIZE; 3309 } 3310 3311 if (nsg > MBOX_MAX_SG_SIZE) nsg = MBOX_MAX_SG_SIZE; 3312 3313 return nsg; 3314 } 3315 3316 3317 /** 3318 * megaraid_mbox_enum_raid_scsi - enumerate the RAID and SCSI channels 3319 * @adapter : soft state for the controller 3320 * 3321 * Enumerate the RAID and SCSI channels for ROMB platforms so that channels 3322 * can be exported as regular SCSI channels. 3323 */ 3324 static void 3325 megaraid_mbox_enum_raid_scsi(adapter_t *adapter) 3326 { 3327 mraid_device_t *raid_dev = ADAP2RAIDDEV(adapter); 3328 mbox_t *mbox; 3329 uint8_t raw_mbox[sizeof(mbox_t)]; 3330 3331 3332 mbox = (mbox_t *)raw_mbox; 3333 3334 memset((caddr_t)raw_mbox, 0, sizeof(mbox_t)); 3335 3336 mbox->xferaddr = (uint32_t)adapter->ibuf_dma_h; 3337 3338 memset((void *)adapter->ibuf, 0, MBOX_IBUF_SIZE); 3339 3340 raw_mbox[0] = CHNL_CLASS; 3341 raw_mbox[2] = GET_CHNL_CLASS; 3342 3343 // Issue the command. 
If the command fails, all channels are RAID 3344 // channels 3345 raid_dev->channel_class = 0xFF; 3346 if (mbox_post_sync_cmd(adapter, raw_mbox) == 0) { 3347 raid_dev->channel_class = *(uint8_t *)adapter->ibuf; 3348 } 3349 3350 return; 3351 } 3352 3353 3354 /** 3355 * megaraid_mbox_flush_cache - flush adapter and disks cache 3356 * @adapter : soft state for the controller 3357 * 3358 * Flush adapter cache followed by disks cache. 3359 */ 3360 static void 3361 megaraid_mbox_flush_cache(adapter_t *adapter) 3362 { 3363 mbox_t *mbox; 3364 uint8_t raw_mbox[sizeof(mbox_t)]; 3365 3366 3367 mbox = (mbox_t *)raw_mbox; 3368 3369 memset((caddr_t)raw_mbox, 0, sizeof(mbox_t)); 3370 3371 raw_mbox[0] = FLUSH_ADAPTER; 3372 3373 if (mbox_post_sync_cmd(adapter, raw_mbox) != 0) { 3374 con_log(CL_ANN, ("megaraid: flush adapter failed\n")); 3375 } 3376 3377 raw_mbox[0] = FLUSH_SYSTEM; 3378 3379 if (mbox_post_sync_cmd(adapter, raw_mbox) != 0) { 3380 con_log(CL_ANN, ("megaraid: flush disks cache failed\n")); 3381 } 3382 3383 return; 3384 } 3385 3386 3387 /** 3388 * megaraid_mbox_fire_sync_cmd - fire the sync cmd 3389 * @adapter : soft state for the controller 3390 * 3391 * Clears the pending cmds in FW and reinits its RAID structs. 3392 */ 3393 static int 3394 megaraid_mbox_fire_sync_cmd(adapter_t *adapter) 3395 { 3396 mbox_t *mbox; 3397 uint8_t raw_mbox[sizeof(mbox_t)]; 3398 mraid_device_t *raid_dev = ADAP2RAIDDEV(adapter); 3399 mbox64_t *mbox64; 3400 int status = 0; 3401 int i; 3402 uint32_t dword; 3403 3404 mbox = (mbox_t *)raw_mbox; 3405 3406 memset((caddr_t)raw_mbox, 0, sizeof(mbox_t)); 3407 3408 raw_mbox[0] = 0xFF; 3409 3410 mbox64 = raid_dev->mbox64; 3411 mbox = raid_dev->mbox; 3412 3413 /* Wait until mailbox is free */ 3414 if (megaraid_busywait_mbox(raid_dev) != 0) { 3415 status = 1; 3416 goto blocked_mailbox; 3417 } 3418 3419 /* Copy mailbox data into host structure */ 3420 memcpy((caddr_t)mbox, (caddr_t)raw_mbox, 16); 3421 mbox->cmdid = 0xFE; 3422 mbox->busy = 1; 3423 mbox->poll = 0; 3424 mbox->ack = 0; 3425 mbox->numstatus = 0; 3426 mbox->status = 0; 3427 3428 wmb(); 3429 WRINDOOR(raid_dev, raid_dev->mbox_dma | 0x1); 3430 3431 /* Wait for maximum 1 min for status to post. 3432 * If the Firmware SUPPORTS the ABOVE COMMAND, 3433 * mbox->cmd will be set to 0 3434 * else 3435 * the firmware will reject the command with 3436 * mbox->numstatus set to 1 3437 */ 3438 3439 i = 0; 3440 status = 0; 3441 while (!mbox->numstatus && mbox->cmd == 0xFF) { 3442 rmb(); 3443 msleep(1); 3444 i++; 3445 if (i > 1000 * 60) { 3446 status = 1; 3447 break; 3448 } 3449 } 3450 if (mbox->numstatus == 1) 3451 status = 1; /*cmd not supported*/ 3452 3453 /* Check for interrupt line */ 3454 dword = RDOUTDOOR(raid_dev); 3455 WROUTDOOR(raid_dev, dword); 3456 WRINDOOR(raid_dev,2); 3457 3458 return status; 3459 3460 blocked_mailbox: 3461 con_log(CL_ANN, (KERN_WARNING "megaraid: blocked mailbox\n")); 3462 return status; 3463 } 3464 3465 /** 3466 * megaraid_mbox_display_scb - display SCB information, mostly debug purposes 3467 * @adapter : controller's soft state 3468 * @scb : SCB to be displayed 3469 * @level : debug level for console print 3470 * 3471 * Diplay information about the given SCB iff the current debug level is 3472 * verbose. 
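 *
 * The completion path uses it to dump any command that finished with a
 * non-zero status, e.g.:
 *
 *	if (status)
 *		megaraid_mbox_display_scb(adapter, scb);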
3473 */ 3474 static void 3475 megaraid_mbox_display_scb(adapter_t *adapter, scb_t *scb) 3476 { 3477 mbox_ccb_t *ccb; 3478 struct scsi_cmnd *scp; 3479 mbox_t *mbox; 3480 int level; 3481 int i; 3482 3483 3484 ccb = (mbox_ccb_t *)scb->ccb; 3485 scp = scb->scp; 3486 mbox = ccb->mbox; 3487 3488 level = CL_DLEVEL3; 3489 3490 con_log(level, (KERN_NOTICE 3491 "megaraid mailbox: status:%#x cmd:%#x id:%#x ", scb->status, 3492 mbox->cmd, scb->sno)); 3493 3494 con_log(level, ("sec:%#x lba:%#x addr:%#x ld:%d sg:%d\n", 3495 mbox->numsectors, mbox->lba, mbox->xferaddr, mbox->logdrv, 3496 mbox->numsge)); 3497 3498 if (!scp) return; 3499 3500 con_log(level, (KERN_NOTICE "scsi cmnd: ")); 3501 3502 for (i = 0; i < scp->cmd_len; i++) { 3503 con_log(level, ("%#2.02x ", scp->cmnd[i])); 3504 } 3505 3506 con_log(level, ("\n")); 3507 3508 return; 3509 } 3510 3511 3512 /** 3513 * megaraid_mbox_setup_device_map - manage device ids 3514 * @adapter : Driver's soft state 3515 * 3516 * Manange the device ids to have an appropraite mapping between the kernel 3517 * scsi addresses and megaraid scsi and logical drive addresses. We export 3518 * scsi devices on their actual addresses, whereas the logical drives are 3519 * exported on a virtual scsi channel. 3520 */ 3521 static void 3522 megaraid_mbox_setup_device_map(adapter_t *adapter) 3523 { 3524 uint8_t c; 3525 uint8_t t; 3526 3527 /* 3528 * First fill the values on the logical drive channel 3529 */ 3530 for (t = 0; t < LSI_MAX_LOGICAL_DRIVES_64LD; t++) 3531 adapter->device_ids[adapter->max_channel][t] = 3532 (t < adapter->init_id) ? t : t - 1; 3533 3534 adapter->device_ids[adapter->max_channel][adapter->init_id] = 0xFF; 3535 3536 /* 3537 * Fill the values on the physical devices channels 3538 */ 3539 for (c = 0; c < adapter->max_channel; c++) 3540 for (t = 0; t < LSI_MAX_LOGICAL_DRIVES_64LD; t++) 3541 adapter->device_ids[c][t] = (c << 8) | t; 3542 } 3543 3544 3545 /* 3546 * END: internal commands library 3547 */ 3548 3549 /* 3550 * START: Interface for the common management module 3551 * 3552 * This is the module, which interfaces with the common mangement module to 3553 * provide support for ioctl and sysfs 3554 */ 3555 3556 /** 3557 * megaraid_cmm_register - register with the mangement module 3558 * @adapter : HBA soft state 3559 * 3560 * Register with the management module, which allows applications to issue 3561 * ioctl calls to the drivers. This interface is used by the management module 3562 * to setup sysfs support as well. 3563 */ 3564 static int 3565 megaraid_cmm_register(adapter_t *adapter) 3566 { 3567 mraid_device_t *raid_dev = ADAP2RAIDDEV(adapter); 3568 mraid_mmadp_t adp; 3569 scb_t *scb; 3570 mbox_ccb_t *ccb; 3571 int rval; 3572 int i; 3573 3574 // Allocate memory for the base list of scb for management module. 3575 adapter->uscb_list = kmalloc(sizeof(scb_t) * MBOX_MAX_USER_CMDS, 3576 GFP_KERNEL); 3577 3578 if (adapter->uscb_list == NULL) { 3579 con_log(CL_ANN, (KERN_WARNING 3580 "megaraid: out of memory, %s %d\n", __FUNCTION__, 3581 __LINE__)); 3582 return -1; 3583 } 3584 memset(adapter->uscb_list, 0, sizeof(scb_t) * MBOX_MAX_USER_CMDS); 3585 3586 3587 // Initialize the synchronization parameters for resources for 3588 // commands for management module 3589 INIT_LIST_HEAD(&adapter->uscb_pool); 3590 3591 spin_lock_init(USER_FREE_LIST_LOCK(adapter)); 3592 3593 3594 3595 // link all the packets. Note, CCB for commands, coming from the 3596 // commom management module, mailbox physical address are already 3597 // setup by it. 
We just need placeholder for that in our local command 3598 // control blocks 3599 for (i = 0; i < MBOX_MAX_USER_CMDS; i++) { 3600 3601 scb = adapter->uscb_list + i; 3602 ccb = raid_dev->uccb_list + i; 3603 3604 scb->ccb = (caddr_t)ccb; 3605 ccb->mbox64 = raid_dev->umbox64 + i; 3606 ccb->mbox = &ccb->mbox64->mbox32; 3607 ccb->raw_mbox = (uint8_t *)ccb->mbox; 3608 3609 scb->gp = 0; 3610 3611 // COMMAND ID 0 - (MBOX_MAX_SCSI_CMDS-1) ARE RESERVED FOR 3612 // COMMANDS COMING FROM IO SUBSYSTEM (MID-LAYER) 3613 scb->sno = i + MBOX_MAX_SCSI_CMDS; 3614 3615 scb->scp = NULL; 3616 scb->state = SCB_FREE; 3617 scb->dma_direction = PCI_DMA_NONE; 3618 scb->dma_type = MRAID_DMA_NONE; 3619 scb->dev_channel = -1; 3620 scb->dev_target = -1; 3621 3622 // put scb in the free pool 3623 list_add_tail(&scb->list, &adapter->uscb_pool); 3624 } 3625 3626 adp.unique_id = adapter->unique_id; 3627 adp.drvr_type = DRVRTYPE_MBOX; 3628 adp.drvr_data = (unsigned long)adapter; 3629 adp.pdev = adapter->pdev; 3630 adp.issue_uioc = megaraid_mbox_mm_handler; 3631 adp.timeout = MBOX_RESET_WAIT + MBOX_RESET_EXT_WAIT; 3632 adp.max_kioc = MBOX_MAX_USER_CMDS; 3633 3634 if ((rval = mraid_mm_register_adp(&adp)) != 0) { 3635 3636 con_log(CL_ANN, (KERN_WARNING 3637 "megaraid mbox: did not register with CMM\n")); 3638 3639 kfree(adapter->uscb_list); 3640 } 3641 3642 return rval; 3643 } 3644 3645 3646 /** 3647 * megaraid_cmm_unregister - un-register with the mangement module 3648 * @adapter : HBA soft state 3649 * 3650 * Un-register with the management module. 3651 * FIXME: mgmt module must return failure for unregister if it has pending 3652 * commands in LLD. 3653 */ 3654 static int 3655 megaraid_cmm_unregister(adapter_t *adapter) 3656 { 3657 kfree(adapter->uscb_list); 3658 mraid_mm_unregister_adp(adapter->unique_id); 3659 return 0; 3660 } 3661 3662 3663 /** 3664 * megaraid_mbox_mm_handler - interface for CMM to issue commands to LLD 3665 * @drvr_data : LLD specific data 3666 * @kioc : CMM interface packet 3667 * @action : command action 3668 * 3669 * This routine is invoked whenever the Common Mangement Module (CMM) has a 3670 * command for us. The 'action' parameter specifies if this is a new command 3671 * or otherwise. 3672 */ 3673 static int 3674 megaraid_mbox_mm_handler(unsigned long drvr_data, uioc_t *kioc, uint32_t action) 3675 { 3676 adapter_t *adapter; 3677 3678 if (action != IOCTL_ISSUE) { 3679 con_log(CL_ANN, (KERN_WARNING 3680 "megaraid: unsupported management action:%#2x\n", 3681 action)); 3682 return (-ENOTSUPP); 3683 } 3684 3685 adapter = (adapter_t *)drvr_data; 3686 3687 // make sure this adapter is not being detached right now. 3688 if (atomic_read(&adapter->being_detached)) { 3689 con_log(CL_ANN, (KERN_WARNING 3690 "megaraid: reject management request, detaching\n")); 3691 return (-ENODEV); 3692 } 3693 3694 switch (kioc->opcode) { 3695 3696 case GET_ADAP_INFO: 3697 3698 kioc->status = gather_hbainfo(adapter, (mraid_hba_info_t *) 3699 (unsigned long)kioc->buf_vaddr); 3700 3701 kioc->done(kioc); 3702 3703 return kioc->status; 3704 3705 case MBOX_CMD: 3706 3707 return megaraid_mbox_mm_command(adapter, kioc); 3708 3709 default: 3710 kioc->status = (-EINVAL); 3711 kioc->done(kioc); 3712 return (-EINVAL); 3713 } 3714 3715 return 0; // not reached 3716 } 3717 3718 /** 3719 * megaraid_mbox_mm_command - issues commands routed through CMM 3720 * @adapter : HBA soft state 3721 * @kioc : management command packet 3722 * 3723 * Issues commands, which are routed through the management module. 
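 *
 * In outline (a condensed sketch of the body, error paths omitted): pick a
 * free user SCB, copy the caller's mailbox into it, remember the kioc and
 * either post it directly (logical drive delete) or queue it:
 *
 *	memcpy(ccb->mbox64, (mbox64_t *)(unsigned long)kioc->cmdbuf,
 *			sizeof(mbox64_t));
 *	scb->gp = (unsigned long)kioc;
 *	if (raw_mbox[0] == FC_DEL_LOGDRV && raw_mbox[2] == OP_DEL_LOGDRV)
 *		wait_till_fw_empty(adapter);	// then mbox_post_cmd()
 *	else
 *		megaraid_mbox_runpendq(adapter, scb);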
3724 */ 3725 static int 3726 megaraid_mbox_mm_command(adapter_t *adapter, uioc_t *kioc) 3727 { 3728 struct list_head *head = &adapter->uscb_pool; 3729 mbox64_t *mbox64; 3730 uint8_t *raw_mbox; 3731 scb_t *scb; 3732 mbox_ccb_t *ccb; 3733 unsigned long flags; 3734 3735 // detach one scb from free pool 3736 spin_lock_irqsave(USER_FREE_LIST_LOCK(adapter), flags); 3737 3738 if (list_empty(head)) { // should never happen because of CMM 3739 3740 con_log(CL_ANN, (KERN_WARNING 3741 "megaraid mbox: bug in cmm handler, lost resources\n")); 3742 3743 spin_unlock_irqrestore(USER_FREE_LIST_LOCK(adapter), flags); 3744 3745 return (-EINVAL); 3746 } 3747 3748 scb = list_entry(head->next, scb_t, list); 3749 list_del_init(&scb->list); 3750 3751 spin_unlock_irqrestore(USER_FREE_LIST_LOCK(adapter), flags); 3752 3753 scb->state = SCB_ACTIVE; 3754 scb->dma_type = MRAID_DMA_NONE; 3755 scb->dma_direction = PCI_DMA_NONE; 3756 3757 ccb = (mbox_ccb_t *)scb->ccb; 3758 mbox64 = (mbox64_t *)(unsigned long)kioc->cmdbuf; 3759 raw_mbox = (uint8_t *)&mbox64->mbox32; 3760 3761 memcpy(ccb->mbox64, mbox64, sizeof(mbox64_t)); 3762 3763 scb->gp = (unsigned long)kioc; 3764 3765 /* 3766 * If it is a logdrv random delete operation, we have to wait till 3767 * there are no outstanding cmds at the fw and then issue it directly 3768 */ 3769 if (raw_mbox[0] == FC_DEL_LOGDRV && raw_mbox[2] == OP_DEL_LOGDRV) { 3770 3771 if (wait_till_fw_empty(adapter)) { 3772 con_log(CL_ANN, (KERN_NOTICE 3773 "megaraid mbox: LD delete, timed out\n")); 3774 3775 kioc->status = -ETIME; 3776 3777 scb->status = -1; 3778 3779 megaraid_mbox_mm_done(adapter, scb); 3780 3781 return (-ETIME); 3782 } 3783 3784 INIT_LIST_HEAD(&scb->list); 3785 3786 scb->state = SCB_ISSUED; 3787 if (mbox_post_cmd(adapter, scb) != 0) { 3788 3789 con_log(CL_ANN, (KERN_NOTICE 3790 "megaraid mbox: LD delete, mailbox busy\n")); 3791 3792 kioc->status = -EBUSY; 3793 3794 scb->status = -1; 3795 3796 megaraid_mbox_mm_done(adapter, scb); 3797 3798 return (-EBUSY); 3799 } 3800 3801 return 0; 3802 } 3803 3804 // put the command on the pending list and execute 3805 megaraid_mbox_runpendq(adapter, scb); 3806 3807 return 0; 3808 } 3809 3810 3811 static int 3812 wait_till_fw_empty(adapter_t *adapter) 3813 { 3814 unsigned long flags = 0; 3815 int i; 3816 3817 3818 /* 3819 * Set the quiescent flag to stop issuing cmds to FW. 3820 */ 3821 spin_lock_irqsave(&adapter->lock, flags); 3822 adapter->quiescent++; 3823 spin_unlock_irqrestore(&adapter->lock, flags); 3824 3825 /* 3826 * Wait till there are no more cmds outstanding at FW. Try for at most 3827 * 60 seconds 3828 */ 3829 for (i = 0; i < 60 && adapter->outstanding_cmds; i++) { 3830 con_log(CL_DLEVEL1, (KERN_INFO 3831 "megaraid: FW has %d pending commands\n", 3832 adapter->outstanding_cmds)); 3833 3834 msleep(1000); 3835 } 3836 3837 return adapter->outstanding_cmds; 3838 } 3839 3840 3841 /** 3842 * megaraid_mbox_mm_done - callback for CMM commands 3843 * @adapter : HBA soft state 3844 * @scb : completed command 3845 * 3846 * Callback routine for internal commands originated from the management 3847 * module. 
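 *
 * In short: mirror the command status into the user mailbox, return the
 * SCB to the free pool and complete the ioctl (sketch):
 *
 *	mbox64->mbox32.status = scb->status;
 *	list_add(&scb->list, &adapter->uscb_pool);
 *	kioc->done(kioc);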
3848 */ 3849 static void 3850 megaraid_mbox_mm_done(adapter_t *adapter, scb_t *scb) 3851 { 3852 uioc_t *kioc; 3853 mbox64_t *mbox64; 3854 uint8_t *raw_mbox; 3855 unsigned long flags; 3856 3857 kioc = (uioc_t *)scb->gp; 3858 mbox64 = (mbox64_t *)(unsigned long)kioc->cmdbuf; 3859 mbox64->mbox32.status = scb->status; 3860 raw_mbox = (uint8_t *)&mbox64->mbox32; 3861 3862 3863 // put scb in the free pool 3864 scb->state = SCB_FREE; 3865 scb->scp = NULL; 3866 3867 spin_lock_irqsave(USER_FREE_LIST_LOCK(adapter), flags); 3868 3869 list_add(&scb->list, &adapter->uscb_pool); 3870 3871 spin_unlock_irqrestore(USER_FREE_LIST_LOCK(adapter), flags); 3872 3873 // if a delete logical drive operation succeeded, restart the 3874 // controller 3875 if (raw_mbox[0] == FC_DEL_LOGDRV && raw_mbox[2] == OP_DEL_LOGDRV) { 3876 3877 adapter->quiescent--; 3878 3879 megaraid_mbox_runpendq(adapter, NULL); 3880 } 3881 3882 kioc->done(kioc); 3883 3884 return; 3885 } 3886 3887 3888 /** 3889 * gather_hbainfo - HBA characteristics for the applications 3890 * @adapter : HBA soft state 3891 * @hinfo : pointer to the caller's host info strucuture 3892 */ 3893 static int 3894 gather_hbainfo(adapter_t *adapter, mraid_hba_info_t *hinfo) 3895 { 3896 uint8_t dmajor; 3897 3898 dmajor = megaraid_mbox_version[0]; 3899 3900 hinfo->pci_vendor_id = adapter->pdev->vendor; 3901 hinfo->pci_device_id = adapter->pdev->device; 3902 hinfo->subsys_vendor_id = adapter->pdev->subsystem_vendor; 3903 hinfo->subsys_device_id = adapter->pdev->subsystem_device; 3904 3905 hinfo->pci_bus = adapter->pdev->bus->number; 3906 hinfo->pci_dev_fn = adapter->pdev->devfn; 3907 hinfo->pci_slot = PCI_SLOT(adapter->pdev->devfn); 3908 hinfo->irq = adapter->host->irq; 3909 hinfo->baseport = ADAP2RAIDDEV(adapter)->baseport; 3910 3911 hinfo->unique_id = (hinfo->pci_bus << 8) | adapter->pdev->devfn; 3912 hinfo->host_no = adapter->host->host_no; 3913 3914 return 0; 3915 } 3916 3917 /* 3918 * END: Interface for the common management module 3919 */ 3920 3921 3922 3923 /** 3924 * megaraid_sysfs_alloc_resources - allocate sysfs related resources 3925 * @adapter : controller's soft state 3926 * 3927 * Allocate packets required to issue FW calls whenever the sysfs attributes 3928 * are read. These attributes would require up-to-date information from the 3929 * FW. Also set up resources for mutual exclusion to share these resources and 3930 * the wait queue. 3931 * 3932 * Return 0 on success. 3933 * Return -ERROR_CODE on failure. 
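 *
 * All three packets (the uioc, the 64-bit mailbox and the DMA buffer) must
 * be available; a partial allocation is rolled back in one place (sketch):
 *
 *	if (!raid_dev->sysfs_uioc || !raid_dev->sysfs_mbox64 ||
 *			!raid_dev->sysfs_buffer) {
 *		rval = -ENOMEM;
 *		megaraid_sysfs_free_resources(adapter);
 *	}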
3934 */ 3935 static int 3936 megaraid_sysfs_alloc_resources(adapter_t *adapter) 3937 { 3938 mraid_device_t *raid_dev = ADAP2RAIDDEV(adapter); 3939 int rval = 0; 3940 3941 raid_dev->sysfs_uioc = kmalloc(sizeof(uioc_t), GFP_KERNEL); 3942 3943 raid_dev->sysfs_mbox64 = kmalloc(sizeof(mbox64_t), GFP_KERNEL); 3944 3945 raid_dev->sysfs_buffer = pci_alloc_consistent(adapter->pdev, 3946 PAGE_SIZE, &raid_dev->sysfs_buffer_dma); 3947 3948 if (!raid_dev->sysfs_uioc || !raid_dev->sysfs_mbox64 || 3949 !raid_dev->sysfs_buffer) { 3950 3951 con_log(CL_ANN, (KERN_WARNING 3952 "megaraid: out of memory, %s %d\n", __FUNCTION__, 3953 __LINE__)); 3954 3955 rval = -ENOMEM; 3956 3957 megaraid_sysfs_free_resources(adapter); 3958 } 3959 3960 sema_init(&raid_dev->sysfs_sem, 1); 3961 3962 init_waitqueue_head(&raid_dev->sysfs_wait_q); 3963 3964 return rval; 3965 } 3966 3967 3968 /** 3969 * megaraid_sysfs_free_resources - free sysfs related resources 3970 * @adapter : controller's soft state 3971 * 3972 * Free packets allocated for sysfs FW commands 3973 */ 3974 static void 3975 megaraid_sysfs_free_resources(adapter_t *adapter) 3976 { 3977 mraid_device_t *raid_dev = ADAP2RAIDDEV(adapter); 3978 3979 kfree(raid_dev->sysfs_uioc); 3980 kfree(raid_dev->sysfs_mbox64); 3981 3982 if (raid_dev->sysfs_buffer) { 3983 pci_free_consistent(adapter->pdev, PAGE_SIZE, 3984 raid_dev->sysfs_buffer, raid_dev->sysfs_buffer_dma); 3985 } 3986 } 3987 3988 3989 /** 3990 * megaraid_sysfs_get_ldmap_done - callback for get ldmap 3991 * @uioc : completed packet 3992 * 3993 * Callback routine called in the ISR/tasklet context for get ldmap call 3994 */ 3995 static void 3996 megaraid_sysfs_get_ldmap_done(uioc_t *uioc) 3997 { 3998 adapter_t *adapter = (adapter_t *)uioc->buf_vaddr; 3999 mraid_device_t *raid_dev = ADAP2RAIDDEV(adapter); 4000 4001 uioc->status = 0; 4002 4003 wake_up(&raid_dev->sysfs_wait_q); 4004 } 4005 4006 4007 /** 4008 * megaraid_sysfs_get_ldmap_timeout - timeout handling for get ldmap 4009 * @data : timed out packet 4010 * 4011 * Timeout routine to recover and return to application, in case the adapter 4012 * has stopped responding. A timeout of 60 seconds for this command seems like 4013 * a good value. 4014 */ 4015 static void 4016 megaraid_sysfs_get_ldmap_timeout(unsigned long data) 4017 { 4018 uioc_t *uioc = (uioc_t *)data; 4019 adapter_t *adapter = (adapter_t *)uioc->buf_vaddr; 4020 mraid_device_t *raid_dev = ADAP2RAIDDEV(adapter); 4021 4022 uioc->status = -ETIME; 4023 4024 wake_up(&raid_dev->sysfs_wait_q); 4025 } 4026 4027 4028 /** 4029 * megaraid_sysfs_get_ldmap - get update logical drive map 4030 * @adapter : controller's soft state 4031 * 4032 * This routine will be called whenever user reads the logical drive 4033 * attributes, go get the current logical drive mapping table from the 4034 * firmware. We use the managment API's to issue commands to the controller. 4035 * 4036 * NOTE: The commands issuance functionality is not generalized and 4037 * implemented in context of "get ld map" command only. If required, the 4038 * command issuance logical can be trivially pulled out and implemented as a 4039 * standalone libary. For now, this should suffice since there is no other 4040 * user of this interface. 4041 * 4042 * Return 0 on success. 4043 * Return -1 on failure. 
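 *
 * Roughly: serialize on sysfs_sem, build an FC_DEL_LOGDRV/OP_GET_LDID_MAP
 * mailbox, arm a 60 second recovery timer and issue the command through
 * the management path, sleeping until the done/timeout callback changes
 * uioc->status from -ENODATA:
 *
 *	rval = megaraid_mbox_mm_command(adapter, uioc);
 *	if (rval == 0)
 *		wait_event(raid_dev->sysfs_wait_q,
 *				(uioc->status != -ENODATA));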
4044 */ 4045 static int 4046 megaraid_sysfs_get_ldmap(adapter_t *adapter) 4047 { 4048 mraid_device_t *raid_dev = ADAP2RAIDDEV(adapter); 4049 uioc_t *uioc; 4050 mbox64_t *mbox64; 4051 mbox_t *mbox; 4052 char *raw_mbox; 4053 struct timer_list sysfs_timer; 4054 struct timer_list *timerp; 4055 caddr_t ldmap; 4056 int rval = 0; 4057 4058 /* 4059 * Allow only one read at a time to go through the sysfs attributes 4060 */ 4061 down(&raid_dev->sysfs_sem); 4062 4063 uioc = raid_dev->sysfs_uioc; 4064 mbox64 = raid_dev->sysfs_mbox64; 4065 ldmap = raid_dev->sysfs_buffer; 4066 4067 memset(uioc, 0, sizeof(uioc_t)); 4068 memset(mbox64, 0, sizeof(mbox64_t)); 4069 memset(ldmap, 0, sizeof(raid_dev->curr_ldmap)); 4070 4071 mbox = &mbox64->mbox32; 4072 raw_mbox = (char *)mbox; 4073 uioc->cmdbuf = (uint64_t)(unsigned long)mbox64; 4074 uioc->buf_vaddr = (caddr_t)adapter; 4075 uioc->status = -ENODATA; 4076 uioc->done = megaraid_sysfs_get_ldmap_done; 4077 4078 /* 4079 * Prepare the mailbox packet to get the current logical drive mapping 4080 * table 4081 */ 4082 mbox->xferaddr = (uint32_t)raid_dev->sysfs_buffer_dma; 4083 4084 raw_mbox[0] = FC_DEL_LOGDRV; 4085 raw_mbox[2] = OP_GET_LDID_MAP; 4086 4087 /* 4088 * Setup a timer to recover from a non-responding controller 4089 */ 4090 timerp = &sysfs_timer; 4091 init_timer(timerp); 4092 4093 timerp->function = megaraid_sysfs_get_ldmap_timeout; 4094 timerp->data = (unsigned long)uioc; 4095 timerp->expires = jiffies + 60 * HZ; 4096 4097 add_timer(timerp); 4098 4099 /* 4100 * Send the command to the firmware 4101 */ 4102 rval = megaraid_mbox_mm_command(adapter, uioc); 4103 4104 if (rval == 0) { // command successfully issued 4105 wait_event(raid_dev->sysfs_wait_q, (uioc->status != -ENODATA)); 4106 4107 /* 4108 * Check if the command timed out 4109 */ 4110 if (uioc->status == -ETIME) { 4111 con_log(CL_ANN, (KERN_NOTICE 4112 "megaraid: sysfs get ld map timed out\n")); 4113 4114 rval = -ETIME; 4115 } 4116 else { 4117 rval = mbox->status; 4118 } 4119 4120 if (rval == 0) { 4121 memcpy(raid_dev->curr_ldmap, ldmap, 4122 sizeof(raid_dev->curr_ldmap)); 4123 } 4124 else { 4125 con_log(CL_ANN, (KERN_NOTICE 4126 "megaraid: get ld map failed with %x\n", rval)); 4127 } 4128 } 4129 else { 4130 con_log(CL_ANN, (KERN_NOTICE 4131 "megaraid: could not issue ldmap command:%x\n", rval)); 4132 } 4133 4134 4135 del_timer_sync(timerp); 4136 4137 up(&raid_dev->sysfs_sem); 4138 4139 return rval; 4140 } 4141 4142 4143 /** 4144 * megaraid_sysfs_show_app_hndl - display application handle for this adapter 4145 * @cdev : class device object representation for the host 4146 * @buf : buffer to send data to 4147 * 4148 * Display the handle used by the applications while executing management 4149 * tasks on the adapter. We invoke a management module API to get the adapter 4150 * handle, since we do not interface with applications directly. 
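 *
 * The handle itself comes from the common management module; the attribute
 * just prints it in decimal (sketch of the body below):
 *
 *	app_hndl = mraid_mm_adapter_app_handle(adapter->unique_id);
 *	return snprintf(buf, 8, "%u\n", app_hndl);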
4151 */ 4152 static ssize_t 4153 megaraid_sysfs_show_app_hndl(struct class_device *cdev, char *buf) 4154 { 4155 struct Scsi_Host *shost = class_to_shost(cdev); 4156 adapter_t *adapter = (adapter_t *)SCSIHOST2ADAP(shost); 4157 uint32_t app_hndl; 4158 4159 app_hndl = mraid_mm_adapter_app_handle(adapter->unique_id); 4160 4161 return snprintf(buf, 8, "%u\n", app_hndl); 4162 } 4163 4164 4165 /** 4166 * megaraid_sysfs_show_ldnum - display the logical drive number for this device 4167 * @dev : device object representation for the scsi device 4168 * @attr : device attribute to show 4169 * @buf : buffer to send data to 4170 * 4171 * Display the logical drive number for the device in question, if it a valid 4172 * logical drive. For physical devices, "-1" is returned. 4173 * 4174 * The logical drive number is displayed in following format: 4175 * 4176 * <SCSI ID> <LD NUM> <LD STICKY ID> <APP ADAPTER HANDLE> 4177 * 4178 * <int> <int> <int> <int> 4179 */ 4180 static ssize_t 4181 megaraid_sysfs_show_ldnum(struct device *dev, struct device_attribute *attr, char *buf) 4182 { 4183 struct scsi_device *sdev = to_scsi_device(dev); 4184 adapter_t *adapter = (adapter_t *)SCSIHOST2ADAP(sdev->host); 4185 mraid_device_t *raid_dev = ADAP2RAIDDEV(adapter); 4186 int scsi_id = -1; 4187 int logical_drv = -1; 4188 int ldid_map = -1; 4189 uint32_t app_hndl = 0; 4190 int mapped_sdev_id; 4191 int rval; 4192 int i; 4193 4194 if (raid_dev->random_del_supported && 4195 MRAID_IS_LOGICAL_SDEV(adapter, sdev)) { 4196 4197 rval = megaraid_sysfs_get_ldmap(adapter); 4198 if (rval == 0) { 4199 4200 for (i = 0; i < MAX_LOGICAL_DRIVES_40LD; i++) { 4201 4202 mapped_sdev_id = sdev->id; 4203 4204 if (sdev->id > adapter->init_id) { 4205 mapped_sdev_id -= 1; 4206 } 4207 4208 if (raid_dev->curr_ldmap[i] == mapped_sdev_id) { 4209 4210 scsi_id = sdev->id; 4211 4212 logical_drv = i; 4213 4214 ldid_map = raid_dev->curr_ldmap[i]; 4215 4216 app_hndl = mraid_mm_adapter_app_handle( 4217 adapter->unique_id); 4218 4219 break; 4220 } 4221 } 4222 } 4223 else { 4224 con_log(CL_ANN, (KERN_NOTICE 4225 "megaraid: sysfs get ld map failed: %x\n", 4226 rval)); 4227 } 4228 } 4229 4230 return snprintf(buf, 36, "%d %d %d %d\n", scsi_id, logical_drv, 4231 ldid_map, app_hndl); 4232 } 4233 4234 4235 /* 4236 * END: Mailbox Low Level Driver 4237 */ 4238 module_init(megaraid_init); 4239 module_exit(megaraid_exit); 4240 4241 /* vim: set ts=8 sw=8 tw=78 ai si: */ 4242