// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *
 * Linux MegaRAID device driver
 *
 * Copyright (c) 2002  LSI Logic Corporation.
 *
 * Copyright (c) 2002  Red Hat, Inc. All rights reserved.
 *	  - fixes
 *	  - speed-ups (list handling fixes, issued_list, optimizations.)
 *	  - lots of cleanups.
 *
 * Copyright (c) 2003  Christoph Hellwig  <hch@lst.de>
 *	  - new-style, hotplug-aware pci probing and scsi registration
 *
 * Version : v2.00.4 Mon Nov 14 14:02:43 EST 2005 - Seokmann Ju
 *					<Seokmann.Ju@lsil.com>
 *
 * Description: Linux device driver for LSI Logic MegaRAID controller
 *
 * Supported controllers: MegaRAID 418, 428, 438, 466, 762, 467, 471, 490, 493
 *					518, 520, 531, 532
 *
 * This driver is supported by LSI Logic, with assistance from Red Hat, Dell,
 * and others. Please send updates to the mailing list
 * linux-scsi@vger.kernel.org .
 */

#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/uaccess.h>
#include <asm/io.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/reboot.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/dma-mapping.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <scsi/scsicam.h>

#include "scsi.h"
#include <scsi/scsi_host.h>

#include "megaraid.h"

#define MEGARAID_MODULE_VERSION "2.00.4"

MODULE_AUTHOR ("sju@lsil.com");
MODULE_DESCRIPTION ("LSI Logic MegaRAID legacy driver");
MODULE_LICENSE ("GPL");
MODULE_VERSION(MEGARAID_MODULE_VERSION);

static DEFINE_MUTEX(megadev_mutex);
static unsigned int max_cmd_per_lun = DEF_CMD_PER_LUN;
module_param(max_cmd_per_lun, uint, 0);
MODULE_PARM_DESC(max_cmd_per_lun, "Maximum number of commands which can be issued to a single LUN (default=DEF_CMD_PER_LUN=63)");

static unsigned short int max_sectors_per_io = MAX_SECTORS_PER_IO;
module_param(max_sectors_per_io, ushort, 0);
MODULE_PARM_DESC(max_sectors_per_io, "Maximum number of sectors per I/O request (default=MAX_SECTORS_PER_IO=128)");


static unsigned short int max_mbox_busy_wait = MBOX_BUSY_WAIT;
module_param(max_mbox_busy_wait, ushort, 0);
MODULE_PARM_DESC(max_mbox_busy_wait, "Maximum wait for mailbox in microseconds if busy (default=MBOX_BUSY_WAIT=10)");

#define RDINDOOR(adapter)	readl((adapter)->mmio_base + 0x20)
#define RDOUTDOOR(adapter)	readl((adapter)->mmio_base + 0x2C)
#define WRINDOOR(adapter,value)		writel(value, (adapter)->mmio_base + 0x20)
#define WROUTDOOR(adapter,value)	writel(value, (adapter)->mmio_base + 0x2C)

/*
 * Global variables
 */

static int hba_count;
static adapter_t *hba_soft_state[MAX_CONTROLLERS];
static struct proc_dir_entry *mega_proc_dir_entry;

/* For controller re-ordering */
static struct mega_hbas mega_hbas[MAX_CONTROLLERS];

static long
megadev_unlocked_ioctl(struct file *filep, unsigned int cmd, unsigned long arg);

/*
 * The File Operations structure for the serial/ioctl interface of the driver
 */
static const struct file_operations megadev_fops = {
	.owner		= THIS_MODULE,
	.unlocked_ioctl	= megadev_unlocked_ioctl,
	.open		= megadev_open,
	.llseek		= noop_llseek,
};

/*
 * Array of structures for storing the information about the controllers. This
 * information is sent to the user level applications, when they do an ioctl
 * for this information.
 */
static struct mcontroller mcontroller[MAX_CONTROLLERS];

/* The current driver version */
static u32 driver_ver = 0x02000000;

/* major number used by the device for character interface */
static int major;

#define IS_RAID_CH(hba, ch)	(((hba)->mega_ch_class >> (ch)) & 0x01)


/*
 * Debug variable to print some diagnostic messages
 */
static int trace_level;

/**
 * mega_setup_mailbox()
 * @adapter: pointer to our soft state
 *
 * Allocates 8-byte aligned memory for the handshake mailbox.
 */
static int
mega_setup_mailbox(adapter_t *adapter)
{
	unsigned long	align;

	adapter->una_mbox64 = pci_alloc_consistent(adapter->dev,
			sizeof(mbox64_t), &adapter->una_mbox64_dma);

	if( !adapter->una_mbox64 ) return -1;

	adapter->mbox = &adapter->una_mbox64->mbox;

	adapter->mbox = (mbox_t *)((((unsigned long) adapter->mbox) + 15) &
			(~0UL ^ 0xFUL));

	adapter->mbox64 = (mbox64_t *)(((unsigned long)adapter->mbox) - 8);

	align = ((void *)adapter->mbox) - ((void *)&adapter->una_mbox64->mbox);

	adapter->mbox_dma = adapter->una_mbox64_dma + 8 + align;

	/*
	 * Register the mailbox if the controller is an io-mapped controller
	 */
	if( adapter->flag & BOARD_IOMAP ) {

		outb(adapter->mbox_dma & 0xFF,
				adapter->host->io_port + MBOX_PORT0);

		outb((adapter->mbox_dma >> 8) & 0xFF,
				adapter->host->io_port + MBOX_PORT1);

		outb((adapter->mbox_dma >> 16) & 0xFF,
				adapter->host->io_port + MBOX_PORT2);

		outb((adapter->mbox_dma >> 24) & 0xFF,
				adapter->host->io_port + MBOX_PORT3);

		outb(ENABLE_MBOX_BYTE,
				adapter->host->io_port + ENABLE_MBOX_REGION);

		irq_ack(adapter);

		irq_enable(adapter);
	}

	return 0;
}


/*
 * mega_query_adapter()
 * @adapter - pointer to our soft state
 *
 * Issue the adapter inquiry commands to the controller and find out
 * information and parameters about the attached devices
 */
static int
mega_query_adapter(adapter_t *adapter)
{
	dma_addr_t	prod_info_dma_handle;
	mega_inquiry3	*inquiry3;
	u8	raw_mbox[sizeof(struct mbox_out)];
	mbox_t	*mbox;
	int	retval;

	/* Initialize adapter inquiry mailbox */

	mbox = (mbox_t *)raw_mbox;

	memset((void *)adapter->mega_buffer, 0, MEGA_BUFFER_SIZE);
	memset(&mbox->m_out, 0, sizeof(raw_mbox));

	/*
	 * Try to issue Inquiry3 command
	 * if not succeeded, then issue MEGA_MBOXCMD_ADAPTERINQ command and
	 * update enquiry3 structure
	 */
	mbox->m_out.xferaddr = (u32)adapter->buf_dma_handle;

	inquiry3 = (mega_inquiry3 *)adapter->mega_buffer;

	raw_mbox[0] = FC_NEW_CONFIG;		/* i.e. mbox->cmd=0xA1 */
	raw_mbox[2] = NC_SUBOP_ENQUIRY3;	/* i.e. 0x0F */
	raw_mbox[3] = ENQ3_GET_SOLICITED_FULL;	/* i.e. 0x02 */

	/* Issue a blocking command to the card */
	if ((retval = issue_scb_block(adapter, raw_mbox))) {
		/* the adapter does not support 40ld */

		mraid_ext_inquiry	*ext_inq;
		mraid_inquiry		*inq;
		dma_addr_t		dma_handle;

		ext_inq = pci_alloc_consistent(adapter->dev,
				sizeof(mraid_ext_inquiry), &dma_handle);

		if( ext_inq == NULL ) return -1;

		inq = &ext_inq->raid_inq;

		mbox->m_out.xferaddr = (u32)dma_handle;

		/* issue old 0x04 command to adapter */
		mbox->m_out.cmd = MEGA_MBOXCMD_ADPEXTINQ;

		issue_scb_block(adapter, raw_mbox);

		/*
		 * update Enquiry3 and ProductInfo structures with
		 * mraid_inquiry structure
		 */
		mega_8_to_40ld(inq, inquiry3,
				(mega_product_info *)&adapter->product_info);

		pci_free_consistent(adapter->dev, sizeof(mraid_ext_inquiry),
				ext_inq, dma_handle);

	} else {	/* adapter supports 40ld */
		adapter->flag |= BOARD_40LD;

		/*
		 * get product_info, which is static information and will be
		 * unchanged
		 */
		prod_info_dma_handle = pci_map_single(adapter->dev, (void *)
				&adapter->product_info,
				sizeof(mega_product_info), PCI_DMA_FROMDEVICE);

		mbox->m_out.xferaddr = prod_info_dma_handle;

		raw_mbox[0] = FC_NEW_CONFIG;		/* i.e. mbox->cmd=0xA1 */
		raw_mbox[2] = NC_SUBOP_PRODUCT_INFO;	/* i.e. 0x0E */

		if ((retval = issue_scb_block(adapter, raw_mbox)))
			dev_warn(&adapter->dev->dev,
				"Product_info cmd failed with error: %d\n",
				retval);

		pci_unmap_single(adapter->dev, prod_info_dma_handle,
				sizeof(mega_product_info), PCI_DMA_FROMDEVICE);
	}


	/*
	 * kernel scans the channels from 0 to <= max_channel
	 */
	adapter->host->max_channel =
		adapter->product_info.nchannels + NVIRT_CHAN -1;

	adapter->host->max_id = 16;	/* max targets per channel */

	adapter->host->max_lun = 7;	/* Up to 7 luns for non disk devices */

	adapter->host->cmd_per_lun = max_cmd_per_lun;

	adapter->numldrv = inquiry3->num_ldrv;

	adapter->max_cmds = adapter->product_info.max_commands;

	if(adapter->max_cmds > MAX_COMMANDS)
		adapter->max_cmds = MAX_COMMANDS;

	adapter->host->can_queue = adapter->max_cmds - 1;

	/*
	 * Get the maximum number of scatter-gather elements supported by this
	 * firmware
	 */
	mega_get_max_sgl(adapter);

	adapter->host->sg_tablesize = adapter->sglen;

	/* use HP firmware and bios version encoding
	   Note: fw_version[0|1] and bios_version[0|1] were originally shifted
	   right 8 bits making them zero. This 0 value was hardcoded to fix
	   sparse warnings. */
	if (adapter->product_info.subsysvid == PCI_VENDOR_ID_HP) {
		snprintf(adapter->fw_version, sizeof(adapter->fw_version),
			 "%c%d%d.%d%d",
			 adapter->product_info.fw_version[2],
			 0,
			 adapter->product_info.fw_version[1] & 0x0f,
			 0,
			 adapter->product_info.fw_version[0] & 0x0f);
		snprintf(adapter->bios_version, sizeof(adapter->bios_version),
			 "%c%d%d.%d%d",
			 adapter->product_info.bios_version[2],
			 0,
			 adapter->product_info.bios_version[1] & 0x0f,
			 0,
			 adapter->product_info.bios_version[0] & 0x0f);
	} else {
		memcpy(adapter->fw_version,
			(char *)adapter->product_info.fw_version, 4);
		adapter->fw_version[4] = 0;

		memcpy(adapter->bios_version,
			(char *)adapter->product_info.bios_version, 4);

		adapter->bios_version[4] = 0;
	}

	dev_notice(&adapter->dev->dev, "[%s:%s] detected %d logical drives\n",
		adapter->fw_version, adapter->bios_version, adapter->numldrv);

	/*
	 * Do we support extended (>10 bytes) cdbs
	 */
	adapter->support_ext_cdb = mega_support_ext_cdb(adapter);
	if (adapter->support_ext_cdb)
		dev_notice(&adapter->dev->dev, "supports extended CDBs\n");


	return 0;
}

/**
 * mega_runpendq()
 * @adapter: pointer to our soft state
 *
 * Runs through the list of pending requests.
 */
static inline void
mega_runpendq(adapter_t *adapter)
{
	if(!list_empty(&adapter->pending_list))
		__mega_runpendq(adapter);
}

/*
 * megaraid_queue()
 * @scmd - Issue this scsi command
 * @done - the callback hook into the scsi mid-layer
 *
 * The command queuing entry point for the mid-layer.
 */
static int
megaraid_queue_lck(struct scsi_cmnd *scmd, void (*done)(struct scsi_cmnd *))
{
	adapter_t	*adapter;
	scb_t	*scb;
	int	busy=0;
	unsigned long flags;

	adapter = (adapter_t *)scmd->device->host->hostdata;

	scmd->scsi_done = done;


	/*
	 * Allocate and build a SCB request
	 * busy flag will be set if mega_build_cmd() command could not
	 * allocate scb. We will return non-zero status in that case.
	 * NOTE: scb can be null even though certain commands completed
	 * successfully, e.g., MODE_SENSE and TEST_UNIT_READY, we would
	 * return 0 in that case.
	 */

	spin_lock_irqsave(&adapter->lock, flags);
	scb = mega_build_cmd(adapter, scmd, &busy);
	if (!scb)
		goto out;

	scb->state |= SCB_PENDQ;
	list_add_tail(&scb->list, &adapter->pending_list);

	/*
	 * Check if the HBA is in quiescent state, e.g., during a
	 * delete logical drive operation. If it is, don't run
	 * the pending_list.
	 */
	if (atomic_read(&adapter->quiescent) == 0)
		mega_runpendq(adapter);

	busy = 0;
 out:
	spin_unlock_irqrestore(&adapter->lock, flags);
	return busy;
}

static DEF_SCSI_QCMD(megaraid_queue)

/**
 * mega_allocate_scb()
 * @adapter: pointer to our soft state
 * @cmd: scsi command from the mid-layer
 *
 * Allocate a SCB structure. This is the central structure for controller
 * commands.
 */
static inline scb_t *
mega_allocate_scb(adapter_t *adapter, struct scsi_cmnd *cmd)
{
	struct list_head *head = &adapter->free_list;
	scb_t	*scb;

	/* Unlink command from Free List */
	if( !list_empty(head) ) {

		scb = list_entry(head->next, scb_t, list);

		list_del_init(head->next);

		scb->state = SCB_ACTIVE;
		scb->cmd = cmd;
		scb->dma_type = MEGA_DMA_TYPE_NONE;

		return scb;
	}

	return NULL;
}

/**
 * mega_get_ldrv_num()
 * @adapter: pointer to our soft state
 * @cmd: scsi mid layer command
 * @channel: channel on the controller
 *
 * Calculate the logical drive number based on the information in scsi command
 * and the channel number.
 */
static inline int
mega_get_ldrv_num(adapter_t *adapter, struct scsi_cmnd *cmd, int channel)
{
	int tgt;
	int ldrv_num;

	tgt = cmd->device->id;

	if ( tgt > adapter->this_id )
		tgt--;	/* we do not get inquiries for initiator id */

	ldrv_num = (channel * 15) + tgt;


	/*
	 * If we have a logical drive with boot enabled, project it first
	 */
	if( adapter->boot_ldrv_enabled ) {
		if( ldrv_num == 0 ) {
			ldrv_num = adapter->boot_ldrv;
		}
		else {
			if( ldrv_num <= adapter->boot_ldrv ) {
				ldrv_num--;
			}
		}
	}

	/*
	 * If "delete logical drive" feature is enabled on this controller.
	 * Do only if at least one delete logical drive operation was done.
	 *
	 * Also, after logical drive deletion, instead of logical drive number,
	 * the value returned should be 0x80+logical drive id.
	 *
	 * This is valid only for IO commands.
	 */

	if (adapter->support_random_del && adapter->read_ldidmap )
		switch (cmd->cmnd[0]) {
		case READ_6:	/* fall through */
		case WRITE_6:	/* fall through */
		case READ_10:	/* fall through */
		case WRITE_10:
			ldrv_num += 0x80;
		}

	return ldrv_num;
}

/**
 * mega_build_cmd()
 * @adapter: pointer to our soft state
 * @cmd: Prepare using this scsi command
 * @busy: busy flag if no resources
 *
 * Prepares a command and scatter gather list for the controller. This routine
 * also finds out if the command is intended for a logical drive or a
 * physical device and prepares the controller command accordingly.
 *
 * We also re-order the logical drives and physical devices based on their
 * boot settings.
 */
static scb_t *
mega_build_cmd(adapter_t *adapter, struct scsi_cmnd *cmd, int *busy)
{
	mega_passthru	*pthru;
	scb_t	*scb;
	mbox_t	*mbox;
	u32	seg;
	char	islogical;
	int	max_ldrv_num;
	int	channel = 0;
	int	target = 0;
	int	ldrv_num = 0;	/* logical drive number */

	/*
	 * We know what channels our logical drives are on - mega_find_card()
	 */
	islogical = adapter->logdrv_chan[cmd->device->channel];

	/*
	 * The theory: If physical drive is chosen for boot, all the physical
	 * devices are exported before the logical drives, otherwise physical
	 * devices are pushed after logical drives, in which case - Kernel sees
	 * the physical devices on virtual channel which is obviously converted
	 * to actual channel on the HBA.
	 */
	if( adapter->boot_pdrv_enabled ) {
		if( islogical ) {
			/* logical channel */
			channel = cmd->device->channel -
				adapter->product_info.nchannels;
		}
		else {
			/* this is physical channel */
			channel = cmd->device->channel;
			target = cmd->device->id;

			/*
			 * boot from a physical disk, that disk needs to be
			 * exposed first IF both the channels are SCSI, then
			 * booting from the second channel is not allowed.
			 */
			if( target == 0 ) {
				target = adapter->boot_pdrv_tgt;
			}
			else if( target == adapter->boot_pdrv_tgt ) {
				target = 0;
			}
		}
	}
	else {
		if( islogical ) {
			/* this is the logical channel */
			channel = cmd->device->channel;
		}
		else {
			/* physical channel */
			channel = cmd->device->channel - NVIRT_CHAN;
			target = cmd->device->id;
		}
	}


	if(islogical) {

		/* have just LUN 0 for each target on virtual channels */
		if (cmd->device->lun) {
			cmd->result = (DID_BAD_TARGET << 16);
			cmd->scsi_done(cmd);
			return NULL;
		}

		ldrv_num = mega_get_ldrv_num(adapter, cmd, channel);


		max_ldrv_num = (adapter->flag & BOARD_40LD) ?
			MAX_LOGICAL_DRIVES_40LD : MAX_LOGICAL_DRIVES_8LD;

		/*
		 * max_ldrv_num increases by 0x80 if some logical drive was
		 * deleted.
		 */
		if(adapter->read_ldidmap)
			max_ldrv_num += 0x80;

		if(ldrv_num > max_ldrv_num ) {
			cmd->result = (DID_BAD_TARGET << 16);
			cmd->scsi_done(cmd);
			return NULL;
		}

	}
	else {
		if( cmd->device->lun > 7) {
			/*
			 * Do not support lun >7 for physically accessed
			 * devices
			 */
			cmd->result = (DID_BAD_TARGET << 16);
			cmd->scsi_done(cmd);
			return NULL;
		}
	}

	/*
	 *
	 * Logical drive commands
	 *
	 */
	if(islogical) {
		switch (cmd->cmnd[0]) {
		case TEST_UNIT_READY:
#if MEGA_HAVE_CLUSTERING
			/*
			 * Do we support clustering and is the support enabled
			 * If no, return success always
			 */
			if( !adapter->has_cluster ) {
				cmd->result = (DID_OK << 16);
				cmd->scsi_done(cmd);
				return NULL;
			}

			if(!(scb = mega_allocate_scb(adapter, cmd))) {
				*busy = 1;
				return NULL;
			}

			scb->raw_mbox[0] = MEGA_CLUSTER_CMD;
			scb->raw_mbox[2] = MEGA_RESERVATION_STATUS;
			scb->raw_mbox[3] = ldrv_num;

			scb->dma_direction = PCI_DMA_NONE;

			return scb;
#else
			cmd->result = (DID_OK << 16);
			cmd->scsi_done(cmd);
			return NULL;
#endif

		case MODE_SENSE: {
			char *buf;
			struct scatterlist *sg;

			sg = scsi_sglist(cmd);
			buf = kmap_atomic(sg_page(sg)) + sg->offset;

			memset(buf, 0, cmd->cmnd[4]);
			kunmap_atomic(buf - sg->offset);

			cmd->result = (DID_OK << 16);
			cmd->scsi_done(cmd);
			return NULL;
		}

		case READ_CAPACITY:
		case INQUIRY:

			if(!(adapter->flag & (1L << cmd->device->channel))) {

				dev_notice(&adapter->dev->dev,
					"scsi%d: scanning scsi channel %d "
					"for logical drives\n",
						adapter->host->host_no,
						cmd->device->channel);

				adapter->flag |= (1L << cmd->device->channel);
			}

			/* Allocate a SCB and initialize passthru */
			if(!(scb = mega_allocate_scb(adapter, cmd))) {
				*busy = 1;
				return NULL;
			}
			pthru = scb->pthru;

			mbox = (mbox_t *)scb->raw_mbox;
			memset(mbox, 0, sizeof(scb->raw_mbox));
			memset(pthru, 0, sizeof(mega_passthru));

			pthru->timeout = 0;
			pthru->ars = 1;
			pthru->reqsenselen = 14;
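			/*
			 * Hand the request to the firmware as a logical-drive
			 * passthru: the CDB is copied as-is below, and auto
			 * request sense (14 bytes) is enabled above.
			 */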
			pthru->islogical = 1;
			pthru->logdrv = ldrv_num;
			pthru->cdblen = cmd->cmd_len;
			memcpy(pthru->cdb, cmd->cmnd, cmd->cmd_len);

			if( adapter->has_64bit_addr ) {
				mbox->m_out.cmd = MEGA_MBOXCMD_PASSTHRU64;
			}
			else {
				mbox->m_out.cmd = MEGA_MBOXCMD_PASSTHRU;
			}

			scb->dma_direction = PCI_DMA_FROMDEVICE;

			pthru->numsgelements = mega_build_sglist(adapter, scb,
					&pthru->dataxferaddr, &pthru->dataxferlen);

			mbox->m_out.xferaddr = scb->pthru_dma_addr;

			return scb;

		case READ_6:
		case WRITE_6:
		case READ_10:
		case WRITE_10:
		case READ_12:
		case WRITE_12:

			/* Allocate a SCB and initialize mailbox */
			if(!(scb = mega_allocate_scb(adapter, cmd))) {
				*busy = 1;
				return NULL;
			}
			mbox = (mbox_t *)scb->raw_mbox;

			memset(mbox, 0, sizeof(scb->raw_mbox));
			mbox->m_out.logdrv = ldrv_num;

			/*
			 * A little hack: 2nd bit is zero for all scsi read
			 * commands and is set for all scsi write commands
			 */
			if( adapter->has_64bit_addr ) {
				mbox->m_out.cmd = (*cmd->cmnd & 0x02) ?
					MEGA_MBOXCMD_LWRITE64:
					MEGA_MBOXCMD_LREAD64 ;
			}
			else {
				mbox->m_out.cmd = (*cmd->cmnd & 0x02) ?
					MEGA_MBOXCMD_LWRITE:
					MEGA_MBOXCMD_LREAD ;
			}

			/*
			 * 6-byte READ(0x08) or WRITE(0x0A) cdb
			 */
			if( cmd->cmd_len == 6 ) {
				mbox->m_out.numsectors = (u32) cmd->cmnd[4];
				mbox->m_out.lba =
					((u32)cmd->cmnd[1] << 16) |
					((u32)cmd->cmnd[2] << 8) |
					(u32)cmd->cmnd[3];

				mbox->m_out.lba &= 0x1FFFFF;

#if MEGA_HAVE_STATS
				/*
				 * Take modulo 0x80, since the logical drive
				 * number increases by 0x80 when a logical
				 * drive was deleted
				 */
				if (*cmd->cmnd == READ_6) {
					adapter->nreads[ldrv_num%0x80]++;
					adapter->nreadblocks[ldrv_num%0x80] +=
						mbox->m_out.numsectors;
				} else {
					adapter->nwrites[ldrv_num%0x80]++;
					adapter->nwriteblocks[ldrv_num%0x80] +=
						mbox->m_out.numsectors;
				}
#endif
			}

			/*
			 * 10-byte READ(0x28) or WRITE(0x2A) cdb
			 */
			if( cmd->cmd_len == 10 ) {
				mbox->m_out.numsectors =
					(u32)cmd->cmnd[8] |
					((u32)cmd->cmnd[7] << 8);
				mbox->m_out.lba =
					((u32)cmd->cmnd[2] << 24) |
					((u32)cmd->cmnd[3] << 16) |
					((u32)cmd->cmnd[4] << 8) |
					(u32)cmd->cmnd[5];

#if MEGA_HAVE_STATS
				if (*cmd->cmnd == READ_10) {
					adapter->nreads[ldrv_num%0x80]++;
					adapter->nreadblocks[ldrv_num%0x80] +=
						mbox->m_out.numsectors;
				} else {
					adapter->nwrites[ldrv_num%0x80]++;
					adapter->nwriteblocks[ldrv_num%0x80] +=
						mbox->m_out.numsectors;
				}
#endif
			}

			/*
			 * 12-byte READ(0xA8) or WRITE(0xAA) cdb
			 */
			if( cmd->cmd_len == 12 ) {
				mbox->m_out.lba =
					((u32)cmd->cmnd[2] << 24) |
					((u32)cmd->cmnd[3] << 16) |
					((u32)cmd->cmnd[4] << 8) |
					(u32)cmd->cmnd[5];

				mbox->m_out.numsectors =
					((u32)cmd->cmnd[6] << 24) |
					((u32)cmd->cmnd[7] << 16) |
					((u32)cmd->cmnd[8] << 8) |
					(u32)cmd->cmnd[9];

#if MEGA_HAVE_STATS
				if (*cmd->cmnd == READ_12) {
					adapter->nreads[ldrv_num%0x80]++;
					adapter->nreadblocks[ldrv_num%0x80] +=
						mbox->m_out.numsectors;
				} else {
					adapter->nwrites[ldrv_num%0x80]++;
					adapter->nwriteblocks[ldrv_num%0x80] +=
						mbox->m_out.numsectors;
				}
#endif
			}

			/*
			 * If it is a read command
			 */
			if( (*cmd->cmnd & 0x0F) == 0x08 ) {
				scb->dma_direction = PCI_DMA_FROMDEVICE;
			}
			else {
				scb->dma_direction = PCI_DMA_TODEVICE;
			}

			/* Calculate Scatter-Gather info */
			mbox->m_out.numsgelements = mega_build_sglist(adapter, scb,
					(u32 *)&mbox->m_out.xferaddr, &seg);

			return scb;

#if MEGA_HAVE_CLUSTERING
		case RESERVE:	/* Fall through */
		case RELEASE:

			/*
			 * Do we support clustering and is the support enabled
			 */
			if( ! adapter->has_cluster ) {

				cmd->result = (DID_BAD_TARGET << 16);
				cmd->scsi_done(cmd);
				return NULL;
			}

			/* Allocate a SCB and initialize mailbox */
			if(!(scb = mega_allocate_scb(adapter, cmd))) {
				*busy = 1;
				return NULL;
			}

			scb->raw_mbox[0] = MEGA_CLUSTER_CMD;
			scb->raw_mbox[2] = ( *cmd->cmnd == RESERVE ) ?
				MEGA_RESERVE_LD : MEGA_RELEASE_LD;

			scb->raw_mbox[3] = ldrv_num;

			scb->dma_direction = PCI_DMA_NONE;

			return scb;
#endif

		default:
			cmd->result = (DID_BAD_TARGET << 16);
			cmd->scsi_done(cmd);
			return NULL;
		}
	}

	/*
	 * Passthru drive commands
	 */
	else {
		/* Allocate a SCB and initialize passthru */
		if(!(scb = mega_allocate_scb(adapter, cmd))) {
			*busy = 1;
			return NULL;
		}

		mbox = (mbox_t *)scb->raw_mbox;
		memset(mbox, 0, sizeof(scb->raw_mbox));

		if( adapter->support_ext_cdb ) {

			mega_prepare_extpassthru(adapter, scb, cmd,
					channel, target);

			mbox->m_out.cmd = MEGA_MBOXCMD_EXTPTHRU;

			mbox->m_out.xferaddr = scb->epthru_dma_addr;

		}
		else {

			pthru = mega_prepare_passthru(adapter, scb, cmd,
					channel, target);

			/* Initialize mailbox */
			if( adapter->has_64bit_addr ) {
				mbox->m_out.cmd = MEGA_MBOXCMD_PASSTHRU64;
			}
			else {
				mbox->m_out.cmd = MEGA_MBOXCMD_PASSTHRU;
			}

			mbox->m_out.xferaddr = scb->pthru_dma_addr;

		}
		return scb;
	}
	return NULL;
}


/**
 * mega_prepare_passthru()
 * @adapter: pointer to our soft state
 * @scb: our scsi control block
 * @cmd: scsi command from the mid-layer
 * @channel: actual channel on the controller
 * @target: actual id on the controller.
 *
 * prepare a command for the scsi physical devices.
 */
static mega_passthru *
mega_prepare_passthru(adapter_t *adapter, scb_t *scb, struct scsi_cmnd *cmd,
		int channel, int target)
{
	mega_passthru *pthru;

	pthru = scb->pthru;
	memset(pthru, 0, sizeof (mega_passthru));

	/* 0=6sec/1=60sec/2=10min/3=3hrs */
	pthru->timeout = 2;

	pthru->ars = 1;
	pthru->reqsenselen = 14;
	pthru->islogical = 0;

	pthru->channel = (adapter->flag & BOARD_40LD) ? 0 : channel;

	pthru->target = (adapter->flag & BOARD_40LD) ?
		(channel << 4) | target : target;

	pthru->cdblen = cmd->cmd_len;
	pthru->logdrv = cmd->device->lun;

	memcpy(pthru->cdb, cmd->cmnd, cmd->cmd_len);

	/* Not sure about the direction */
	scb->dma_direction = PCI_DMA_BIDIRECTIONAL;

	/* Special Code for Handling READ_CAPA/ INQ using bounce buffers */
	switch (cmd->cmnd[0]) {
	case INQUIRY:
	case READ_CAPACITY:
		if(!(adapter->flag & (1L << cmd->device->channel))) {

			dev_notice(&adapter->dev->dev,
				"scsi%d: scanning scsi channel %d [P%d] "
				"for physical devices\n",
					adapter->host->host_no,
					cmd->device->channel, channel);

			adapter->flag |= (1L << cmd->device->channel);
		}
		/* Fall through */
	default:
		pthru->numsgelements = mega_build_sglist(adapter, scb,
				&pthru->dataxferaddr, &pthru->dataxferlen);
		break;
	}
	return pthru;
}


/**
 * mega_prepare_extpassthru()
 * @adapter: pointer to our soft state
 * @scb: our scsi control block
 * @cmd: scsi command from the mid-layer
 * @channel: actual channel on the controller
 * @target: actual id on the controller.
 *
 * prepare a command for the scsi physical devices. This routine prepares
 * commands for devices which can take extended CDBs (>10 bytes)
 */
static mega_ext_passthru *
mega_prepare_extpassthru(adapter_t *adapter, scb_t *scb,
		struct scsi_cmnd *cmd,
		int channel, int target)
{
	mega_ext_passthru	*epthru;

	epthru = scb->epthru;
	memset(epthru, 0, sizeof(mega_ext_passthru));

	/* 0=6sec/1=60sec/2=10min/3=3hrs */
	epthru->timeout = 2;

	epthru->ars = 1;
	epthru->reqsenselen = 14;
	epthru->islogical = 0;

	epthru->channel = (adapter->flag & BOARD_40LD) ? 0 : channel;
	epthru->target = (adapter->flag & BOARD_40LD) ?
		(channel << 4) | target : target;

	epthru->cdblen = cmd->cmd_len;
	epthru->logdrv = cmd->device->lun;

	memcpy(epthru->cdb, cmd->cmnd, cmd->cmd_len);

	/* Not sure about the direction */
	scb->dma_direction = PCI_DMA_BIDIRECTIONAL;

	switch(cmd->cmnd[0]) {
	case INQUIRY:
	case READ_CAPACITY:
		if(!(adapter->flag & (1L << cmd->device->channel))) {

			dev_notice(&adapter->dev->dev,
				"scsi%d: scanning scsi channel %d [P%d] "
				"for physical devices\n",
					adapter->host->host_no,
					cmd->device->channel, channel);

			adapter->flag |= (1L << cmd->device->channel);
		}
		/* Fall through */
	default:
		epthru->numsgelements = mega_build_sglist(adapter, scb,
				&epthru->dataxferaddr, &epthru->dataxferlen);
		break;
	}

	return epthru;
}

static void
__mega_runpendq(adapter_t *adapter)
{
	scb_t *scb;
	struct list_head *pos, *next;

	/* Issue any pending commands to the card */
	list_for_each_safe(pos, next, &adapter->pending_list) {

		scb = list_entry(pos, scb_t, list);

		if( !(scb->state & SCB_ISSUED) ) {

			if( issue_scb(adapter, scb) != 0 )
				return;
		}
	}

	return;
}


/**
 * issue_scb()
 * @adapter: pointer to our soft state
 * @scb: scsi control block
 *
 * Post a command to the card if the mailbox is available, otherwise return
 * busy. We also take the scb from the pending list if the mailbox is
 * available.
 */
static int
issue_scb(adapter_t *adapter, scb_t *scb)
{
	volatile mbox64_t	*mbox64 = adapter->mbox64;
	volatile mbox_t		*mbox = adapter->mbox;
	unsigned int	i = 0;

	if(unlikely(mbox->m_in.busy)) {
		do {
			udelay(1);
			i++;
		} while( mbox->m_in.busy && (i < max_mbox_busy_wait) );

		if(mbox->m_in.busy) return -1;
	}

	/* Copy mailbox data into host structure */
	memcpy((char *)&mbox->m_out, (char *)scb->raw_mbox,
			sizeof(struct mbox_out));

	mbox->m_out.cmdid = scb->idx;	/* Set cmdid */
	mbox->m_in.busy = 1;		/* Set busy */


	/*
	 * Increment the pending queue counter
	 */
	atomic_inc(&adapter->pend_cmds);

	switch (mbox->m_out.cmd) {
	case MEGA_MBOXCMD_LREAD64:
	case MEGA_MBOXCMD_LWRITE64:
	case MEGA_MBOXCMD_PASSTHRU64:
	case MEGA_MBOXCMD_EXTPTHRU:
		mbox64->xfer_segment_lo = mbox->m_out.xferaddr;
		mbox64->xfer_segment_hi = 0;
		mbox->m_out.xferaddr = 0xFFFFFFFF;
		break;
	default:
		mbox64->xfer_segment_lo = 0;
		mbox64->xfer_segment_hi = 0;
	}

	/*
	 * post the command
	 */
	scb->state |= SCB_ISSUED;

	if( likely(adapter->flag & BOARD_MEMMAP) ) {
		mbox->m_in.poll = 0;
		mbox->m_in.ack = 0;
		WRINDOOR(adapter, adapter->mbox_dma | 0x1);
	}
	else {
		irq_enable(adapter);
		issue_command(adapter);
	}

	return 0;
}

/*
 * Wait until the controller's mailbox is available
 */
static inline int
mega_busywait_mbox (adapter_t *adapter)
{
	if (adapter->mbox->m_in.busy)
		return __mega_busywait_mbox(adapter);
	return 0;
}

/**
 * issue_scb_block()
 * @adapter: pointer to our soft state
 * @raw_mbox: the mailbox
 *
 * Issue a scb in synchronous and non-interrupt mode
 */
static int
issue_scb_block(adapter_t *adapter, u_char *raw_mbox)
{
	volatile mbox64_t	*mbox64 = adapter->mbox64;
	volatile mbox_t		*mbox = adapter->mbox;
	u8	byte;

	/* Wait until mailbox is free */
	if(mega_busywait_mbox (adapter))
		goto bug_blocked_mailbox;

	/* Copy mailbox data into host structure */
	memcpy((char *) mbox, raw_mbox, sizeof(struct mbox_out));
	mbox->m_out.cmdid = 0xFE;
	mbox->m_in.busy = 1;

	switch (raw_mbox[0]) {
	case MEGA_MBOXCMD_LREAD64:
	case MEGA_MBOXCMD_LWRITE64:
	case MEGA_MBOXCMD_PASSTHRU64:
	case MEGA_MBOXCMD_EXTPTHRU:
		mbox64->xfer_segment_lo = mbox->m_out.xferaddr;
		mbox64->xfer_segment_hi = 0;
		mbox->m_out.xferaddr = 0xFFFFFFFF;
		break;
	default:
		mbox64->xfer_segment_lo = 0;
		mbox64->xfer_segment_hi = 0;
	}

	if( likely(adapter->flag & BOARD_MEMMAP) ) {
		mbox->m_in.poll = 0;
		mbox->m_in.ack = 0;
		mbox->m_in.numstatus = 0xFF;
		mbox->m_in.status = 0xFF;
		WRINDOOR(adapter, adapter->mbox_dma | 0x1);

		while((volatile u8)mbox->m_in.numstatus == 0xFF)
			cpu_relax();

		mbox->m_in.numstatus = 0xFF;

		while( (volatile u8)mbox->m_in.poll != 0x77 )
			cpu_relax();

		mbox->m_in.poll = 0;
		mbox->m_in.ack = 0x77;

		WRINDOOR(adapter, adapter->mbox_dma | 0x2);

		while(RDINDOOR(adapter) & 0x2)
			cpu_relax();
	}
	else {
		irq_disable(adapter);
		issue_command(adapter);

		while (!((byte = irq_state(adapter)) & INTR_VALID))
			cpu_relax();

		set_irq_state(adapter, byte);
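		/*
		 * The completion has been polled above; re-enable the
		 * interrupt line and acknowledge the interrupt before
		 * returning the mailbox status.
		 */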
		irq_enable(adapter);
		irq_ack(adapter);
	}

	return mbox->m_in.status;

bug_blocked_mailbox:
	dev_warn(&adapter->dev->dev, "Blocked mailbox......!!\n");
	udelay (1000);
	return -1;
}


/**
 * megaraid_isr_iomapped()
 * @irq: irq
 * @devp: pointer to our soft state
 *
 * Interrupt service routine for io-mapped controllers.
 * Find out if our device is interrupting. If yes, acknowledge the interrupt
 * and service the completed commands.
 */
static irqreturn_t
megaraid_isr_iomapped(int irq, void *devp)
{
	adapter_t	*adapter = devp;
	unsigned long	flags;
	u8	status;
	u8	nstatus;
	u8	completed[MAX_FIRMWARE_STATUS];
	u8	byte;
	int	handled = 0;


	/*
	 * loop till F/W has more commands for us to complete.
	 */
	spin_lock_irqsave(&adapter->lock, flags);

	do {
		/* Check if a valid interrupt is pending */
		byte = irq_state(adapter);
		if( (byte & VALID_INTR_BYTE) == 0 ) {
			/*
			 * No more pending commands
			 */
			goto out_unlock;
		}
		set_irq_state(adapter, byte);

		while((nstatus = (volatile u8)adapter->mbox->m_in.numstatus)
				== 0xFF)
			cpu_relax();
		adapter->mbox->m_in.numstatus = 0xFF;

		status = adapter->mbox->m_in.status;

		/*
		 * decrement the pending queue counter
		 */
		atomic_sub(nstatus, &adapter->pend_cmds);

		memcpy(completed, (void *)adapter->mbox->m_in.completed,
				nstatus);

		/* Acknowledge interrupt */
		irq_ack(adapter);

		mega_cmd_done(adapter, completed, nstatus, status);

		mega_rundoneq(adapter);

		handled = 1;

		/* Loop through any pending requests */
		if(atomic_read(&adapter->quiescent) == 0) {
			mega_runpendq(adapter);
		}

	} while(1);

 out_unlock:

	spin_unlock_irqrestore(&adapter->lock, flags);

	return IRQ_RETVAL(handled);
}


/**
 * megaraid_isr_memmapped()
 * @irq: irq
 * @devp: pointer to our soft state
 *
 * Interrupt service routine for memory-mapped controllers.
 * Find out if our device is interrupting. If yes, acknowledge the interrupt
 * and service the completed commands.
 */
static irqreturn_t
megaraid_isr_memmapped(int irq, void *devp)
{
	adapter_t	*adapter = devp;
	unsigned long	flags;
	u8	status;
	u32	dword = 0;
	u8	nstatus;
	u8	completed[MAX_FIRMWARE_STATUS];
	int	handled = 0;


	/*
	 * loop till F/W has more commands for us to complete.
	 */
	spin_lock_irqsave(&adapter->lock, flags);

	do {
		/* Check if a valid interrupt is pending */
		dword = RDOUTDOOR(adapter);
		if(dword != 0x10001234) {
			/*
			 * No more pending commands
			 */
			goto out_unlock;
		}
		WROUTDOOR(adapter, 0x10001234);

		while((nstatus = (volatile u8)adapter->mbox->m_in.numstatus)
				== 0xFF) {
			cpu_relax();
		}
		adapter->mbox->m_in.numstatus = 0xFF;

		status = adapter->mbox->m_in.status;

		/*
		 * decrement the pending queue counter
		 */
		atomic_sub(nstatus, &adapter->pend_cmds);

		memcpy(completed, (void *)adapter->mbox->m_in.completed,
				nstatus);

		/* Acknowledge interrupt */
		WRINDOOR(adapter, 0x2);

		handled = 1;

		while( RDINDOOR(adapter) & 0x02 )
			cpu_relax();

		mega_cmd_done(adapter, completed, nstatus, status);

		mega_rundoneq(adapter);

		/* Loop through any pending requests */
		if(atomic_read(&adapter->quiescent) == 0) {
			mega_runpendq(adapter);
		}

	} while(1);

 out_unlock:

	spin_unlock_irqrestore(&adapter->lock, flags);

	return IRQ_RETVAL(handled);
}
/**
 * mega_cmd_done()
 * @adapter: pointer to our soft state
 * @completed: array of ids of completed commands
 * @nstatus: number of completed commands
 * @status: status of the last command completed
 *
 * Complete the commands and call the scsi mid-layer callback hooks.
 */
static void
mega_cmd_done(adapter_t *adapter, u8 completed[], int nstatus, int status)
{
	mega_ext_passthru	*epthru = NULL;
	struct scatterlist	*sgl;
	struct scsi_cmnd	*cmd = NULL;
	mega_passthru		*pthru = NULL;
	mbox_t			*mbox = NULL;
	u8			c;
	scb_t			*scb;
	int			islogical;
	int			cmdid;
	int			i;

	/*
	 * for all the commands completed, call the mid-layer callback routine
	 * and free the scb.
	 */
	for( i = 0; i < nstatus; i++ ) {

		cmdid = completed[i];

		/*
		 * Only free SCBs for the commands coming down from the
		 * mid-layer, not for those which were issued internally
		 *
		 * For internal command, restore the status returned by the
		 * firmware so that user can interpret it.
		 */
		if (cmdid == CMDID_INT_CMDS) {
			scb = &adapter->int_scb;

			list_del_init(&scb->list);
			scb->state = SCB_FREE;

			adapter->int_status = status;
			complete(&adapter->int_waitq);
		} else {
			scb = &adapter->scb_list[cmdid];

			/*
			 * Make sure f/w has completed a valid command
			 */
			if( !(scb->state & SCB_ISSUED) || scb->cmd == NULL ) {
				dev_crit(&adapter->dev->dev, "invalid command "
					"Id %d, scb->state:%x, scsi cmd:%p\n",
					cmdid, scb->state, scb->cmd);

				continue;
			}

			/*
			 * Was an abort issued for this command
			 */
			if( scb->state & SCB_ABORT ) {

				dev_warn(&adapter->dev->dev,
					"aborted cmd [%x] complete\n",
					scb->idx);

				scb->cmd->result = (DID_ABORT << 16);

				list_add_tail(SCSI_LIST(scb->cmd),
						&adapter->completed_list);

				mega_free_scb(adapter, scb);

				continue;
			}

			/*
			 * Was a reset issued for this command
			 */
			if( scb->state & SCB_RESET ) {

				dev_warn(&adapter->dev->dev,
					"reset cmd [%x] complete\n",
					scb->idx);

				scb->cmd->result = (DID_RESET << 16);

				list_add_tail(SCSI_LIST(scb->cmd),
						&adapter->completed_list);

				mega_free_scb (adapter, scb);

				continue;
			}

			cmd = scb->cmd;
			pthru = scb->pthru;
			epthru = scb->epthru;
			mbox = (mbox_t *)scb->raw_mbox;

#if MEGA_HAVE_STATS
			{

			int	logdrv = mbox->m_out.logdrv;

			islogical = adapter->logdrv_chan[cmd->channel];
			/*
			 * Maintain an error counter for the logical drive.
			 * Some application like SNMP agent need such
			 * statistics
			 */
			if( status && islogical && (cmd->cmnd[0] == READ_6 ||
						cmd->cmnd[0] == READ_10 ||
						cmd->cmnd[0] == READ_12)) {
				/*
				 * Logical drive number increases by 0x80 when
				 * a logical drive is deleted
				 */
				adapter->rd_errors[logdrv%0x80]++;
			}

			if( status && islogical && (cmd->cmnd[0] == WRITE_6 ||
						cmd->cmnd[0] == WRITE_10 ||
						cmd->cmnd[0] == WRITE_12)) {
				/*
				 * Logical drive number increases by 0x80 when
				 * a logical drive is deleted
				 */
				adapter->wr_errors[logdrv%0x80]++;
			}

			}
#endif
		}

		/*
		 * Do not return the presence of hard disk on the channel so,
		 * inquiry sent, and returned data==hard disk or removable
		 * hard disk and not logical, request should return failure! -
		 * PJ
		 */
		islogical = adapter->logdrv_chan[cmd->device->channel];
		if( cmd->cmnd[0] == INQUIRY && !islogical ) {

			sgl = scsi_sglist(cmd);
			if( sg_page(sgl) ) {
				c = *(unsigned char *) sg_virt(&sgl[0]);
			} else {
				dev_warn(&adapter->dev->dev, "invalid sg\n");
				c = 0;
			}

			if(IS_RAID_CH(adapter, cmd->device->channel) &&
				((c & 0x1F ) == TYPE_DISK)) {
				status = 0xF0;
			}
		}

		/* clear result; otherwise, success returns corrupt value */
		cmd->result = 0;

		/* Convert MegaRAID status to Linux error code */
		switch (status) {
		case 0x00:	/* SUCCESS , i.e. SCSI_STATUS_GOOD */
			cmd->result |= (DID_OK << 16);
			break;

		case 0x02:	/* ERROR_ABORTED, i.e.
				   SCSI_STATUS_CHECK_CONDITION */

			/* set sense_buffer and result fields */
			if( mbox->m_out.cmd == MEGA_MBOXCMD_PASSTHRU ||
				mbox->m_out.cmd == MEGA_MBOXCMD_PASSTHRU64 ) {

				memcpy(cmd->sense_buffer, pthru->reqsensearea,
						14);

				cmd->result = (DRIVER_SENSE << 24) |
					(DID_OK << 16) |
					(CHECK_CONDITION << 1);
			}
			else {
				if (mbox->m_out.cmd == MEGA_MBOXCMD_EXTPTHRU) {

					memcpy(cmd->sense_buffer,
						epthru->reqsensearea, 14);

					cmd->result = (DRIVER_SENSE << 24) |
						(DID_OK << 16) |
						(CHECK_CONDITION << 1);
				} else {
					cmd->sense_buffer[0] = 0x70;
					cmd->sense_buffer[2] = ABORTED_COMMAND;
					cmd->result |= (CHECK_CONDITION << 1);
				}
			}
			break;

		case 0x08:	/* ERR_DEST_DRIVE_FAILED, i.e.
				   SCSI_STATUS_BUSY */
			cmd->result |= (DID_BUS_BUSY << 16) | status;
			break;

		default:
#if MEGA_HAVE_CLUSTERING
			/*
			 * If TEST_UNIT_READY fails, we know
			 * MEGA_RESERVATION_STATUS failed
			 */
			if( cmd->cmnd[0] == TEST_UNIT_READY ) {
				cmd->result |= (DID_ERROR << 16) |
					(RESERVATION_CONFLICT << 1);
			}
			else
			/*
			 * Error code returned is 1 if Reserve or Release
			 * failed or the input parameter is invalid
			 */
			if( status == 1 &&
				(cmd->cmnd[0] == RESERVE ||
					cmd->cmnd[0] == RELEASE) ) {

				cmd->result |= (DID_ERROR << 16) |
					(RESERVATION_CONFLICT << 1);
			}
			else
#endif
				cmd->result |= (DID_BAD_TARGET << 16)|status;
		}

		mega_free_scb(adapter, scb);

		/* Add Scsi_Command to end of completed queue */
		list_add_tail(SCSI_LIST(cmd), &adapter->completed_list);
	}
}


/*
 * mega_rundoneq()
 *
 * Run through the list of completed requests and finish it
 */
static void
mega_rundoneq (adapter_t *adapter)
{
	struct scsi_cmnd *cmd;
	struct list_head *pos;

	list_for_each(pos, &adapter->completed_list) {

		struct scsi_pointer* spos = (struct scsi_pointer *)pos;

		cmd = list_entry(spos, struct scsi_cmnd, SCp);
		cmd->scsi_done(cmd);
	}

	INIT_LIST_HEAD(&adapter->completed_list);
}


/*
 * Free a SCB structure
 * Note: We assume the scsi command associated with this scb is not freed yet.
 */
static void
mega_free_scb(adapter_t *adapter, scb_t *scb)
{
	switch( scb->dma_type ) {

	case MEGA_DMA_TYPE_NONE:
		break;

	case MEGA_SGLIST:
		scsi_dma_unmap(scb->cmd);
		break;
	default:
		break;
	}

	/*
	 * Remove from the pending list
	 */
	list_del_init(&scb->list);

	/* Link the scb back into free list */
	scb->state = SCB_FREE;
	scb->cmd = NULL;

	list_add(&scb->list, &adapter->free_list);
}


static int
__mega_busywait_mbox (adapter_t *adapter)
{
	volatile mbox_t *mbox = adapter->mbox;
	long counter;

	for (counter = 0; counter < 10000; counter++) {
		if (!mbox->m_in.busy)
			return 0;
		udelay(100);
		cond_resched();
	}
	return -1;		/* give up after 1 second */
}

/*
 * Copies data to SGLIST
 * Note: For 64 bit cards, we need a minimum of one SG element for read/write
 */
static int
mega_build_sglist(adapter_t *adapter, scb_t *scb, u32 *buf, u32 *len)
{
	struct scatterlist *sg;
	struct scsi_cmnd	*cmd;
	int	sgcnt;
	int	idx;

	cmd = scb->cmd;

	/*
	 * Copy Scatter-Gather list info into controller structure.
	 *
	 * The number of sg elements returned must not exceed our limit
	 */
	sgcnt = scsi_dma_map(cmd);

	scb->dma_type = MEGA_SGLIST;

	BUG_ON(sgcnt > adapter->sglen || sgcnt < 0);

	*len = 0;

	if (scsi_sg_count(cmd) == 1 && !adapter->has_64bit_addr) {
		sg = scsi_sglist(cmd);
		scb->dma_h_bulkdata = sg_dma_address(sg);
		*buf = (u32)scb->dma_h_bulkdata;
		*len = sg_dma_len(sg);
		return 0;
	}

	scsi_for_each_sg(cmd, sg, sgcnt, idx) {
		if (adapter->has_64bit_addr) {
			scb->sgl64[idx].address = sg_dma_address(sg);
			*len += scb->sgl64[idx].length = sg_dma_len(sg);
		} else {
			scb->sgl[idx].address = sg_dma_address(sg);
			*len += scb->sgl[idx].length = sg_dma_len(sg);
		}
	}

	/* Reset pointer and length fields */
	*buf = scb->sgl_dma_addr;

	/* Return count of SG requests */
	return sgcnt;
}


/*
 * mega_8_to_40ld()
 *
 * takes all info in AdapterInquiry structure and puts it into ProductInfo and
 * Enquiry3 structures for later use
 */
static void
mega_8_to_40ld(mraid_inquiry *inquiry, mega_inquiry3 *enquiry3,
		mega_product_info *product_info)
{
	int i;

	product_info->max_commands = inquiry->adapter_info.max_commands;
	enquiry3->rebuild_rate = inquiry->adapter_info.rebuild_rate;
	product_info->nchannels = inquiry->adapter_info.nchannels;

	for (i = 0; i < 4; i++) {
		product_info->fw_version[i] =
			inquiry->adapter_info.fw_version[i];

		product_info->bios_version[i] =
			inquiry->adapter_info.bios_version[i];
	}
	enquiry3->cache_flush_interval =
		inquiry->adapter_info.cache_flush_interval;

	product_info->dram_size = inquiry->adapter_info.dram_size;

	enquiry3->num_ldrv = inquiry->logdrv_info.num_ldrv;

	for (i = 0; i < MAX_LOGICAL_DRIVES_8LD; i++) {
		enquiry3->ldrv_size[i] = inquiry->logdrv_info.ldrv_size[i];
		enquiry3->ldrv_prop[i] = inquiry->logdrv_info.ldrv_prop[i];
		enquiry3->ldrv_state[i] = inquiry->logdrv_info.ldrv_state[i];
	}

	for (i = 0; i < (MAX_PHYSICAL_DRIVES); i++)
		enquiry3->pdrv_state[i] = inquiry->pdrv_info.pdrv_state[i];
}

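/*
 * mega_free_sgl()
 * @adapter - pointer to our soft state
 *
 * Free the DMA-coherent scatter-gather lists and the (extended) passthru
 * buffers that were allocated for each scb.
 */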
static inline void
mega_free_sgl(adapter_t *adapter)
{
	scb_t	*scb;
	int	i;

	for(i = 0; i < adapter->max_cmds; i++) {

		scb = &adapter->scb_list[i];

		if( scb->sgl64 ) {
			pci_free_consistent(adapter->dev,
				sizeof(mega_sgl64) * adapter->sglen,
				scb->sgl64,
				scb->sgl_dma_addr);

			scb->sgl64 = NULL;
		}

		if( scb->pthru ) {
			pci_free_consistent(adapter->dev, sizeof(mega_passthru),
				scb->pthru, scb->pthru_dma_addr);

			scb->pthru = NULL;
		}

		if( scb->epthru ) {
			pci_free_consistent(adapter->dev,
				sizeof(mega_ext_passthru),
				scb->epthru, scb->epthru_dma_addr);

			scb->epthru = NULL;
		}

	}
}


/*
 * Get information about the card/driver
 */
const char *
megaraid_info(struct Scsi_Host *host)
{
	static char buffer[512];
	adapter_t	*adapter;

	adapter = (adapter_t *)host->hostdata;

	sprintf (buffer,
		"LSI Logic MegaRAID %s %d commands %d targs %d chans %d luns",
		adapter->fw_version, adapter->product_info.max_commands,
		adapter->host->max_id, adapter->host->max_channel,
		(u32)adapter->host->max_lun);
	return buffer;
}

/*
 * Abort a previous SCSI request. Only commands on the pending list can be
 * aborted. All the commands issued to the F/W must complete.
 */
static int
megaraid_abort(struct scsi_cmnd *cmd)
{
	adapter_t	*adapter;
	int		rval;

	adapter = (adapter_t *)cmd->device->host->hostdata;

	rval = megaraid_abort_and_reset(adapter, cmd, SCB_ABORT);

	/*
	 * This is required here to complete any completed requests
	 * to be communicated over to the mid layer.
	 */
	mega_rundoneq(adapter);

	return rval;
}


static int
megaraid_reset(struct scsi_cmnd *cmd)
{
	adapter_t	*adapter;
	megacmd_t	mc;
	int		rval;

	adapter = (adapter_t *)cmd->device->host->hostdata;

#if MEGA_HAVE_CLUSTERING
	mc.cmd = MEGA_CLUSTER_CMD;
	mc.opcode = MEGA_RESET_RESERVATIONS;

	if( mega_internal_command(adapter, &mc, NULL) != 0 ) {
		dev_warn(&adapter->dev->dev, "reservation reset failed\n");
	}
	else {
		dev_info(&adapter->dev->dev, "reservation reset\n");
	}
#endif

	spin_lock_irq(&adapter->lock);

	rval = megaraid_abort_and_reset(adapter, cmd, SCB_RESET);

	/*
	 * This is required here to complete any completed requests
	 * to be communicated over to the mid layer.
	 */
	mega_rundoneq(adapter);
	spin_unlock_irq(&adapter->lock);

	return rval;
}

/**
 * megaraid_abort_and_reset()
 * @adapter: megaraid soft state
 * @cmd: scsi command to be aborted or reset
 * @aor: abort or reset flag
 *
 * Try to locate the scsi command in the pending queue. If found and is not
 * issued to the controller, abort/reset it. Otherwise return failure
 */
static int
megaraid_abort_and_reset(adapter_t *adapter, struct scsi_cmnd *cmd, int aor)
{
	struct list_head	*pos, *next;
	scb_t			*scb;

	dev_warn(&adapter->dev->dev, "%s cmd=%x <c=%d t=%d l=%d>\n",
		(aor == SCB_ABORT)? "ABORTING":"RESET",
		cmd->cmnd[0], cmd->device->channel,
		cmd->device->id, (u32)cmd->device->lun);

	if(list_empty(&adapter->pending_list))
		return FAILED;

	list_for_each_safe(pos, next, &adapter->pending_list) {

		scb = list_entry(pos, scb_t, list);

		if (scb->cmd == cmd) { /* Found command */

			scb->state |= aor;

			/*
			 * Check if this command has firmware ownership. If
			 * yes, we cannot reset this command. Whenever f/w
			 * completes this command, we will return appropriate
			 * status from ISR.
			 */
			if( scb->state & SCB_ISSUED ) {

				dev_warn(&adapter->dev->dev,
					"%s[%x], fw owner\n",
					(aor==SCB_ABORT) ? "ABORTING":"RESET",
					scb->idx);

				return FAILED;
			}
			else {

				/*
				 * Not yet issued! Remove from the pending
				 * list
				 */
				dev_warn(&adapter->dev->dev,
					"%s-[%x], driver owner\n",
					(aor==SCB_ABORT) ? "ABORTING":"RESET",
					scb->idx);

				mega_free_scb(adapter, scb);

				if( aor == SCB_ABORT ) {
					cmd->result = (DID_ABORT << 16);
				}
				else {
					cmd->result = (DID_RESET << 16);
				}

				list_add_tail(SCSI_LIST(cmd),
						&adapter->completed_list);

				return SUCCESS;
			}
		}
	}

	return FAILED;
}

static inline int
make_local_pdev(adapter_t *adapter, struct pci_dev **pdev)
{
	*pdev = pci_alloc_dev(NULL);

	if( *pdev == NULL ) return -1;

	memcpy(*pdev, adapter->dev, sizeof(struct pci_dev));

	if( pci_set_dma_mask(*pdev, DMA_BIT_MASK(32)) != 0 ) {
		kfree(*pdev);
		return -1;
	}

	return 0;
}

static inline void
free_local_pdev(struct pci_dev *pdev)
{
	kfree(pdev);
}

/**
 * mega_allocate_inquiry()
 * @dma_handle: handle returned for dma address
 * @pdev: handle to pci device
 *
 * allocates memory for inquiry structure
 */
static inline void *
mega_allocate_inquiry(dma_addr_t *dma_handle, struct pci_dev *pdev)
{
	return pci_alloc_consistent(pdev, sizeof(mega_inquiry3), dma_handle);
}


static inline void
mega_free_inquiry(void *inquiry, dma_addr_t dma_handle, struct pci_dev *pdev)
{
	pci_free_consistent(pdev, sizeof(mega_inquiry3), inquiry, dma_handle);
}


#ifdef CONFIG_PROC_FS
/* Following code handles /proc fs */

/**
 * proc_show_config()
 * @m: Synthetic file construction data
 * @v: File iterator
 *
 * Display configuration information about the controller.
 */
static int
proc_show_config(struct seq_file *m, void *v)
{

	adapter_t *adapter = m->private;

	seq_puts(m, MEGARAID_VERSION);
	if(adapter->product_info.product_name[0])
		seq_printf(m, "%s\n", adapter->product_info.product_name);

	seq_puts(m, "Controller Type: ");

	if( adapter->flag & BOARD_MEMMAP )
		seq_puts(m, "438/466/467/471/493/518/520/531/532\n");
	else
		seq_puts(m, "418/428/434\n");

	if(adapter->flag & BOARD_40LD)
		seq_puts(m, "Controller Supports 40 Logical Drives\n");

	if(adapter->flag & BOARD_64BIT)
		seq_puts(m, "Controller capable of 64-bit memory addressing\n");
	if( adapter->has_64bit_addr )
		seq_puts(m, "Controller using 64-bit memory addressing\n");
	else
		seq_puts(m, "Controller is not using 64-bit memory addressing\n");

	seq_printf(m, "Base = %08lx, Irq = %d, ",
		adapter->base, adapter->host->irq);

	seq_printf(m, "Logical Drives = %d, Channels = %d\n",
		adapter->numldrv, adapter->product_info.nchannels);

	seq_printf(m, "Version =%s:%s, DRAM = %dMb\n",
		adapter->fw_version, adapter->bios_version,
		adapter->product_info.dram_size);

	seq_printf(m, "Controller Queue Depth = %d, Driver Queue Depth = %d\n",
		adapter->product_info.max_commands, adapter->max_cmds);

	seq_printf(m, "support_ext_cdb = %d\n", adapter->support_ext_cdb);
	seq_printf(m, "support_random_del = %d\n", adapter->support_random_del);
	seq_printf(m, "boot_ldrv_enabled = %d\n", adapter->boot_ldrv_enabled);
	seq_printf(m, "boot_ldrv = %d\n", adapter->boot_ldrv);
	seq_printf(m, "boot_pdrv_enabled = %d\n", adapter->boot_pdrv_enabled);
	seq_printf(m, "boot_pdrv_ch = %d\n", adapter->boot_pdrv_ch);
	seq_printf(m, "boot_pdrv_tgt = %d\n", adapter->boot_pdrv_tgt);
	seq_printf(m, "quiescent = %d\n",
		atomic_read(&adapter->quiescent));
	seq_printf(m, "has_cluster = %d\n", adapter->has_cluster);

	seq_puts(m, "\nModule Parameters:\n");
	seq_printf(m, "max_cmd_per_lun = %d\n", max_cmd_per_lun);
	seq_printf(m, "max_sectors_per_io = %d\n", max_sectors_per_io);
	return 0;
}

/**
 * proc_show_stat()
 * @m: Synthetic file construction data
 * @v: File iterator
 *
 * Display statistical information about the I/O activity.
 */
static int
proc_show_stat(struct seq_file *m, void *v)
{
	adapter_t *adapter = m->private;
#if MEGA_HAVE_STATS
	int	i;
#endif

	seq_puts(m, "Statistical Information for this controller\n");
	seq_printf(m, "pend_cmds = %d\n", atomic_read(&adapter->pend_cmds));
#if MEGA_HAVE_STATS
	for(i = 0; i < adapter->numldrv; i++) {
		seq_printf(m, "Logical Drive %d:\n", i);
		seq_printf(m, "\tReads Issued = %lu, Writes Issued = %lu\n",
			adapter->nreads[i], adapter->nwrites[i]);
		seq_printf(m, "\tSectors Read = %lu, Sectors Written = %lu\n",
			adapter->nreadblocks[i], adapter->nwriteblocks[i]);
		seq_printf(m, "\tRead errors = %lu, Write errors = %lu\n\n",
			adapter->rd_errors[i], adapter->wr_errors[i]);
	}
#else
	seq_puts(m, "IO and error counters not compiled in driver.\n");
#endif
	return 0;
}


/**
 * proc_show_mbox()
 * @m: Synthetic file construction data
 * @v: File iterator
 *
 * Display mailbox information for the last command issued. This information
 * is good for debugging.
 */
static int
proc_show_mbox(struct seq_file *m, void *v)
{
	adapter_t	*adapter = m->private;
	volatile mbox_t	*mbox = adapter->mbox;

	seq_puts(m, "Contents of Mail Box Structure\n");
	seq_printf(m, " Fw Command   = 0x%02x\n", mbox->m_out.cmd);
	seq_printf(m, " Cmd Sequence = 0x%02x\n", mbox->m_out.cmdid);
	seq_printf(m, " No of Sectors= %04d\n", mbox->m_out.numsectors);
	seq_printf(m, " LBA          = 0x%02x\n", mbox->m_out.lba);
	seq_printf(m, " DTA          = 0x%08x\n", mbox->m_out.xferaddr);
	seq_printf(m, " Logical Drive= 0x%02x\n", mbox->m_out.logdrv);
	seq_printf(m, " No of SG Elmt= 0x%02x\n", mbox->m_out.numsgelements);
	seq_printf(m, " Busy         = %01x\n", mbox->m_in.busy);
	seq_printf(m, " Status       = 0x%02x\n", mbox->m_in.status);
	return 0;
}


/**
 * proc_show_rebuild_rate()
 * @m: Synthetic file construction data
 * @v: File iterator
 *
 * Display current rebuild rate
 */
static int
proc_show_rebuild_rate(struct seq_file *m, void *v)
{
	adapter_t	*adapter = m->private;
	dma_addr_t	dma_handle;
	caddr_t		inquiry;
	struct pci_dev	*pdev;

	if( make_local_pdev(adapter, &pdev) != 0 )
		return 0;

	if( (inquiry = mega_allocate_inquiry(&dma_handle, pdev)) == NULL )
		goto free_pdev;

	if( mega_adapinq(adapter, dma_handle) != 0 ) {
		seq_puts(m, "Adapter inquiry failed.\n");
		dev_warn(&adapter->dev->dev, "inquiry failed\n");
		goto free_inquiry;
	}

	if( adapter->flag & BOARD_40LD )
		seq_printf(m, "Rebuild Rate: [%d%%]\n",
			((mega_inquiry3 *)inquiry)->rebuild_rate);
	else
		seq_printf(m, "Rebuild Rate: [%d%%]\n",
			((mraid_ext_inquiry *)
			inquiry)->raid_inq.adapter_info.rebuild_rate);

free_inquiry:
	mega_free_inquiry(inquiry, dma_handle, pdev);
free_pdev:
	free_local_pdev(pdev);
	return 0;
}


/**
 * proc_show_battery()
 * @m: Synthetic file construction data
 * @v: File iterator
 *
 * Display information about the battery module on the controller.
2220 */ 2221 static int 2222 proc_show_battery(struct seq_file *m, void *v) 2223 { 2224 adapter_t *adapter = m->private; 2225 dma_addr_t dma_handle; 2226 caddr_t inquiry; 2227 struct pci_dev *pdev; 2228 u8 battery_status; 2229 2230 if( make_local_pdev(adapter, &pdev) != 0 ) 2231 return 0; 2232 2233 if( (inquiry = mega_allocate_inquiry(&dma_handle, pdev)) == NULL ) 2234 goto free_pdev; 2235 2236 if( mega_adapinq(adapter, dma_handle) != 0 ) { 2237 seq_puts(m, "Adapter inquiry failed.\n"); 2238 dev_warn(&adapter->dev->dev, "inquiry failed\n"); 2239 goto free_inquiry; 2240 } 2241 2242 if( adapter->flag & BOARD_40LD ) { 2243 battery_status = ((mega_inquiry3 *)inquiry)->battery_status; 2244 } 2245 else { 2246 battery_status = ((mraid_ext_inquiry *)inquiry)-> 2247 raid_inq.adapter_info.battery_status; 2248 } 2249 2250 /* 2251 * Decode the battery status 2252 */ 2253 seq_printf(m, "Battery Status:[%d]", battery_status); 2254 2255 if(battery_status == MEGA_BATT_CHARGE_DONE) 2256 seq_puts(m, " Charge Done"); 2257 2258 if(battery_status & MEGA_BATT_MODULE_MISSING) 2259 seq_puts(m, " Module Missing"); 2260 2261 if(battery_status & MEGA_BATT_LOW_VOLTAGE) 2262 seq_puts(m, " Low Voltage"); 2263 2264 if(battery_status & MEGA_BATT_TEMP_HIGH) 2265 seq_puts(m, " Temperature High"); 2266 2267 if(battery_status & MEGA_BATT_PACK_MISSING) 2268 seq_puts(m, " Pack Missing"); 2269 2270 if(battery_status & MEGA_BATT_CHARGE_INPROG) 2271 seq_puts(m, " Charge In-progress"); 2272 2273 if(battery_status & MEGA_BATT_CHARGE_FAIL) 2274 seq_puts(m, " Charge Fail"); 2275 2276 if(battery_status & MEGA_BATT_CYCLES_EXCEEDED) 2277 seq_puts(m, " Cycles Exceeded"); 2278 2279 seq_putc(m, '\n'); 2280 2281 free_inquiry: 2282 mega_free_inquiry(inquiry, dma_handle, pdev); 2283 free_pdev: 2284 free_local_pdev(pdev); 2285 return 0; 2286 } 2287 2288 2289 /* 2290 * Display scsi inquiry 2291 */ 2292 static void 2293 mega_print_inquiry(struct seq_file *m, char *scsi_inq) 2294 { 2295 int i; 2296 2297 seq_puts(m, " Vendor: "); 2298 seq_write(m, scsi_inq + 8, 8); 2299 seq_puts(m, " Model: "); 2300 seq_write(m, scsi_inq + 16, 16); 2301 seq_puts(m, " Rev: "); 2302 seq_write(m, scsi_inq + 32, 4); 2303 seq_putc(m, '\n'); 2304 2305 i = scsi_inq[0] & 0x1f; 2306 seq_printf(m, " Type: %s ", scsi_device_type(i)); 2307 2308 seq_printf(m, " ANSI SCSI revision: %02x", 2309 scsi_inq[2] & 0x07); 2310 2311 if( (scsi_inq[2] & 0x07) == 1 && (scsi_inq[3] & 0x0f) == 1 ) 2312 seq_puts(m, " CCS\n"); 2313 else 2314 seq_putc(m, '\n'); 2315 } 2316 2317 /** 2318 * proc_show_pdrv() 2319 * @m: Synthetic file construction data 2320 * @adapter: pointer to our soft state 2321 * @channel: channel 2322 * 2323 * Display information about the physical drives. 
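 *
 * The state of each target is taken from the pdrv_state array returned by
 * the adapter inquiry, indexed as (channel * 16 + target); for example,
 * channel 1, target 4 maps to entry 20.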
2324 */ 2325 static int 2326 proc_show_pdrv(struct seq_file *m, adapter_t *adapter, int channel) 2327 { 2328 dma_addr_t dma_handle; 2329 char *scsi_inq; 2330 dma_addr_t scsi_inq_dma_handle; 2331 caddr_t inquiry; 2332 struct pci_dev *pdev; 2333 u8 *pdrv_state; 2334 u8 state; 2335 int tgt; 2336 int max_channels; 2337 int i; 2338 2339 if( make_local_pdev(adapter, &pdev) != 0 ) 2340 return 0; 2341 2342 if( (inquiry = mega_allocate_inquiry(&dma_handle, pdev)) == NULL ) 2343 goto free_pdev; 2344 2345 if( mega_adapinq(adapter, dma_handle) != 0 ) { 2346 seq_puts(m, "Adapter inquiry failed.\n"); 2347 dev_warn(&adapter->dev->dev, "inquiry failed\n"); 2348 goto free_inquiry; 2349 } 2350 2351 2352 scsi_inq = pci_alloc_consistent(pdev, 256, &scsi_inq_dma_handle); 2353 if( scsi_inq == NULL ) { 2354 seq_puts(m, "memory not available for scsi inq.\n"); 2355 goto free_inquiry; 2356 } 2357 2358 if( adapter->flag & BOARD_40LD ) { 2359 pdrv_state = ((mega_inquiry3 *)inquiry)->pdrv_state; 2360 } 2361 else { 2362 pdrv_state = ((mraid_ext_inquiry *)inquiry)-> 2363 raid_inq.pdrv_info.pdrv_state; 2364 } 2365 2366 max_channels = adapter->product_info.nchannels; 2367 2368 if( channel >= max_channels ) { 2369 goto free_pci; 2370 } 2371 2372 for( tgt = 0; tgt <= MAX_TARGET; tgt++ ) { 2373 2374 i = channel*16 + tgt; 2375 2376 state = *(pdrv_state + i); 2377 switch( state & 0x0F ) { 2378 case PDRV_ONLINE: 2379 seq_printf(m, "Channel:%2d Id:%2d State: Online", 2380 channel, tgt); 2381 break; 2382 2383 case PDRV_FAILED: 2384 seq_printf(m, "Channel:%2d Id:%2d State: Failed", 2385 channel, tgt); 2386 break; 2387 2388 case PDRV_RBLD: 2389 seq_printf(m, "Channel:%2d Id:%2d State: Rebuild", 2390 channel, tgt); 2391 break; 2392 2393 case PDRV_HOTSPARE: 2394 seq_printf(m, "Channel:%2d Id:%2d State: Hot spare", 2395 channel, tgt); 2396 break; 2397 2398 default: 2399 seq_printf(m, "Channel:%2d Id:%2d State: Un-configured", 2400 channel, tgt); 2401 break; 2402 } 2403 2404 /* 2405 * This interface displays inquiries for disk drives 2406 * only. Inquries for logical drives and non-disk 2407 * devices are available through /proc/scsi/scsi 2408 */ 2409 memset(scsi_inq, 0, 256); 2410 if( mega_internal_dev_inquiry(adapter, channel, tgt, 2411 scsi_inq_dma_handle) || 2412 (scsi_inq[0] & 0x1F) != TYPE_DISK ) { 2413 continue; 2414 } 2415 2416 /* 2417 * Check for overflow. We print less than 240 2418 * characters for inquiry 2419 */ 2420 seq_puts(m, ".\n"); 2421 mega_print_inquiry(m, scsi_inq); 2422 } 2423 2424 free_pci: 2425 pci_free_consistent(pdev, 256, scsi_inq, scsi_inq_dma_handle); 2426 free_inquiry: 2427 mega_free_inquiry(inquiry, dma_handle, pdev); 2428 free_pdev: 2429 free_local_pdev(pdev); 2430 return 0; 2431 } 2432 2433 /** 2434 * proc_show_pdrv_ch0() 2435 * @m: Synthetic file construction data 2436 * @v: File iterator 2437 * 2438 * Display information about the physical drives on physical channel 0. 2439 */ 2440 static int 2441 proc_show_pdrv_ch0(struct seq_file *m, void *v) 2442 { 2443 return proc_show_pdrv(m, m->private, 0); 2444 } 2445 2446 2447 /** 2448 * proc_show_pdrv_ch1() 2449 * @m: Synthetic file construction data 2450 * @v: File iterator 2451 * 2452 * Display information about the physical drives on physical channel 1. 
2453 */ 2454 static int 2455 proc_show_pdrv_ch1(struct seq_file *m, void *v) 2456 { 2457 return proc_show_pdrv(m, m->private, 1); 2458 } 2459 2460 2461 /** 2462 * proc_show_pdrv_ch2() 2463 * @m: Synthetic file construction data 2464 * @v: File iterator 2465 * 2466 * Display information about the physical drives on physical channel 2. 2467 */ 2468 static int 2469 proc_show_pdrv_ch2(struct seq_file *m, void *v) 2470 { 2471 return proc_show_pdrv(m, m->private, 2); 2472 } 2473 2474 2475 /** 2476 * proc_show_pdrv_ch3() 2477 * @m: Synthetic file construction data 2478 * @v: File iterator 2479 * 2480 * Display information about the physical drives on physical channel 3. 2481 */ 2482 static int 2483 proc_show_pdrv_ch3(struct seq_file *m, void *v) 2484 { 2485 return proc_show_pdrv(m, m->private, 3); 2486 } 2487 2488 2489 /** 2490 * proc_show_rdrv() 2491 * @m: Synthetic file construction data 2492 * @adapter: pointer to our soft state 2493 * @start: starting logical drive to display 2494 * @end: ending logical drive to display 2495 * 2496 * We do not print the inquiry information since its already available through 2497 * /proc/scsi/scsi interface 2498 */ 2499 static int 2500 proc_show_rdrv(struct seq_file *m, adapter_t *adapter, int start, int end ) 2501 { 2502 dma_addr_t dma_handle; 2503 logdrv_param *lparam; 2504 megacmd_t mc; 2505 char *disk_array; 2506 dma_addr_t disk_array_dma_handle; 2507 caddr_t inquiry; 2508 struct pci_dev *pdev; 2509 u8 *rdrv_state; 2510 int num_ldrv; 2511 u32 array_sz; 2512 int i; 2513 2514 if( make_local_pdev(adapter, &pdev) != 0 ) 2515 return 0; 2516 2517 if( (inquiry = mega_allocate_inquiry(&dma_handle, pdev)) == NULL ) 2518 goto free_pdev; 2519 2520 if( mega_adapinq(adapter, dma_handle) != 0 ) { 2521 seq_puts(m, "Adapter inquiry failed.\n"); 2522 dev_warn(&adapter->dev->dev, "inquiry failed\n"); 2523 goto free_inquiry; 2524 } 2525 2526 memset(&mc, 0, sizeof(megacmd_t)); 2527 2528 if( adapter->flag & BOARD_40LD ) { 2529 array_sz = sizeof(disk_array_40ld); 2530 2531 rdrv_state = ((mega_inquiry3 *)inquiry)->ldrv_state; 2532 2533 num_ldrv = ((mega_inquiry3 *)inquiry)->num_ldrv; 2534 } 2535 else { 2536 array_sz = sizeof(disk_array_8ld); 2537 2538 rdrv_state = ((mraid_ext_inquiry *)inquiry)-> 2539 raid_inq.logdrv_info.ldrv_state; 2540 2541 num_ldrv = ((mraid_ext_inquiry *)inquiry)-> 2542 raid_inq.logdrv_info.num_ldrv; 2543 } 2544 2545 disk_array = pci_alloc_consistent(pdev, array_sz, 2546 &disk_array_dma_handle); 2547 2548 if( disk_array == NULL ) { 2549 seq_puts(m, "memory not available.\n"); 2550 goto free_inquiry; 2551 } 2552 2553 mc.xferaddr = (u32)disk_array_dma_handle; 2554 2555 if( adapter->flag & BOARD_40LD ) { 2556 mc.cmd = FC_NEW_CONFIG; 2557 mc.opcode = OP_DCMD_READ_CONFIG; 2558 2559 if( mega_internal_command(adapter, &mc, NULL) ) { 2560 seq_puts(m, "40LD read config failed.\n"); 2561 goto free_pci; 2562 } 2563 2564 } 2565 else { 2566 mc.cmd = NEW_READ_CONFIG_8LD; 2567 2568 if( mega_internal_command(adapter, &mc, NULL) ) { 2569 mc.cmd = READ_CONFIG_8LD; 2570 if( mega_internal_command(adapter, &mc, NULL) ) { 2571 seq_puts(m, "8LD read config failed.\n"); 2572 goto free_pci; 2573 } 2574 } 2575 } 2576 2577 for( i = start; i < ( (end+1 < num_ldrv) ? end+1 : num_ldrv ); i++ ) { 2578 2579 if( adapter->flag & BOARD_40LD ) { 2580 lparam = 2581 &((disk_array_40ld *)disk_array)->ldrv[i].lparam; 2582 } 2583 else { 2584 lparam = 2585 &((disk_array_8ld *)disk_array)->ldrv[i].lparam; 2586 } 2587 2588 /* 2589 * Check for overflow. 
We print less than 240 characters for
 * information about each logical drive.
 */
		seq_printf(m, "Logical drive:%2d:, ", i);

		switch( rdrv_state[i] & 0x0F ) {
		case RDRV_OFFLINE:
			seq_puts(m, "state: offline");
			break;
		case RDRV_DEGRADED:
			seq_puts(m, "state: degraded");
			break;
		case RDRV_OPTIMAL:
			seq_puts(m, "state: optimal");
			break;
		case RDRV_DELETED:
			seq_puts(m, "state: deleted");
			break;
		default:
			seq_puts(m, "state: unknown");
			break;
		}

		/*
		 * Check if check consistency or initialization is going on
		 * for this logical drive.
		 */
		if( (rdrv_state[i] & 0xF0) == 0x20 )
			seq_puts(m, ", check-consistency in progress");
		else if( (rdrv_state[i] & 0xF0) == 0x10 )
			seq_puts(m, ", initialization in progress");

		seq_putc(m, '\n');

		seq_printf(m, "Span depth:%3d, ", lparam->span_depth);
		seq_printf(m, "RAID level:%3d, ", lparam->level);
		seq_printf(m, "Stripe size:%3d, ",
			lparam->stripe_sz ? lparam->stripe_sz/2: 128);
		seq_printf(m, "Row size:%3d\n", lparam->row_size);

		seq_puts(m, "Read Policy: ");
		switch(lparam->read_ahead) {
		case NO_READ_AHEAD:
			seq_puts(m, "No read ahead, ");
			break;
		case READ_AHEAD:
			seq_puts(m, "Read ahead, ");
			break;
		case ADAP_READ_AHEAD:
			seq_puts(m, "Adaptive, ");
			break;
		}

		seq_puts(m, "Write Policy: ");
		switch(lparam->write_mode) {
		case WRMODE_WRITE_THRU:
			seq_puts(m, "Write thru, ");
			break;
		case WRMODE_WRITE_BACK:
			seq_puts(m, "Write back, ");
			break;
		}

		seq_puts(m, "Cache Policy: ");
		switch(lparam->direct_io) {
		case CACHED_IO:
			seq_puts(m, "Cached IO\n\n");
			break;
		case DIRECT_IO:
			seq_puts(m, "Direct IO\n\n");
			break;
		}
	}

 free_pci:
	pci_free_consistent(pdev, array_sz, disk_array,
			disk_array_dma_handle);
 free_inquiry:
	mega_free_inquiry(inquiry, dma_handle, pdev);
 free_pdev:
	free_local_pdev(pdev);
	return 0;
}

/**
 * proc_show_rdrv_10()
 * @m: Synthetic file construction data
 * @v: File iterator
 *
 * Display real time information about the logical drives 0 through 9.
 */
static int
proc_show_rdrv_10(struct seq_file *m, void *v)
{
	return proc_show_rdrv(m, m->private, 0, 9);
}


/**
 * proc_show_rdrv_20()
 * @m: Synthetic file construction data
 * @v: File iterator
 *
 * Display real time information about the logical drives 10 through 19.
 */
static int
proc_show_rdrv_20(struct seq_file *m, void *v)
{
	return proc_show_rdrv(m, m->private, 10, 19);
}


/**
 * proc_show_rdrv_30()
 * @m: Synthetic file construction data
 * @v: File iterator
 *
 * Display real time information about the logical drives 20 through 29.
 */
static int
proc_show_rdrv_30(struct seq_file *m, void *v)
{
	return proc_show_rdrv(m, m->private, 20, 29);
}


/**
 * proc_show_rdrv_40()
 * @m: Synthetic file construction data
 * @v: File iterator
 *
 * Display real time information about the logical drives 30 through 39.
2722 */ 2723 static int 2724 proc_show_rdrv_40(struct seq_file *m, void *v) 2725 { 2726 return proc_show_rdrv(m, m->private, 30, 39); 2727 } 2728 2729 /** 2730 * mega_create_proc_entry() 2731 * @index: index in soft state array 2732 * @parent: parent node for this /proc entry 2733 * 2734 * Creates /proc entries for our controllers. 2735 */ 2736 static void 2737 mega_create_proc_entry(int index, struct proc_dir_entry *parent) 2738 { 2739 adapter_t *adapter = hba_soft_state[index]; 2740 struct proc_dir_entry *dir; 2741 u8 string[16]; 2742 2743 sprintf(string, "hba%d", adapter->host->host_no); 2744 dir = proc_mkdir_data(string, 0, parent, adapter); 2745 if (!dir) { 2746 dev_warn(&adapter->dev->dev, "proc_mkdir failed\n"); 2747 return; 2748 } 2749 2750 proc_create_single_data("config", S_IRUSR, dir, 2751 proc_show_config, adapter); 2752 proc_create_single_data("stat", S_IRUSR, dir, 2753 proc_show_stat, adapter); 2754 proc_create_single_data("mailbox", S_IRUSR, dir, 2755 proc_show_mbox, adapter); 2756 #if MEGA_HAVE_ENH_PROC 2757 proc_create_single_data("rebuild-rate", S_IRUSR, dir, 2758 proc_show_rebuild_rate, adapter); 2759 proc_create_single_data("battery-status", S_IRUSR, dir, 2760 proc_show_battery, adapter); 2761 proc_create_single_data("diskdrives-ch0", S_IRUSR, dir, 2762 proc_show_pdrv_ch0, adapter); 2763 proc_create_single_data("diskdrives-ch1", S_IRUSR, dir, 2764 proc_show_pdrv_ch1, adapter); 2765 proc_create_single_data("diskdrives-ch2", S_IRUSR, dir, 2766 proc_show_pdrv_ch2, adapter); 2767 proc_create_single_data("diskdrives-ch3", S_IRUSR, dir, 2768 proc_show_pdrv_ch3, adapter); 2769 proc_create_single_data("raiddrives-0-9", S_IRUSR, dir, 2770 proc_show_rdrv_10, adapter); 2771 proc_create_single_data("raiddrives-10-19", S_IRUSR, dir, 2772 proc_show_rdrv_20, adapter); 2773 proc_create_single_data("raiddrives-20-29", S_IRUSR, dir, 2774 proc_show_rdrv_30, adapter); 2775 proc_create_single_data("raiddrives-30-39", S_IRUSR, dir, 2776 proc_show_rdrv_40, adapter); 2777 #endif 2778 } 2779 2780 #else 2781 static inline void mega_create_proc_entry(int index, struct proc_dir_entry *parent) 2782 { 2783 } 2784 #endif 2785 2786 2787 /* 2788 * megaraid_biosparam() 2789 * 2790 * Return the disk geometry for a particular disk 2791 */ 2792 static int 2793 megaraid_biosparam(struct scsi_device *sdev, struct block_device *bdev, 2794 sector_t capacity, int geom[]) 2795 { 2796 adapter_t *adapter; 2797 int heads; 2798 int sectors; 2799 int cylinders; 2800 2801 /* Get pointer to host config structure */ 2802 adapter = (adapter_t *)sdev->host->hostdata; 2803 2804 if (IS_RAID_CH(adapter, sdev->channel)) { 2805 /* Default heads (64) & sectors (32) */ 2806 heads = 64; 2807 sectors = 32; 2808 cylinders = (ulong)capacity / (heads * sectors); 2809 2810 /* 2811 * Handle extended translation size for logical drives 2812 * > 1Gb 2813 */ 2814 if ((ulong)capacity >= 0x200000) { 2815 heads = 255; 2816 sectors = 63; 2817 cylinders = (ulong)capacity / (heads * sectors); 2818 } 2819 2820 /* return result */ 2821 geom[0] = heads; 2822 geom[1] = sectors; 2823 geom[2] = cylinders; 2824 } 2825 else { 2826 if (scsi_partsize(bdev, capacity, geom)) 2827 return 0; 2828 2829 dev_info(&adapter->dev->dev, 2830 "invalid partition on this disk on channel %d\n", 2831 sdev->channel); 2832 2833 /* Default heads (64) & sectors (32) */ 2834 heads = 64; 2835 sectors = 32; 2836 cylinders = (ulong)capacity / (heads * sectors); 2837 2838 /* Handle extended translation size for logical drives > 1Gb */ 2839 if ((ulong)capacity >= 0x200000) { 2840 
heads = 255; 2841 sectors = 63; 2842 cylinders = (ulong)capacity / (heads * sectors); 2843 } 2844 2845 /* return result */ 2846 geom[0] = heads; 2847 geom[1] = sectors; 2848 geom[2] = cylinders; 2849 } 2850 2851 return 0; 2852 } 2853 2854 /** 2855 * mega_init_scb() 2856 * @adapter: pointer to our soft state 2857 * 2858 * Allocate memory for the various pointers in the scb structures: 2859 * scatter-gather list pointer, passthru and extended passthru structure 2860 * pointers. 2861 */ 2862 static int 2863 mega_init_scb(adapter_t *adapter) 2864 { 2865 scb_t *scb; 2866 int i; 2867 2868 for( i = 0; i < adapter->max_cmds; i++ ) { 2869 2870 scb = &adapter->scb_list[i]; 2871 2872 scb->sgl64 = NULL; 2873 scb->sgl = NULL; 2874 scb->pthru = NULL; 2875 scb->epthru = NULL; 2876 } 2877 2878 for( i = 0; i < adapter->max_cmds; i++ ) { 2879 2880 scb = &adapter->scb_list[i]; 2881 2882 scb->idx = i; 2883 2884 scb->sgl64 = pci_alloc_consistent(adapter->dev, 2885 sizeof(mega_sgl64) * adapter->sglen, 2886 &scb->sgl_dma_addr); 2887 2888 scb->sgl = (mega_sglist *)scb->sgl64; 2889 2890 if( !scb->sgl ) { 2891 dev_warn(&adapter->dev->dev, "RAID: Can't allocate sglist\n"); 2892 mega_free_sgl(adapter); 2893 return -1; 2894 } 2895 2896 scb->pthru = pci_alloc_consistent(adapter->dev, 2897 sizeof(mega_passthru), 2898 &scb->pthru_dma_addr); 2899 2900 if( !scb->pthru ) { 2901 dev_warn(&adapter->dev->dev, "RAID: Can't allocate passthru\n"); 2902 mega_free_sgl(adapter); 2903 return -1; 2904 } 2905 2906 scb->epthru = pci_alloc_consistent(adapter->dev, 2907 sizeof(mega_ext_passthru), 2908 &scb->epthru_dma_addr); 2909 2910 if( !scb->epthru ) { 2911 dev_warn(&adapter->dev->dev, 2912 "Can't allocate extended passthru\n"); 2913 mega_free_sgl(adapter); 2914 return -1; 2915 } 2916 2917 2918 scb->dma_type = MEGA_DMA_TYPE_NONE; 2919 2920 /* 2921 * Link to free list 2922 * lock not required since we are loading the driver, so no 2923 * commands possible right now. 2924 */ 2925 scb->state = SCB_FREE; 2926 scb->cmd = NULL; 2927 list_add(&scb->list, &adapter->free_list); 2928 } 2929 2930 return 0; 2931 } 2932 2933 2934 /** 2935 * megadev_open() 2936 * @inode: unused 2937 * @filep: unused 2938 * 2939 * Routines for the character/ioctl interface to the driver. Find out if this 2940 * is a valid open. 2941 */ 2942 static int 2943 megadev_open (struct inode *inode, struct file *filep) 2944 { 2945 /* 2946 * Only allow superuser to access private ioctl interface 2947 */ 2948 if( !capable(CAP_SYS_ADMIN) ) return -EACCES; 2949 2950 return 0; 2951 } 2952 2953 2954 /** 2955 * megadev_ioctl() 2956 * @filep: Our device file 2957 * @cmd: ioctl command 2958 * @arg: user buffer 2959 * 2960 * ioctl entry point for our private ioctl interface. We move the data in from 2961 * the user space, prepare the command (if necessary, convert the old MIMD 2962 * ioctl to new ioctl command), and issue a synchronous command to the 2963 * controller. 
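 *
 * The rough sequence is: mega_m_to_n() copies the request from user space
 * and, if it is in the older MIMD format, converts it to a nitioctl_t; the
 * opcode (GET_DRIVER_VER, GET_N_ADAP, GET_ADAP_INFO, MBOX_CMD, ...) is then
 * dispatched, mailbox commands are run through mega_internal_command(), and
 * the results are copied back to user space by mega_n_to_m().
 *
 * A minimal sketch of the older MIMD usage (assuming the uioctl_t layout
 * from megaraid.h and a device node created for the dynamically allocated
 * "megadev_legacy" major) looks roughly like:
 *
 *	u32 nadap = 0;
 *	struct uioctl_t uioc = { 0 };
 *
 *	uioc.ui.fcs.opcode = 0x82;
 *	uioc.ui.fcs.subopcode = MEGAIOC_QNADAP;
 *	uioc.data = (void *)&nadap;
 *	ioctl(fd, USCSICMD, &uioc);
 *
 * where fd is an open descriptor on that node and nadap receives the
 * adapter count on success.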
 */
static int
megadev_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
{
	adapter_t *adapter;
	nitioctl_t uioc;
	int adapno;
	int rval;
	mega_passthru __user *upthru;	/* user address for passthru */
	mega_passthru *pthru;		/* copy user passthru here */
	dma_addr_t pthru_dma_hndl;
	void *data = NULL;		/* data to be transferred */
	dma_addr_t data_dma_hndl;	/* dma handle for data xfer area */
	megacmd_t mc;
#if MEGA_HAVE_STATS
	megastat_t __user *ustats = NULL;
	int num_ldrv = 0;
#endif
	u32 uxferaddr = 0;
	struct pci_dev *pdev;

	/*
	 * Make sure only USCSICMD is issued through this interface.
	 * MIMD applications would still fire different commands.
	 */
	if( (_IOC_TYPE(cmd) != MEGAIOC_MAGIC) && (cmd != USCSICMD) ) {
		return -EINVAL;
	}

	/*
	 * Check and convert a possible MIMD command to a NIT command.
	 * mega_m_to_n() copies the data from the user space, so we do not
	 * have to do it here.
	 * NOTE: We will need some user address to copy out the data, therefore
	 * the interface layer will also provide us with the required user
	 * addresses.
	 */
	memset(&uioc, 0, sizeof(nitioctl_t));
	if( (rval = mega_m_to_n( (void __user *)arg, &uioc)) != 0 )
		return rval;


	switch( uioc.opcode ) {

	case GET_DRIVER_VER:
		if( put_user(driver_ver, (u32 __user *)uioc.uioc_uaddr) )
			return (-EFAULT);

		break;

	case GET_N_ADAP:
		if( put_user(hba_count, (u32 __user *)uioc.uioc_uaddr) )
			return (-EFAULT);

		/*
		 * Shucks. MIMD interface returns a positive value for number
		 * of adapters. TODO: Change it to return 0 when there is no
		 * application using the mimd interface.
3022 */ 3023 return hba_count; 3024 3025 case GET_ADAP_INFO: 3026 3027 /* 3028 * Which adapter 3029 */ 3030 if( (adapno = GETADAP(uioc.adapno)) >= hba_count ) 3031 return (-ENODEV); 3032 3033 if( copy_to_user(uioc.uioc_uaddr, mcontroller+adapno, 3034 sizeof(struct mcontroller)) ) 3035 return (-EFAULT); 3036 break; 3037 3038 #if MEGA_HAVE_STATS 3039 3040 case GET_STATS: 3041 /* 3042 * Which adapter 3043 */ 3044 if( (adapno = GETADAP(uioc.adapno)) >= hba_count ) 3045 return (-ENODEV); 3046 3047 adapter = hba_soft_state[adapno]; 3048 3049 ustats = uioc.uioc_uaddr; 3050 3051 if( copy_from_user(&num_ldrv, &ustats->num_ldrv, sizeof(int)) ) 3052 return (-EFAULT); 3053 3054 /* 3055 * Check for the validity of the logical drive number 3056 */ 3057 if( num_ldrv >= MAX_LOGICAL_DRIVES_40LD ) return -EINVAL; 3058 3059 if( copy_to_user(ustats->nreads, adapter->nreads, 3060 num_ldrv*sizeof(u32)) ) 3061 return -EFAULT; 3062 3063 if( copy_to_user(ustats->nreadblocks, adapter->nreadblocks, 3064 num_ldrv*sizeof(u32)) ) 3065 return -EFAULT; 3066 3067 if( copy_to_user(ustats->nwrites, adapter->nwrites, 3068 num_ldrv*sizeof(u32)) ) 3069 return -EFAULT; 3070 3071 if( copy_to_user(ustats->nwriteblocks, adapter->nwriteblocks, 3072 num_ldrv*sizeof(u32)) ) 3073 return -EFAULT; 3074 3075 if( copy_to_user(ustats->rd_errors, adapter->rd_errors, 3076 num_ldrv*sizeof(u32)) ) 3077 return -EFAULT; 3078 3079 if( copy_to_user(ustats->wr_errors, adapter->wr_errors, 3080 num_ldrv*sizeof(u32)) ) 3081 return -EFAULT; 3082 3083 return 0; 3084 3085 #endif 3086 case MBOX_CMD: 3087 3088 /* 3089 * Which adapter 3090 */ 3091 if( (adapno = GETADAP(uioc.adapno)) >= hba_count ) 3092 return (-ENODEV); 3093 3094 adapter = hba_soft_state[adapno]; 3095 3096 /* 3097 * Deletion of logical drive is a special case. The adapter 3098 * should be quiescent before this command is issued. 3099 */ 3100 if( uioc.uioc_rmbox[0] == FC_DEL_LOGDRV && 3101 uioc.uioc_rmbox[2] == OP_DEL_LOGDRV ) { 3102 3103 /* 3104 * Do we support this feature 3105 */ 3106 if( !adapter->support_random_del ) { 3107 dev_warn(&adapter->dev->dev, "logdrv " 3108 "delete on non-supporting F/W\n"); 3109 3110 return (-EINVAL); 3111 } 3112 3113 rval = mega_del_logdrv( adapter, uioc.uioc_rmbox[3] ); 3114 3115 if( rval == 0 ) { 3116 memset(&mc, 0, sizeof(megacmd_t)); 3117 3118 mc.status = rval; 3119 3120 rval = mega_n_to_m((void __user *)arg, &mc); 3121 } 3122 3123 return rval; 3124 } 3125 /* 3126 * This interface only support the regular passthru commands. 3127 * Reject extended passthru and 64-bit passthru 3128 */ 3129 if( uioc.uioc_rmbox[0] == MEGA_MBOXCMD_PASSTHRU64 || 3130 uioc.uioc_rmbox[0] == MEGA_MBOXCMD_EXTPTHRU ) { 3131 3132 dev_warn(&adapter->dev->dev, "rejected passthru\n"); 3133 3134 return (-EINVAL); 3135 } 3136 3137 /* 3138 * For all internal commands, the buffer must be allocated in 3139 * <4GB address range 3140 */ 3141 if( make_local_pdev(adapter, &pdev) != 0 ) 3142 return -EIO; 3143 3144 /* Is it a passthru command or a DCMD */ 3145 if( uioc.uioc_rmbox[0] == MEGA_MBOXCMD_PASSTHRU ) { 3146 /* Passthru commands */ 3147 3148 pthru = pci_alloc_consistent(pdev, 3149 sizeof(mega_passthru), 3150 &pthru_dma_hndl); 3151 3152 if( pthru == NULL ) { 3153 free_local_pdev(pdev); 3154 return (-ENOMEM); 3155 } 3156 3157 /* 3158 * The user passthru structure 3159 */ 3160 upthru = (mega_passthru __user *)(unsigned long)MBOX(uioc)->xferaddr; 3161 3162 /* 3163 * Copy in the user passthru here. 
3164 */ 3165 if( copy_from_user(pthru, upthru, 3166 sizeof(mega_passthru)) ) { 3167 3168 pci_free_consistent(pdev, 3169 sizeof(mega_passthru), pthru, 3170 pthru_dma_hndl); 3171 3172 free_local_pdev(pdev); 3173 3174 return (-EFAULT); 3175 } 3176 3177 /* 3178 * Is there a data transfer 3179 */ 3180 if( pthru->dataxferlen ) { 3181 data = pci_alloc_consistent(pdev, 3182 pthru->dataxferlen, 3183 &data_dma_hndl); 3184 3185 if( data == NULL ) { 3186 pci_free_consistent(pdev, 3187 sizeof(mega_passthru), 3188 pthru, 3189 pthru_dma_hndl); 3190 3191 free_local_pdev(pdev); 3192 3193 return (-ENOMEM); 3194 } 3195 3196 /* 3197 * Save the user address and point the kernel 3198 * address at just allocated memory 3199 */ 3200 uxferaddr = pthru->dataxferaddr; 3201 pthru->dataxferaddr = data_dma_hndl; 3202 } 3203 3204 3205 /* 3206 * Is data coming down-stream 3207 */ 3208 if( pthru->dataxferlen && (uioc.flags & UIOC_WR) ) { 3209 /* 3210 * Get the user data 3211 */ 3212 if( copy_from_user(data, (char __user *)(unsigned long) uxferaddr, 3213 pthru->dataxferlen) ) { 3214 rval = (-EFAULT); 3215 goto freemem_and_return; 3216 } 3217 } 3218 3219 memset(&mc, 0, sizeof(megacmd_t)); 3220 3221 mc.cmd = MEGA_MBOXCMD_PASSTHRU; 3222 mc.xferaddr = (u32)pthru_dma_hndl; 3223 3224 /* 3225 * Issue the command 3226 */ 3227 mega_internal_command(adapter, &mc, pthru); 3228 3229 rval = mega_n_to_m((void __user *)arg, &mc); 3230 3231 if( rval ) goto freemem_and_return; 3232 3233 3234 /* 3235 * Is data going up-stream 3236 */ 3237 if( pthru->dataxferlen && (uioc.flags & UIOC_RD) ) { 3238 if( copy_to_user((char __user *)(unsigned long) uxferaddr, data, 3239 pthru->dataxferlen) ) { 3240 rval = (-EFAULT); 3241 } 3242 } 3243 3244 /* 3245 * Send the request sense data also, irrespective of 3246 * whether the user has asked for it or not. 
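 * Only the first 14 bytes of the sense area are copied back to the
 * user's passthru structure.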
3247 */ 3248 if (copy_to_user(upthru->reqsensearea, 3249 pthru->reqsensearea, 14)) 3250 rval = -EFAULT; 3251 3252 freemem_and_return: 3253 if( pthru->dataxferlen ) { 3254 pci_free_consistent(pdev, 3255 pthru->dataxferlen, data, 3256 data_dma_hndl); 3257 } 3258 3259 pci_free_consistent(pdev, sizeof(mega_passthru), 3260 pthru, pthru_dma_hndl); 3261 3262 free_local_pdev(pdev); 3263 3264 return rval; 3265 } 3266 else { 3267 /* DCMD commands */ 3268 3269 /* 3270 * Is there a data transfer 3271 */ 3272 if( uioc.xferlen ) { 3273 data = pci_alloc_consistent(pdev, 3274 uioc.xferlen, &data_dma_hndl); 3275 3276 if( data == NULL ) { 3277 free_local_pdev(pdev); 3278 return (-ENOMEM); 3279 } 3280 3281 uxferaddr = MBOX(uioc)->xferaddr; 3282 } 3283 3284 /* 3285 * Is data coming down-stream 3286 */ 3287 if( uioc.xferlen && (uioc.flags & UIOC_WR) ) { 3288 /* 3289 * Get the user data 3290 */ 3291 if( copy_from_user(data, (char __user *)(unsigned long) uxferaddr, 3292 uioc.xferlen) ) { 3293 3294 pci_free_consistent(pdev, 3295 uioc.xferlen, 3296 data, data_dma_hndl); 3297 3298 free_local_pdev(pdev); 3299 3300 return (-EFAULT); 3301 } 3302 } 3303 3304 memcpy(&mc, MBOX(uioc), sizeof(megacmd_t)); 3305 3306 mc.xferaddr = (u32)data_dma_hndl; 3307 3308 /* 3309 * Issue the command 3310 */ 3311 mega_internal_command(adapter, &mc, NULL); 3312 3313 rval = mega_n_to_m((void __user *)arg, &mc); 3314 3315 if( rval ) { 3316 if( uioc.xferlen ) { 3317 pci_free_consistent(pdev, 3318 uioc.xferlen, data, 3319 data_dma_hndl); 3320 } 3321 3322 free_local_pdev(pdev); 3323 3324 return rval; 3325 } 3326 3327 /* 3328 * Is data going up-stream 3329 */ 3330 if( uioc.xferlen && (uioc.flags & UIOC_RD) ) { 3331 if( copy_to_user((char __user *)(unsigned long) uxferaddr, data, 3332 uioc.xferlen) ) { 3333 3334 rval = (-EFAULT); 3335 } 3336 } 3337 3338 if( uioc.xferlen ) { 3339 pci_free_consistent(pdev, 3340 uioc.xferlen, data, 3341 data_dma_hndl); 3342 } 3343 3344 free_local_pdev(pdev); 3345 3346 return rval; 3347 } 3348 3349 default: 3350 return (-EINVAL); 3351 } 3352 3353 return 0; 3354 } 3355 3356 static long 3357 megadev_unlocked_ioctl(struct file *filep, unsigned int cmd, unsigned long arg) 3358 { 3359 int ret; 3360 3361 mutex_lock(&megadev_mutex); 3362 ret = megadev_ioctl(filep, cmd, arg); 3363 mutex_unlock(&megadev_mutex); 3364 3365 return ret; 3366 } 3367 3368 /** 3369 * mega_m_to_n() 3370 * @arg: user address 3371 * @uioc: new ioctl structure 3372 * 3373 * A thin layer to convert older mimd interface ioctl structure to NIT ioctl 3374 * structure 3375 * 3376 * Converts the older mimd ioctl structure to newer NIT structure 3377 */ 3378 static int 3379 mega_m_to_n(void __user *arg, nitioctl_t *uioc) 3380 { 3381 struct uioctl_t uioc_mimd; 3382 char signature[8] = {0}; 3383 u8 opcode; 3384 u8 subopcode; 3385 3386 3387 /* 3388 * check is the application conforms to NIT. We do not have to do much 3389 * in that case. 3390 * We exploit the fact that the signature is stored in the very 3391 * beginning of the structure. 3392 */ 3393 3394 if( copy_from_user(signature, arg, 7) ) 3395 return (-EFAULT); 3396 3397 if( memcmp(signature, "MEGANIT", 7) == 0 ) { 3398 3399 /* 3400 * NOTE NOTE: The nit ioctl is still under flux because of 3401 * change of mailbox definition, in HPE. No applications yet 3402 * use this interface and let's not have applications use this 3403 * interface till the new specifitions are in place. 
3404 */ 3405 return -EINVAL; 3406 #if 0 3407 if( copy_from_user(uioc, arg, sizeof(nitioctl_t)) ) 3408 return (-EFAULT); 3409 return 0; 3410 #endif 3411 } 3412 3413 /* 3414 * Else assume we have mimd uioctl_t as arg. Convert to nitioctl_t 3415 * 3416 * Get the user ioctl structure 3417 */ 3418 if( copy_from_user(&uioc_mimd, arg, sizeof(struct uioctl_t)) ) 3419 return (-EFAULT); 3420 3421 3422 /* 3423 * Get the opcode and subopcode for the commands 3424 */ 3425 opcode = uioc_mimd.ui.fcs.opcode; 3426 subopcode = uioc_mimd.ui.fcs.subopcode; 3427 3428 switch (opcode) { 3429 case 0x82: 3430 3431 switch (subopcode) { 3432 3433 case MEGAIOC_QDRVRVER: /* Query driver version */ 3434 uioc->opcode = GET_DRIVER_VER; 3435 uioc->uioc_uaddr = uioc_mimd.data; 3436 break; 3437 3438 case MEGAIOC_QNADAP: /* Get # of adapters */ 3439 uioc->opcode = GET_N_ADAP; 3440 uioc->uioc_uaddr = uioc_mimd.data; 3441 break; 3442 3443 case MEGAIOC_QADAPINFO: /* Get adapter information */ 3444 uioc->opcode = GET_ADAP_INFO; 3445 uioc->adapno = uioc_mimd.ui.fcs.adapno; 3446 uioc->uioc_uaddr = uioc_mimd.data; 3447 break; 3448 3449 default: 3450 return(-EINVAL); 3451 } 3452 3453 break; 3454 3455 3456 case 0x81: 3457 3458 uioc->opcode = MBOX_CMD; 3459 uioc->adapno = uioc_mimd.ui.fcs.adapno; 3460 3461 memcpy(uioc->uioc_rmbox, uioc_mimd.mbox, 18); 3462 3463 uioc->xferlen = uioc_mimd.ui.fcs.length; 3464 3465 if( uioc_mimd.outlen ) uioc->flags = UIOC_RD; 3466 if( uioc_mimd.inlen ) uioc->flags |= UIOC_WR; 3467 3468 break; 3469 3470 case 0x80: 3471 3472 uioc->opcode = MBOX_CMD; 3473 uioc->adapno = uioc_mimd.ui.fcs.adapno; 3474 3475 memcpy(uioc->uioc_rmbox, uioc_mimd.mbox, 18); 3476 3477 /* 3478 * Choose the xferlen bigger of input and output data 3479 */ 3480 uioc->xferlen = uioc_mimd.outlen > uioc_mimd.inlen ? 3481 uioc_mimd.outlen : uioc_mimd.inlen; 3482 3483 if( uioc_mimd.outlen ) uioc->flags = UIOC_RD; 3484 if( uioc_mimd.inlen ) uioc->flags |= UIOC_WR; 3485 3486 break; 3487 3488 default: 3489 return (-EINVAL); 3490 3491 } 3492 3493 return 0; 3494 } 3495 3496 /* 3497 * mega_n_to_m() 3498 * @arg: user address 3499 * @mc: mailbox command 3500 * 3501 * Updates the status information to the application, depending on application 3502 * conforms to older mimd ioctl interface or newer NIT ioctl interface 3503 */ 3504 static int 3505 mega_n_to_m(void __user *arg, megacmd_t *mc) 3506 { 3507 nitioctl_t __user *uiocp; 3508 megacmd_t __user *umc; 3509 mega_passthru __user *upthru; 3510 struct uioctl_t __user *uioc_mimd; 3511 char signature[8] = {0}; 3512 3513 /* 3514 * check is the application conforms to NIT. 
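 * The first seven bytes of the user buffer are compared against the
 * "MEGANIT" signature below; on a match the status is written into the
 * NIT mailbox structure, otherwise into byte 17 of the older uioctl_t
 * mailbox.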
3515 */ 3516 if( copy_from_user(signature, arg, 7) ) 3517 return -EFAULT; 3518 3519 if( memcmp(signature, "MEGANIT", 7) == 0 ) { 3520 3521 uiocp = arg; 3522 3523 if( put_user(mc->status, (u8 __user *)&MBOX_P(uiocp)->status) ) 3524 return (-EFAULT); 3525 3526 if( mc->cmd == MEGA_MBOXCMD_PASSTHRU ) { 3527 3528 umc = MBOX_P(uiocp); 3529 3530 if (get_user(upthru, (mega_passthru __user * __user *)&umc->xferaddr)) 3531 return -EFAULT; 3532 3533 if( put_user(mc->status, (u8 __user *)&upthru->scsistatus)) 3534 return (-EFAULT); 3535 } 3536 } 3537 else { 3538 uioc_mimd = arg; 3539 3540 if( put_user(mc->status, (u8 __user *)&uioc_mimd->mbox[17]) ) 3541 return (-EFAULT); 3542 3543 if( mc->cmd == MEGA_MBOXCMD_PASSTHRU ) { 3544 3545 umc = (megacmd_t __user *)uioc_mimd->mbox; 3546 3547 if (get_user(upthru, (mega_passthru __user * __user *)&umc->xferaddr)) 3548 return (-EFAULT); 3549 3550 if( put_user(mc->status, (u8 __user *)&upthru->scsistatus) ) 3551 return (-EFAULT); 3552 } 3553 } 3554 3555 return 0; 3556 } 3557 3558 3559 /* 3560 * MEGARAID 'FW' commands. 3561 */ 3562 3563 /** 3564 * mega_is_bios_enabled() 3565 * @adapter: pointer to our soft state 3566 * 3567 * issue command to find out if the BIOS is enabled for this controller 3568 */ 3569 static int 3570 mega_is_bios_enabled(adapter_t *adapter) 3571 { 3572 unsigned char raw_mbox[sizeof(struct mbox_out)]; 3573 mbox_t *mbox; 3574 3575 mbox = (mbox_t *)raw_mbox; 3576 3577 memset(&mbox->m_out, 0, sizeof(raw_mbox)); 3578 3579 memset((void *)adapter->mega_buffer, 0, MEGA_BUFFER_SIZE); 3580 3581 mbox->m_out.xferaddr = (u32)adapter->buf_dma_handle; 3582 3583 raw_mbox[0] = IS_BIOS_ENABLED; 3584 raw_mbox[2] = GET_BIOS; 3585 3586 issue_scb_block(adapter, raw_mbox); 3587 3588 return *(char *)adapter->mega_buffer; 3589 } 3590 3591 3592 /** 3593 * mega_enum_raid_scsi() 3594 * @adapter: pointer to our soft state 3595 * 3596 * Find out what channels are RAID/SCSI. This information is used to 3597 * differentiate the virtual channels and physical channels and to support 3598 * ROMB feature and non-disk devices. 3599 */ 3600 static void 3601 mega_enum_raid_scsi(adapter_t *adapter) 3602 { 3603 unsigned char raw_mbox[sizeof(struct mbox_out)]; 3604 mbox_t *mbox; 3605 int i; 3606 3607 mbox = (mbox_t *)raw_mbox; 3608 3609 memset(&mbox->m_out, 0, sizeof(raw_mbox)); 3610 3611 /* 3612 * issue command to find out what channels are raid/scsi 3613 */ 3614 raw_mbox[0] = CHNL_CLASS; 3615 raw_mbox[2] = GET_CHNL_CLASS; 3616 3617 memset((void *)adapter->mega_buffer, 0, MEGA_BUFFER_SIZE); 3618 3619 mbox->m_out.xferaddr = (u32)adapter->buf_dma_handle; 3620 3621 /* 3622 * Non-ROMB firmware fail this command, so all channels 3623 * must be shown RAID 3624 */ 3625 adapter->mega_ch_class = 0xFF; 3626 3627 if(!issue_scb_block(adapter, raw_mbox)) { 3628 adapter->mega_ch_class = *((char *)adapter->mega_buffer); 3629 3630 } 3631 3632 for( i = 0; i < adapter->product_info.nchannels; i++ ) { 3633 if( (adapter->mega_ch_class >> i) & 0x01 ) { 3634 dev_info(&adapter->dev->dev, "channel[%d] is raid\n", 3635 i); 3636 } 3637 else { 3638 dev_info(&adapter->dev->dev, "channel[%d] is scsi\n", 3639 i); 3640 } 3641 } 3642 3643 return; 3644 } 3645 3646 3647 /** 3648 * mega_get_boot_drv() 3649 * @adapter: pointer to our soft state 3650 * 3651 * Find out which device is the boot device. Note, any logical drive or any 3652 * phyical device (e.g., a CDROM) can be designated as a boot device. 
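 *
 * The private BIOS data block is validated with a simple additive checksum
 * over its first 14 bytes; if the most significant bit of boot_drv is set,
 * the remaining bits encode a physical device as (channel * 16 + target),
 * otherwise the value is the boot logical drive number.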
3653 */ 3654 static void 3655 mega_get_boot_drv(adapter_t *adapter) 3656 { 3657 struct private_bios_data *prv_bios_data; 3658 unsigned char raw_mbox[sizeof(struct mbox_out)]; 3659 mbox_t *mbox; 3660 u16 cksum = 0; 3661 u8 *cksum_p; 3662 u8 boot_pdrv; 3663 int i; 3664 3665 mbox = (mbox_t *)raw_mbox; 3666 3667 memset(&mbox->m_out, 0, sizeof(raw_mbox)); 3668 3669 raw_mbox[0] = BIOS_PVT_DATA; 3670 raw_mbox[2] = GET_BIOS_PVT_DATA; 3671 3672 memset((void *)adapter->mega_buffer, 0, MEGA_BUFFER_SIZE); 3673 3674 mbox->m_out.xferaddr = (u32)adapter->buf_dma_handle; 3675 3676 adapter->boot_ldrv_enabled = 0; 3677 adapter->boot_ldrv = 0; 3678 3679 adapter->boot_pdrv_enabled = 0; 3680 adapter->boot_pdrv_ch = 0; 3681 adapter->boot_pdrv_tgt = 0; 3682 3683 if(issue_scb_block(adapter, raw_mbox) == 0) { 3684 prv_bios_data = 3685 (struct private_bios_data *)adapter->mega_buffer; 3686 3687 cksum = 0; 3688 cksum_p = (char *)prv_bios_data; 3689 for (i = 0; i < 14; i++ ) { 3690 cksum += (u16)(*cksum_p++); 3691 } 3692 3693 if (prv_bios_data->cksum == (u16)(0-cksum) ) { 3694 3695 /* 3696 * If MSB is set, a physical drive is set as boot 3697 * device 3698 */ 3699 if( prv_bios_data->boot_drv & 0x80 ) { 3700 adapter->boot_pdrv_enabled = 1; 3701 boot_pdrv = prv_bios_data->boot_drv & 0x7F; 3702 adapter->boot_pdrv_ch = boot_pdrv / 16; 3703 adapter->boot_pdrv_tgt = boot_pdrv % 16; 3704 } 3705 else { 3706 adapter->boot_ldrv_enabled = 1; 3707 adapter->boot_ldrv = prv_bios_data->boot_drv; 3708 } 3709 } 3710 } 3711 3712 } 3713 3714 /** 3715 * mega_support_random_del() 3716 * @adapter: pointer to our soft state 3717 * 3718 * Find out if this controller supports random deletion and addition of 3719 * logical drives 3720 */ 3721 static int 3722 mega_support_random_del(adapter_t *adapter) 3723 { 3724 unsigned char raw_mbox[sizeof(struct mbox_out)]; 3725 mbox_t *mbox; 3726 int rval; 3727 3728 mbox = (mbox_t *)raw_mbox; 3729 3730 memset(&mbox->m_out, 0, sizeof(raw_mbox)); 3731 3732 /* 3733 * issue command 3734 */ 3735 raw_mbox[0] = FC_DEL_LOGDRV; 3736 raw_mbox[2] = OP_SUP_DEL_LOGDRV; 3737 3738 rval = issue_scb_block(adapter, raw_mbox); 3739 3740 return !rval; 3741 } 3742 3743 3744 /** 3745 * mega_support_ext_cdb() 3746 * @adapter: pointer to our soft state 3747 * 3748 * Find out if this firmware support cdblen > 10 3749 */ 3750 static int 3751 mega_support_ext_cdb(adapter_t *adapter) 3752 { 3753 unsigned char raw_mbox[sizeof(struct mbox_out)]; 3754 mbox_t *mbox; 3755 int rval; 3756 3757 mbox = (mbox_t *)raw_mbox; 3758 3759 memset(&mbox->m_out, 0, sizeof(raw_mbox)); 3760 /* 3761 * issue command to find out if controller supports extended CDBs. 3762 */ 3763 raw_mbox[0] = 0xA4; 3764 raw_mbox[2] = 0x16; 3765 3766 rval = issue_scb_block(adapter, raw_mbox); 3767 3768 return !rval; 3769 } 3770 3771 3772 /** 3773 * mega_del_logdrv() 3774 * @adapter: pointer to our soft state 3775 * @logdrv: logical drive to be deleted 3776 * 3777 * Delete the specified logical drive. It is the responsibility of the user 3778 * app to let the OS know about this operation. 3779 */ 3780 static int 3781 mega_del_logdrv(adapter_t *adapter, int logdrv) 3782 { 3783 unsigned long flags; 3784 scb_t *scb; 3785 int rval; 3786 3787 /* 3788 * Stop sending commands to the controller, queue them internally. 3789 * When deletion is complete, ISR will flush the queue. 
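 * While the delete is in flight the adapter is marked quiescent and the
 * driver sleeps until no commands are pending; once mega_do_del_logdrv()
 * returns, passthru commands still queued for logical drives are remapped
 * by adding 0x80 to their logical drive ids before the queue is restarted.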
3790 */ 3791 atomic_set(&adapter->quiescent, 1); 3792 3793 /* 3794 * Wait till all the issued commands are complete and there are no 3795 * commands in the pending queue 3796 */ 3797 while (atomic_read(&adapter->pend_cmds) > 0 || 3798 !list_empty(&adapter->pending_list)) 3799 msleep(1000); /* sleep for 1s */ 3800 3801 rval = mega_do_del_logdrv(adapter, logdrv); 3802 3803 spin_lock_irqsave(&adapter->lock, flags); 3804 3805 /* 3806 * If delete operation was successful, add 0x80 to the logical drive 3807 * ids for commands in the pending queue. 3808 */ 3809 if (adapter->read_ldidmap) { 3810 struct list_head *pos; 3811 list_for_each(pos, &adapter->pending_list) { 3812 scb = list_entry(pos, scb_t, list); 3813 if (scb->pthru->logdrv < 0x80 ) 3814 scb->pthru->logdrv += 0x80; 3815 } 3816 } 3817 3818 atomic_set(&adapter->quiescent, 0); 3819 3820 mega_runpendq(adapter); 3821 3822 spin_unlock_irqrestore(&adapter->lock, flags); 3823 3824 return rval; 3825 } 3826 3827 3828 static int 3829 mega_do_del_logdrv(adapter_t *adapter, int logdrv) 3830 { 3831 megacmd_t mc; 3832 int rval; 3833 3834 memset( &mc, 0, sizeof(megacmd_t)); 3835 3836 mc.cmd = FC_DEL_LOGDRV; 3837 mc.opcode = OP_DEL_LOGDRV; 3838 mc.subopcode = logdrv; 3839 3840 rval = mega_internal_command(adapter, &mc, NULL); 3841 3842 /* log this event */ 3843 if(rval) { 3844 dev_warn(&adapter->dev->dev, "Delete LD-%d failed", logdrv); 3845 return rval; 3846 } 3847 3848 /* 3849 * After deleting first logical drive, the logical drives must be 3850 * addressed by adding 0x80 to the logical drive id. 3851 */ 3852 adapter->read_ldidmap = 1; 3853 3854 return rval; 3855 } 3856 3857 3858 /** 3859 * mega_get_max_sgl() 3860 * @adapter: pointer to our soft state 3861 * 3862 * Find out the maximum number of scatter-gather elements supported by this 3863 * version of the firmware 3864 */ 3865 static void 3866 mega_get_max_sgl(adapter_t *adapter) 3867 { 3868 unsigned char raw_mbox[sizeof(struct mbox_out)]; 3869 mbox_t *mbox; 3870 3871 mbox = (mbox_t *)raw_mbox; 3872 3873 memset(mbox, 0, sizeof(raw_mbox)); 3874 3875 memset((void *)adapter->mega_buffer, 0, MEGA_BUFFER_SIZE); 3876 3877 mbox->m_out.xferaddr = (u32)adapter->buf_dma_handle; 3878 3879 raw_mbox[0] = MAIN_MISC_OPCODE; 3880 raw_mbox[2] = GET_MAX_SG_SUPPORT; 3881 3882 3883 if( issue_scb_block(adapter, raw_mbox) ) { 3884 /* 3885 * f/w does not support this command. Choose the default value 3886 */ 3887 adapter->sglen = MIN_SGLIST; 3888 } 3889 else { 3890 adapter->sglen = *((char *)adapter->mega_buffer); 3891 3892 /* 3893 * Make sure this is not more than the resources we are 3894 * planning to allocate 3895 */ 3896 if ( adapter->sglen > MAX_SGLIST ) 3897 adapter->sglen = MAX_SGLIST; 3898 } 3899 3900 return; 3901 } 3902 3903 3904 /** 3905 * mega_support_cluster() 3906 * @adapter: pointer to our soft state 3907 * 3908 * Find out if this firmware support cluster calls. 3909 */ 3910 static int 3911 mega_support_cluster(adapter_t *adapter) 3912 { 3913 unsigned char raw_mbox[sizeof(struct mbox_out)]; 3914 mbox_t *mbox; 3915 3916 mbox = (mbox_t *)raw_mbox; 3917 3918 memset(mbox, 0, sizeof(raw_mbox)); 3919 3920 memset((void *)adapter->mega_buffer, 0, MEGA_BUFFER_SIZE); 3921 3922 mbox->m_out.xferaddr = (u32)adapter->buf_dma_handle; 3923 3924 /* 3925 * Try to get the initiator id. This command will succeed iff the 3926 * clustering is available on this HBA. 3927 */ 3928 raw_mbox[0] = MEGA_GET_TARGET_ID; 3929 3930 if( issue_scb_block(adapter, raw_mbox) == 0 ) { 3931 3932 /* 3933 * Cluster support available. 
Get the initiator target id. 3934 * Tell our id to mid-layer too. 3935 */ 3936 adapter->this_id = *(u32 *)adapter->mega_buffer; 3937 adapter->host->this_id = adapter->this_id; 3938 3939 return 1; 3940 } 3941 3942 return 0; 3943 } 3944 3945 #ifdef CONFIG_PROC_FS 3946 /** 3947 * mega_adapinq() 3948 * @adapter: pointer to our soft state 3949 * @dma_handle: DMA address of the buffer 3950 * 3951 * Issue internal commands while interrupts are available. 3952 * We only issue direct mailbox commands from within the driver. ioctl() 3953 * interface using these routines can issue passthru commands. 3954 */ 3955 static int 3956 mega_adapinq(adapter_t *adapter, dma_addr_t dma_handle) 3957 { 3958 megacmd_t mc; 3959 3960 memset(&mc, 0, sizeof(megacmd_t)); 3961 3962 if( adapter->flag & BOARD_40LD ) { 3963 mc.cmd = FC_NEW_CONFIG; 3964 mc.opcode = NC_SUBOP_ENQUIRY3; 3965 mc.subopcode = ENQ3_GET_SOLICITED_FULL; 3966 } 3967 else { 3968 mc.cmd = MEGA_MBOXCMD_ADPEXTINQ; 3969 } 3970 3971 mc.xferaddr = (u32)dma_handle; 3972 3973 if ( mega_internal_command(adapter, &mc, NULL) != 0 ) { 3974 return -1; 3975 } 3976 3977 return 0; 3978 } 3979 3980 3981 /** 3982 * mega_internal_dev_inquiry() 3983 * @adapter: pointer to our soft state 3984 * @ch: channel for this device 3985 * @tgt: ID of this device 3986 * @buf_dma_handle: DMA address of the buffer 3987 * 3988 * Issue the scsi inquiry for the specified device. 3989 */ 3990 static int 3991 mega_internal_dev_inquiry(adapter_t *adapter, u8 ch, u8 tgt, 3992 dma_addr_t buf_dma_handle) 3993 { 3994 mega_passthru *pthru; 3995 dma_addr_t pthru_dma_handle; 3996 megacmd_t mc; 3997 int rval; 3998 struct pci_dev *pdev; 3999 4000 4001 /* 4002 * For all internal commands, the buffer must be allocated in <4GB 4003 * address range 4004 */ 4005 if( make_local_pdev(adapter, &pdev) != 0 ) return -1; 4006 4007 pthru = pci_alloc_consistent(pdev, sizeof(mega_passthru), 4008 &pthru_dma_handle); 4009 4010 if( pthru == NULL ) { 4011 free_local_pdev(pdev); 4012 return -1; 4013 } 4014 4015 pthru->timeout = 2; 4016 pthru->ars = 1; 4017 pthru->reqsenselen = 14; 4018 pthru->islogical = 0; 4019 4020 pthru->channel = (adapter->flag & BOARD_40LD) ? 0 : ch; 4021 4022 pthru->target = (adapter->flag & BOARD_40LD) ? (ch << 4)|tgt : tgt; 4023 4024 pthru->cdblen = 6; 4025 4026 pthru->cdb[0] = INQUIRY; 4027 pthru->cdb[1] = 0; 4028 pthru->cdb[2] = 0; 4029 pthru->cdb[3] = 0; 4030 pthru->cdb[4] = 255; 4031 pthru->cdb[5] = 0; 4032 4033 4034 pthru->dataxferaddr = (u32)buf_dma_handle; 4035 pthru->dataxferlen = 256; 4036 4037 memset(&mc, 0, sizeof(megacmd_t)); 4038 4039 mc.cmd = MEGA_MBOXCMD_PASSTHRU; 4040 mc.xferaddr = (u32)pthru_dma_handle; 4041 4042 rval = mega_internal_command(adapter, &mc, pthru); 4043 4044 pci_free_consistent(pdev, sizeof(mega_passthru), pthru, 4045 pthru_dma_handle); 4046 4047 free_local_pdev(pdev); 4048 4049 return rval; 4050 } 4051 #endif 4052 4053 /** 4054 * mega_internal_command() 4055 * @adapter: pointer to our soft state 4056 * @mc: the mailbox command 4057 * @pthru: Passthru structure for DCDB commands 4058 * 4059 * Issue the internal commands in interrupt mode. 4060 * The last argument is the address of the passthru structure if the command 4061 * to be fired is a passthru command 4062 * 4063 * Note: parameter 'pthru' is null for non-passthru commands. 
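 *
 * Internal commands are serialized on int_mtx and share the single reserved
 * command id CMDID_INT_CMDS; the caller sleeps on int_waitq until the ISR
 * completes the command, and the firmware status is returned both in
 * mc->status and as the return value.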
4064 */ 4065 static int 4066 mega_internal_command(adapter_t *adapter, megacmd_t *mc, mega_passthru *pthru) 4067 { 4068 unsigned long flags; 4069 scb_t *scb; 4070 int rval; 4071 4072 /* 4073 * The internal commands share one command id and hence are 4074 * serialized. This is so because we want to reserve maximum number of 4075 * available command ids for the I/O commands. 4076 */ 4077 mutex_lock(&adapter->int_mtx); 4078 4079 scb = &adapter->int_scb; 4080 memset(scb, 0, sizeof(scb_t)); 4081 4082 scb->idx = CMDID_INT_CMDS; 4083 scb->state |= SCB_ACTIVE | SCB_PENDQ; 4084 4085 memcpy(scb->raw_mbox, mc, sizeof(megacmd_t)); 4086 4087 /* 4088 * Is it a passthru command 4089 */ 4090 if (mc->cmd == MEGA_MBOXCMD_PASSTHRU) 4091 scb->pthru = pthru; 4092 4093 spin_lock_irqsave(&adapter->lock, flags); 4094 list_add_tail(&scb->list, &adapter->pending_list); 4095 /* 4096 * Check if the HBA is in quiescent state, e.g., during a 4097 * delete logical drive opertion. If it is, don't run 4098 * the pending_list. 4099 */ 4100 if (atomic_read(&adapter->quiescent) == 0) 4101 mega_runpendq(adapter); 4102 spin_unlock_irqrestore(&adapter->lock, flags); 4103 4104 wait_for_completion(&adapter->int_waitq); 4105 4106 mc->status = rval = adapter->int_status; 4107 4108 /* 4109 * Print a debug message for all failed commands. Applications can use 4110 * this information. 4111 */ 4112 if (rval && trace_level) { 4113 dev_info(&adapter->dev->dev, "cmd [%x, %x, %x] status:[%x]\n", 4114 mc->cmd, mc->opcode, mc->subopcode, rval); 4115 } 4116 4117 mutex_unlock(&adapter->int_mtx); 4118 return rval; 4119 } 4120 4121 static struct scsi_host_template megaraid_template = { 4122 .module = THIS_MODULE, 4123 .name = "MegaRAID", 4124 .proc_name = "megaraid_legacy", 4125 .info = megaraid_info, 4126 .queuecommand = megaraid_queue, 4127 .bios_param = megaraid_biosparam, 4128 .max_sectors = MAX_SECTORS_PER_IO, 4129 .can_queue = MAX_COMMANDS, 4130 .this_id = DEFAULT_INITIATOR_ID, 4131 .sg_tablesize = MAX_SGLIST, 4132 .cmd_per_lun = DEF_CMD_PER_LUN, 4133 .eh_abort_handler = megaraid_abort, 4134 .eh_device_reset_handler = megaraid_reset, 4135 .eh_bus_reset_handler = megaraid_reset, 4136 .eh_host_reset_handler = megaraid_reset, 4137 .no_write_same = 1, 4138 }; 4139 4140 static int 4141 megaraid_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) 4142 { 4143 struct Scsi_Host *host; 4144 adapter_t *adapter; 4145 unsigned long mega_baseport, tbase, flag = 0; 4146 u16 subsysid, subsysvid; 4147 u8 pci_bus, pci_dev_func; 4148 int irq, i, j; 4149 int error = -ENODEV; 4150 4151 if (hba_count >= MAX_CONTROLLERS) 4152 goto out; 4153 4154 if (pci_enable_device(pdev)) 4155 goto out; 4156 pci_set_master(pdev); 4157 4158 pci_bus = pdev->bus->number; 4159 pci_dev_func = pdev->devfn; 4160 4161 /* 4162 * The megaraid3 stuff reports the ID of the Intel part which is not 4163 * remotely specific to the megaraid 4164 */ 4165 if (pdev->vendor == PCI_VENDOR_ID_INTEL) { 4166 u16 magic; 4167 /* 4168 * Don't fall over the Compaq management cards using the same 4169 * PCI identifier 4170 */ 4171 if (pdev->subsystem_vendor == PCI_VENDOR_ID_COMPAQ && 4172 pdev->subsystem_device == 0xC000) 4173 goto out_disable_device; 4174 /* Now check the magic signature byte */ 4175 pci_read_config_word(pdev, PCI_CONF_AMISIG, &magic); 4176 if (magic != HBA_SIGNATURE_471 && magic != HBA_SIGNATURE) 4177 goto out_disable_device; 4178 /* Ok it is probably a megaraid */ 4179 } 4180 4181 /* 4182 * For these vendor and device ids, signature offsets are not 4183 * valid and 64 bit 
is implicit 4184 */ 4185 if (id->driver_data & BOARD_64BIT) 4186 flag |= BOARD_64BIT; 4187 else { 4188 u32 magic64; 4189 4190 pci_read_config_dword(pdev, PCI_CONF_AMISIG64, &magic64); 4191 if (magic64 == HBA_SIGNATURE_64BIT) 4192 flag |= BOARD_64BIT; 4193 } 4194 4195 subsysvid = pdev->subsystem_vendor; 4196 subsysid = pdev->subsystem_device; 4197 4198 dev_notice(&pdev->dev, "found 0x%4.04x:0x%4.04x\n", 4199 id->vendor, id->device); 4200 4201 /* Read the base port and IRQ from PCI */ 4202 mega_baseport = pci_resource_start(pdev, 0); 4203 irq = pdev->irq; 4204 4205 tbase = mega_baseport; 4206 if (pci_resource_flags(pdev, 0) & IORESOURCE_MEM) { 4207 flag |= BOARD_MEMMAP; 4208 4209 if (!request_mem_region(mega_baseport, 128, "megaraid")) { 4210 dev_warn(&pdev->dev, "mem region busy!\n"); 4211 goto out_disable_device; 4212 } 4213 4214 mega_baseport = (unsigned long)ioremap(mega_baseport, 128); 4215 if (!mega_baseport) { 4216 dev_warn(&pdev->dev, "could not map hba memory\n"); 4217 goto out_release_region; 4218 } 4219 } else { 4220 flag |= BOARD_IOMAP; 4221 mega_baseport += 0x10; 4222 4223 if (!request_region(mega_baseport, 16, "megaraid")) 4224 goto out_disable_device; 4225 } 4226 4227 /* Initialize SCSI Host structure */ 4228 host = scsi_host_alloc(&megaraid_template, sizeof(adapter_t)); 4229 if (!host) 4230 goto out_iounmap; 4231 4232 adapter = (adapter_t *)host->hostdata; 4233 memset(adapter, 0, sizeof(adapter_t)); 4234 4235 dev_notice(&pdev->dev, 4236 "scsi%d:Found MegaRAID controller at 0x%lx, IRQ:%d\n", 4237 host->host_no, mega_baseport, irq); 4238 4239 adapter->base = mega_baseport; 4240 if (flag & BOARD_MEMMAP) 4241 adapter->mmio_base = (void __iomem *) mega_baseport; 4242 4243 INIT_LIST_HEAD(&adapter->free_list); 4244 INIT_LIST_HEAD(&adapter->pending_list); 4245 INIT_LIST_HEAD(&adapter->completed_list); 4246 4247 adapter->flag = flag; 4248 spin_lock_init(&adapter->lock); 4249 4250 host->cmd_per_lun = max_cmd_per_lun; 4251 host->max_sectors = max_sectors_per_io; 4252 4253 adapter->dev = pdev; 4254 adapter->host = host; 4255 4256 adapter->host->irq = irq; 4257 4258 if (flag & BOARD_MEMMAP) 4259 adapter->host->base = tbase; 4260 else { 4261 adapter->host->io_port = tbase; 4262 adapter->host->n_io_port = 16; 4263 } 4264 4265 adapter->host->unique_id = (pci_bus << 8) | pci_dev_func; 4266 4267 /* 4268 * Allocate buffer to issue internal commands. 4269 */ 4270 adapter->mega_buffer = pci_alloc_consistent(adapter->dev, 4271 MEGA_BUFFER_SIZE, &adapter->buf_dma_handle); 4272 if (!adapter->mega_buffer) { 4273 dev_warn(&pdev->dev, "out of RAM\n"); 4274 goto out_host_put; 4275 } 4276 4277 adapter->scb_list = kmalloc_array(MAX_COMMANDS, sizeof(scb_t), 4278 GFP_KERNEL); 4279 if (!adapter->scb_list) { 4280 dev_warn(&pdev->dev, "out of RAM\n"); 4281 goto out_free_cmd_buffer; 4282 } 4283 4284 if (request_irq(irq, (adapter->flag & BOARD_MEMMAP) ? 
4285 megaraid_isr_memmapped : megaraid_isr_iomapped, 4286 IRQF_SHARED, "megaraid", adapter)) { 4287 dev_warn(&pdev->dev, "Couldn't register IRQ %d!\n", irq); 4288 goto out_free_scb_list; 4289 } 4290 4291 if (mega_setup_mailbox(adapter)) 4292 goto out_free_irq; 4293 4294 if (mega_query_adapter(adapter)) 4295 goto out_free_mbox; 4296 4297 /* 4298 * Have checks for some buggy f/w 4299 */ 4300 if ((subsysid == 0x1111) && (subsysvid == 0x1111)) { 4301 /* 4302 * Which firmware 4303 */ 4304 if (!strcmp(adapter->fw_version, "3.00") || 4305 !strcmp(adapter->fw_version, "3.01")) { 4306 4307 dev_warn(&pdev->dev, 4308 "Your card is a Dell PERC " 4309 "2/SC RAID controller with " 4310 "firmware\nmegaraid: 3.00 or 3.01. " 4311 "This driver is known to have " 4312 "corruption issues\nmegaraid: with " 4313 "those firmware versions on this " 4314 "specific card. In order\nmegaraid: " 4315 "to protect your data, please upgrade " 4316 "your firmware to version\nmegaraid: " 4317 "3.10 or later, available from the " 4318 "Dell Technical Support web\n" 4319 "megaraid: site at\nhttp://support." 4320 "dell.com/us/en/filelib/download/" 4321 "index.asp?fileid=2940\n" 4322 ); 4323 } 4324 } 4325 4326 /* 4327 * If we have a HP 1M(0x60E7)/2M(0x60E8) controller with 4328 * firmware H.01.07, H.01.08, and H.01.09 disable 64 bit 4329 * support, since this firmware cannot handle 64 bit 4330 * addressing 4331 */ 4332 if ((subsysvid == PCI_VENDOR_ID_HP) && 4333 ((subsysid == 0x60E7) || (subsysid == 0x60E8))) { 4334 /* 4335 * which firmware 4336 */ 4337 if (!strcmp(adapter->fw_version, "H01.07") || 4338 !strcmp(adapter->fw_version, "H01.08") || 4339 !strcmp(adapter->fw_version, "H01.09") ) { 4340 dev_warn(&pdev->dev, 4341 "Firmware H.01.07, " 4342 "H.01.08, and H.01.09 on 1M/2M " 4343 "controllers\n" 4344 "do not support 64 bit " 4345 "addressing.\nDISABLING " 4346 "64 bit support.\n"); 4347 adapter->flag &= ~BOARD_64BIT; 4348 } 4349 } 4350 4351 if (mega_is_bios_enabled(adapter)) 4352 mega_hbas[hba_count].is_bios_enabled = 1; 4353 mega_hbas[hba_count].hostdata_addr = adapter; 4354 4355 /* 4356 * Find out which channel is raid and which is scsi. This is 4357 * for ROMB support. 4358 */ 4359 mega_enum_raid_scsi(adapter); 4360 4361 /* 4362 * Find out if a logical drive is set as the boot drive. If 4363 * there is one, will make that as the first logical drive. 4364 * ROMB: Do we have to boot from a physical drive. Then all 4365 * the physical drives would appear before the logical disks. 4366 * Else, all the physical drives would be exported to the mid 4367 * layer after logical drives. 
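 * The logdrv_chan[] map set up below records which exported channels carry
 * logical drives (the NVIRT_CHAN virtual channels) and which carry physical
 * devices; when booting from a logical drive the virtual channels are placed
 * first and mega_ch_class is shifted up accordingly.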
4368 */ 4369 mega_get_boot_drv(adapter); 4370 4371 if (adapter->boot_pdrv_enabled) { 4372 j = adapter->product_info.nchannels; 4373 for( i = 0; i < j; i++ ) 4374 adapter->logdrv_chan[i] = 0; 4375 for( i = j; i < NVIRT_CHAN + j; i++ ) 4376 adapter->logdrv_chan[i] = 1; 4377 } else { 4378 for (i = 0; i < NVIRT_CHAN; i++) 4379 adapter->logdrv_chan[i] = 1; 4380 for (i = NVIRT_CHAN; i < MAX_CHANNELS+NVIRT_CHAN; i++) 4381 adapter->logdrv_chan[i] = 0; 4382 adapter->mega_ch_class <<= NVIRT_CHAN; 4383 } 4384 4385 /* 4386 * Do we support random deletion and addition of logical 4387 * drives 4388 */ 4389 adapter->read_ldidmap = 0; /* set it after first logdrv 4390 delete cmd */ 4391 adapter->support_random_del = mega_support_random_del(adapter); 4392 4393 /* Initialize SCBs */ 4394 if (mega_init_scb(adapter)) 4395 goto out_free_mbox; 4396 4397 /* 4398 * Reset the pending commands counter 4399 */ 4400 atomic_set(&adapter->pend_cmds, 0); 4401 4402 /* 4403 * Reset the adapter quiescent flag 4404 */ 4405 atomic_set(&adapter->quiescent, 0); 4406 4407 hba_soft_state[hba_count] = adapter; 4408 4409 /* 4410 * Fill in the structure which needs to be passed back to the 4411 * application when it does an ioctl() for controller related 4412 * information. 4413 */ 4414 i = hba_count; 4415 4416 mcontroller[i].base = mega_baseport; 4417 mcontroller[i].irq = irq; 4418 mcontroller[i].numldrv = adapter->numldrv; 4419 mcontroller[i].pcibus = pci_bus; 4420 mcontroller[i].pcidev = id->device; 4421 mcontroller[i].pcifun = PCI_FUNC (pci_dev_func); 4422 mcontroller[i].pciid = -1; 4423 mcontroller[i].pcivendor = id->vendor; 4424 mcontroller[i].pcislot = PCI_SLOT(pci_dev_func); 4425 mcontroller[i].uid = (pci_bus << 8) | pci_dev_func; 4426 4427 4428 /* Set the Mode of addressing to 64 bit if we can */ 4429 if ((adapter->flag & BOARD_64BIT) && (sizeof(dma_addr_t) == 8)) { 4430 pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); 4431 adapter->has_64bit_addr = 1; 4432 } else { 4433 pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); 4434 adapter->has_64bit_addr = 0; 4435 } 4436 4437 mutex_init(&adapter->int_mtx); 4438 init_completion(&adapter->int_waitq); 4439 4440 adapter->this_id = DEFAULT_INITIATOR_ID; 4441 adapter->host->this_id = DEFAULT_INITIATOR_ID; 4442 4443 #if MEGA_HAVE_CLUSTERING 4444 /* 4445 * Is cluster support enabled on this controller 4446 * Note: In a cluster the HBAs ( the initiators ) will have 4447 * different target IDs and we cannot assume it to be 7. 
	mutex_init(&adapter->int_mtx);
	init_completion(&adapter->int_waitq);

	adapter->this_id = DEFAULT_INITIATOR_ID;
	adapter->host->this_id = DEFAULT_INITIATOR_ID;

#if MEGA_HAVE_CLUSTERING
	/*
	 * Is cluster support enabled on this controller?
	 * Note: in a cluster the HBAs (the initiators) will have
	 * different target IDs and we cannot assume them to be 7.  The
	 * call to mega_support_cluster() will also fetch the target id
	 * if cluster support is available.
	 */
	adapter->has_cluster = mega_support_cluster(adapter);
	if (adapter->has_cluster) {
		dev_notice(&pdev->dev,
			"Cluster driver, initiator id:%d\n",
			adapter->this_id);
	}
#endif

	pci_set_drvdata(pdev, host);

	mega_create_proc_entry(hba_count, mega_proc_dir_entry);

	error = scsi_add_host(host, &pdev->dev);
	if (error)
		goto out_free_mbox;

	scsi_scan_host(host);
	hba_count++;
	return 0;

out_free_mbox:
	pci_free_consistent(adapter->dev, sizeof(mbox64_t),
			adapter->una_mbox64, adapter->una_mbox64_dma);
out_free_irq:
	free_irq(adapter->host->irq, adapter);
out_free_scb_list:
	kfree(adapter->scb_list);
out_free_cmd_buffer:
	pci_free_consistent(adapter->dev, MEGA_BUFFER_SIZE,
			adapter->mega_buffer, adapter->buf_dma_handle);
out_host_put:
	scsi_host_put(host);
out_iounmap:
	if (flag & BOARD_MEMMAP)
		iounmap((void *)mega_baseport);
out_release_region:
	if (flag & BOARD_MEMMAP)
		release_mem_region(tbase, 128);
	else
		release_region(mega_baseport, 16);
out_disable_device:
	pci_disable_device(pdev);
out:
	return error;
}
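#if 0
/*
 * Illustrative sketch only, not part of the driver: the two cache
 * flushes issued in __megaraid_shutdown() below follow the same
 * pattern -- zero a raw mailbox, put the single-byte command opcode in
 * raw_mbox[0] and issue it synchronously with interrupts disabled.  A
 * hypothetical helper expressing that shape would look like this:
 */
static void example_issue_flush(adapter_t *adapter, u8 opcode)
{
	u_char raw_mbox[sizeof(struct mbox_out)];
	mbox_t *mbox = (mbox_t *)raw_mbox;

	memset(&mbox->m_out, 0, sizeof(raw_mbox));
	raw_mbox[0] = opcode;	/* FLUSH_ADAPTER or FLUSH_SYSTEM */

	/* blocking (interrupts disabled) command to the card */
	issue_scb_block(adapter, raw_mbox);
}
#endif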
static void
__megaraid_shutdown(adapter_t *adapter)
{
	u_char	raw_mbox[sizeof(struct mbox_out)];
	mbox_t	*mbox = (mbox_t *)raw_mbox;
	int	i;

	/* Flush adapter cache */
	memset(&mbox->m_out, 0, sizeof(raw_mbox));
	raw_mbox[0] = FLUSH_ADAPTER;

	free_irq(adapter->host->irq, adapter);

	/* Issue a blocking (interrupts disabled) command to the card */
	issue_scb_block(adapter, raw_mbox);

	/* Flush disks cache */
	memset(&mbox->m_out, 0, sizeof(raw_mbox));
	raw_mbox[0] = FLUSH_SYSTEM;

	/* Issue a blocking (interrupts disabled) command to the card */
	issue_scb_block(adapter, raw_mbox);

	if (atomic_read(&adapter->pend_cmds) > 0)
		dev_warn(&adapter->dev->dev, "pending commands!!\n");

	/*
	 * Have a deliberate delay to make sure all the caches are
	 * actually flushed.
	 */
	for (i = 0; i <= 10; i++)
		mdelay(1000);
}

static void
megaraid_remove_one(struct pci_dev *pdev)
{
	struct Scsi_Host *host = pci_get_drvdata(pdev);
	adapter_t *adapter = (adapter_t *)host->hostdata;
	char buf[12] = { 0 };

	scsi_remove_host(host);

	__megaraid_shutdown(adapter);

	/* Free our resources */
	if (adapter->flag & BOARD_MEMMAP) {
		iounmap((void *)adapter->base);
		release_mem_region(adapter->host->base, 128);
	} else
		release_region(adapter->base, 16);

	mega_free_sgl(adapter);

	sprintf(buf, "hba%d", adapter->host->host_no);
	remove_proc_subtree(buf, mega_proc_dir_entry);

	pci_free_consistent(adapter->dev, MEGA_BUFFER_SIZE,
			adapter->mega_buffer, adapter->buf_dma_handle);
	kfree(adapter->scb_list);
	pci_free_consistent(adapter->dev, sizeof(mbox64_t),
			adapter->una_mbox64, adapter->una_mbox64_dma);

	scsi_host_put(host);
	pci_disable_device(pdev);

	hba_count--;
}

static void
megaraid_shutdown(struct pci_dev *pdev)
{
	struct Scsi_Host *host = pci_get_drvdata(pdev);
	adapter_t *adapter = (adapter_t *)host->hostdata;

	__megaraid_shutdown(adapter);
}

static struct pci_device_id megaraid_pci_tbl[] = {
	{PCI_VENDOR_ID_AMI, PCI_DEVICE_ID_AMI_MEGARAID,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
	{PCI_VENDOR_ID_AMI, PCI_DEVICE_ID_AMI_MEGARAID2,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
	{PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_AMI_MEGARAID3,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
	{0,}
};
MODULE_DEVICE_TABLE(pci, megaraid_pci_tbl);

static struct pci_driver megaraid_pci_driver = {
	.name		= "megaraid_legacy",
	.id_table	= megaraid_pci_tbl,
	.probe		= megaraid_probe_one,
	.remove		= megaraid_remove_one,
	.shutdown	= megaraid_shutdown,
};

static int __init megaraid_init(void)
{
	int error;

	if ((max_cmd_per_lun <= 0) || (max_cmd_per_lun > MAX_CMD_PER_LUN))
		max_cmd_per_lun = MAX_CMD_PER_LUN;
	if (max_mbox_busy_wait > MBOX_BUSY_WAIT)
		max_mbox_busy_wait = MBOX_BUSY_WAIT;

#ifdef CONFIG_PROC_FS
	mega_proc_dir_entry = proc_mkdir("megaraid", NULL);
	if (!mega_proc_dir_entry) {
		printk(KERN_WARNING
				"megaraid: failed to create megaraid root\n");
	}
#endif
	error = pci_register_driver(&megaraid_pci_driver);
	if (error) {
#ifdef CONFIG_PROC_FS
		remove_proc_entry("megaraid", NULL);
#endif
		return error;
	}

	/*
	 * Register the driver as a character device so that applications
	 * can access it for ioctls.  A first argument (major) of 0 asks
	 * register_chrdev() for dynamic major number allocation.
	 */
	major = register_chrdev(0, "megadev_legacy", &megadev_fops);
	if (major < 0) {
		printk(KERN_WARNING
				"megaraid: failed to register char device\n");
	}

	return 0;
}

static void __exit megaraid_exit(void)
{
	/*
	 * Unregister the character device interface to the driver.
	 */
	unregister_chrdev(major, "megadev_legacy");

	pci_unregister_driver(&megaraid_pci_driver);

#ifdef CONFIG_PROC_FS
	remove_proc_entry("megaraid", NULL);
#endif
}

module_init(megaraid_init);
module_exit(megaraid_exit);

/* vi: set ts=8 sw=8 tw=78: */
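#if 0
/*
 * Illustrative sketch only, not part of the driver: the
 * megaraid_pci_tbl[] table above spells out every field of each
 * struct pci_device_id entry.  An equivalent entry written with the
 * PCI_DEVICE() convenience macro, which fills in PCI_ANY_ID for the
 * subsystem vendor and device IDs, would look like this:
 */
static const struct pci_device_id example_pci_tbl[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMI, PCI_DEVICE_ID_AMI_MEGARAID) },
	{ }	/* terminating entry */
};
#endif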