// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *
 * Linux MegaRAID device driver
 *
 * Copyright (c) 2002  LSI Logic Corporation.
 *
 * Copyright (c) 2002  Red Hat, Inc. All rights reserved.
 *	- fixes
 *	- speed-ups (list handling fixes, issued_list, optimizations.)
 *	- lots of cleanups.
 *
 * Copyright (c) 2003  Christoph Hellwig <hch@lst.de>
 *	- new-style, hotplug-aware pci probing and scsi registration
 *
 * Version : v2.00.4 Mon Nov 14 14:02:43 EST 2005 - Seokmann Ju
 *		<Seokmann.Ju@lsil.com>
 *
 * Description: Linux device driver for LSI Logic MegaRAID controller
 *
 * Supported controllers: MegaRAID 418, 428, 438, 466, 762, 467, 471, 490, 493
 *			  518, 520, 531, 532
 *
 * This driver is supported by LSI Logic, with assistance from Red Hat, Dell,
 * and others. Please send updates to the mailing list
 * linux-scsi@vger.kernel.org .
 */

#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/uaccess.h>
#include <asm/io.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/reboot.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/dma-mapping.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <scsi/scsicam.h>

#include "scsi.h"
#include <scsi/scsi_host.h>

#include "megaraid.h"

#define MEGARAID_MODULE_VERSION "2.00.4"

MODULE_AUTHOR ("sju@lsil.com");
MODULE_DESCRIPTION ("LSI Logic MegaRAID legacy driver");
MODULE_LICENSE ("GPL");
MODULE_VERSION(MEGARAID_MODULE_VERSION);

static DEFINE_MUTEX(megadev_mutex);
static unsigned int max_cmd_per_lun = DEF_CMD_PER_LUN;
module_param(max_cmd_per_lun, uint, 0);
MODULE_PARM_DESC(max_cmd_per_lun, "Maximum number of commands which can be issued to a single LUN (default=DEF_CMD_PER_LUN=63)");

static unsigned short int max_sectors_per_io = MAX_SECTORS_PER_IO;
module_param(max_sectors_per_io, ushort, 0);
MODULE_PARM_DESC(max_sectors_per_io, "Maximum number of sectors per I/O request (default=MAX_SECTORS_PER_IO=128)");


static unsigned short int max_mbox_busy_wait = MBOX_BUSY_WAIT;
module_param(max_mbox_busy_wait, ushort, 0);
MODULE_PARM_DESC(max_mbox_busy_wait, "Maximum wait for mailbox in microseconds if busy (default=MBOX_BUSY_WAIT=10)");
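
/*
 * Example (hypothetical values) of setting the parameters above when the
 * legacy driver is loaded as the "megaraid" module:
 *
 *	modprobe megaraid max_cmd_per_lun=16 max_sectors_per_io=64 max_mbox_busy_wait=20
 */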

#define RDINDOOR(adapter)	readl((adapter)->mmio_base + 0x20)
#define RDOUTDOOR(adapter)	readl((adapter)->mmio_base + 0x2C)
#define WRINDOOR(adapter,value)	 writel(value, (adapter)->mmio_base + 0x20)
#define WROUTDOOR(adapter,value) writel(value, (adapter)->mmio_base + 0x2C)

/*
 * Global variables
 */

static int hba_count;
static adapter_t *hba_soft_state[MAX_CONTROLLERS];
static struct proc_dir_entry *mega_proc_dir_entry;

/* For controller re-ordering */
static struct mega_hbas mega_hbas[MAX_CONTROLLERS];

static long
megadev_unlocked_ioctl(struct file *filep, unsigned int cmd, unsigned long arg);

/*
 * The File Operations structure for the serial/ioctl interface of the driver
 */
static const struct file_operations megadev_fops = {
	.owner		= THIS_MODULE,
	.unlocked_ioctl	= megadev_unlocked_ioctl,
	.open		= megadev_open,
	.llseek		= noop_llseek,
};

/*
 * Array of structures for storing the information about the controllers.
 * This information is sent to the user level applications, when they do an
 * ioctl for this information.
 */
static struct mcontroller mcontroller[MAX_CONTROLLERS];

/* The current driver version */
static u32 driver_ver = 0x02000000;

/* major number used by the device for character interface */
static int major;

#define IS_RAID_CH(hba, ch)	(((hba)->mega_ch_class >> (ch)) & 0x01)


/*
 * Debug variable to print some diagnostic messages
 */
static int trace_level;

/**
 * mega_setup_mailbox()
 * @adapter: pointer to our soft state
 *
 * Allocates 8-byte-aligned memory for the handshake mailbox.
 */
static int
mega_setup_mailbox(adapter_t *adapter)
{
	unsigned long	align;

	adapter->una_mbox64 = dma_alloc_coherent(&adapter->dev->dev,
						 sizeof(mbox64_t),
						 &adapter->una_mbox64_dma,
						 GFP_KERNEL);

	if( !adapter->una_mbox64 ) return -1;

	adapter->mbox = &adapter->una_mbox64->mbox;

	adapter->mbox = (mbox_t *)((((unsigned long)adapter->mbox) + 15) &
				   (~0UL ^ 0xFUL));

	adapter->mbox64 = (mbox64_t *)(((unsigned long)adapter->mbox) - 8);

	align = ((void *)adapter->mbox) - ((void *)&adapter->una_mbox64->mbox);

	adapter->mbox_dma = adapter->una_mbox64_dma + 8 + align;

	/*
	 * Register the mailbox if the controller is an io-mapped controller
	 */
	if( adapter->flag & BOARD_IOMAP ) {

		outb(adapter->mbox_dma & 0xFF,
				adapter->host->io_port + MBOX_PORT0);

		outb((adapter->mbox_dma >> 8) & 0xFF,
				adapter->host->io_port + MBOX_PORT1);

		outb((adapter->mbox_dma >> 16) & 0xFF,
				adapter->host->io_port + MBOX_PORT2);

		outb((adapter->mbox_dma >> 24) & 0xFF,
				adapter->host->io_port + MBOX_PORT3);

		outb(ENABLE_MBOX_BYTE,
				adapter->host->io_port + ENABLE_MBOX_REGION);

		irq_ack(adapter);

		irq_enable(adapter);
	}

	return 0;
}
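
/*
 * A sketch of the layout produced by mega_setup_mailbox() above (derived
 * from its pointer arithmetic; the mbox64_t header is assumed to be the two
 * 32-bit xfer_segment words used in issue_scb()):
 *
 *   una_mbox64 -> [ 0..15 bytes padding ][ xfer_segment_lo | hi ][ mbox_t ]
 *                                        ^ adapter->mbox64      ^ adapter->mbox (16-byte aligned)
 *
 * adapter->mbox_dma is given the same offset from una_mbox64_dma, so the
 * controller is handed the aligned mailbox address.
 */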


/*
 * mega_query_adapter()
 * @adapter: pointer to our soft state
 *
 * Issue the adapter inquiry commands to the controller and find out
 * information and parameters about the attached devices.
 */
static int
mega_query_adapter(adapter_t *adapter)
{
	dma_addr_t	prod_info_dma_handle;
	mega_inquiry3	*inquiry3;
	u8	raw_mbox[sizeof(struct mbox_out)];
	mbox_t	*mbox;
	int	retval;

	/* Initialize adapter inquiry mailbox */

	mbox = (mbox_t *)raw_mbox;

	memset((void *)adapter->mega_buffer, 0, MEGA_BUFFER_SIZE);
	memset(&mbox->m_out, 0, sizeof(raw_mbox));

	/*
	 * Try to issue the Inquiry3 command; if that fails, issue the older
	 * adapter inquiry command and update the enquiry3 structure from
	 * its data.
	 */
	mbox->m_out.xferaddr = (u32)adapter->buf_dma_handle;

	inquiry3 = (mega_inquiry3 *)adapter->mega_buffer;

	raw_mbox[0] = FC_NEW_CONFIG;		/* i.e. mbox->cmd=0xA1 */
	raw_mbox[2] = NC_SUBOP_ENQUIRY3;	/* i.e. 0x0F */
	raw_mbox[3] = ENQ3_GET_SOLICITED_FULL;	/* i.e. 0x02 */

	/* Issue a blocking command to the card */
	if ((retval = issue_scb_block(adapter, raw_mbox))) {
		/* the adapter does not support 40ld */

		mraid_ext_inquiry	*ext_inq;
		mraid_inquiry		*inq;
		dma_addr_t		dma_handle;

		ext_inq = dma_alloc_coherent(&adapter->dev->dev,
					     sizeof(mraid_ext_inquiry),
					     &dma_handle, GFP_KERNEL);

		if( ext_inq == NULL ) return -1;

		inq = &ext_inq->raid_inq;

		mbox->m_out.xferaddr = (u32)dma_handle;

		/* issue old 0x04 command to adapter */
		mbox->m_out.cmd = MEGA_MBOXCMD_ADPEXTINQ;

		issue_scb_block(adapter, raw_mbox);

		/*
		 * update Enquiry3 and ProductInfo structures with
		 * mraid_inquiry structure
		 */
		mega_8_to_40ld(inq, inquiry3,
			       (mega_product_info *)&adapter->product_info);

		dma_free_coherent(&adapter->dev->dev,
				  sizeof(mraid_ext_inquiry), ext_inq,
				  dma_handle);

	} else {		/* adapter supports 40ld */
		adapter->flag |= BOARD_40LD;

		/*
		 * get product_info, which is static information and will be
		 * unchanged
		 */
		prod_info_dma_handle = dma_map_single(&adapter->dev->dev,
						      (void *)&adapter->product_info,
						      sizeof(mega_product_info),
						      DMA_FROM_DEVICE);

		mbox->m_out.xferaddr = prod_info_dma_handle;

		raw_mbox[0] = FC_NEW_CONFIG;		/* i.e. mbox->cmd=0xA1 */
		raw_mbox[2] = NC_SUBOP_PRODUCT_INFO;	/* i.e. 0x0E */

		if ((retval = issue_scb_block(adapter, raw_mbox)))
			dev_warn(&adapter->dev->dev,
				 "Product_info cmd failed with error: %d\n",
				 retval);

		dma_unmap_single(&adapter->dev->dev, prod_info_dma_handle,
				 sizeof(mega_product_info), DMA_FROM_DEVICE);
	}


	/*
	 * kernel scans the channels from 0 to <= max_channel
	 */
	adapter->host->max_channel =
		adapter->product_info.nchannels + NVIRT_CHAN - 1;

	adapter->host->max_id = 16;	/* max targets per channel */

	adapter->host->max_lun = 7;	/* Up to 7 luns for non disk devices */

	adapter->host->cmd_per_lun = max_cmd_per_lun;

	adapter->numldrv = inquiry3->num_ldrv;

	adapter->max_cmds = adapter->product_info.max_commands;

	if(adapter->max_cmds > MAX_COMMANDS)
		adapter->max_cmds = MAX_COMMANDS;

	adapter->host->can_queue = adapter->max_cmds - 1;

	/*
	 * Get the maximum number of scatter-gather elements supported by this
	 * firmware
	 */
	mega_get_max_sgl(adapter);

	adapter->host->sg_tablesize = adapter->sglen;

	/* use HP firmware and bios version encoding
	   Note: fw_version[0|1] and bios_version[0|1] were originally shifted
	   right 8 bits making them zero. This 0 value was hardcoded to fix
	   sparse warnings. */
	if (adapter->product_info.subsysvid == PCI_VENDOR_ID_HP) {
		snprintf(adapter->fw_version, sizeof(adapter->fw_version),
			 "%c%d%d.%d%d",
			 adapter->product_info.fw_version[2],
			 0,
			 adapter->product_info.fw_version[1] & 0x0f,
			 0,
			 adapter->product_info.fw_version[0] & 0x0f);
		snprintf(adapter->bios_version, sizeof(adapter->fw_version),
			 "%c%d%d.%d%d",
			 adapter->product_info.bios_version[2],
			 0,
			 adapter->product_info.bios_version[1] & 0x0f,
			 0,
			 adapter->product_info.bios_version[0] & 0x0f);
	} else {
		memcpy(adapter->fw_version,
		       (char *)adapter->product_info.fw_version, 4);
		adapter->fw_version[4] = 0;

		memcpy(adapter->bios_version,
		       (char *)adapter->product_info.bios_version, 4);

		adapter->bios_version[4] = 0;
	}

	dev_notice(&adapter->dev->dev, "[%s:%s] detected %d logical drives\n",
		   adapter->fw_version, adapter->bios_version, adapter->numldrv);

	/*
	 * Do we support extended (>10 bytes) cdbs
	 */
	adapter->support_ext_cdb = mega_support_ext_cdb(adapter);
	if (adapter->support_ext_cdb)
		dev_notice(&adapter->dev->dev, "supports extended CDBs\n");


	return 0;
}

/**
 * mega_runpendq()
 * @adapter: pointer to our soft state
 *
 * Runs through the list of pending requests.
 */
static inline void
mega_runpendq(adapter_t *adapter)
{
	if(!list_empty(&adapter->pending_list))
		__mega_runpendq(adapter);
}

/*
 * megaraid_queue()
 * @scmd - Issue this scsi command
 *
 * The command queuing entry point for the mid-layer.
 */
static int megaraid_queue_lck(struct scsi_cmnd *scmd)
{
	adapter_t	*adapter;
	scb_t	*scb;
	int	busy = 0;
	unsigned long flags;

	adapter = (adapter_t *)scmd->device->host->hostdata;

	/*
	 * Allocate and build a SCB request
	 * busy flag will be set if mega_build_cmd() could not allocate a
	 * scb. We will return non-zero status in that case.
	 * NOTE: scb can be null even though certain commands completed
	 * successfully, e.g., MODE_SENSE and TEST_UNIT_READY, we would
	 * return 0 in that case.
	 */

	spin_lock_irqsave(&adapter->lock, flags);
	scb = mega_build_cmd(adapter, scmd, &busy);
	if (!scb)
		goto out;

	scb->state |= SCB_PENDQ;
	list_add_tail(&scb->list, &adapter->pending_list);

	/*
	 * Check if the HBA is in quiescent state, e.g., during a
	 * delete logical drive operation. If it is, don't run
	 * the pending_list.
	 */
	if (atomic_read(&adapter->quiescent) == 0)
		mega_runpendq(adapter);

	busy = 0;
 out:
	spin_unlock_irqrestore(&adapter->lock, flags);
	return busy;
}

static DEF_SCSI_QCMD(megaraid_queue)
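
/*
 * DEF_SCSI_QCMD() (a SCSI midlayer helper) generates the megaraid_queue()
 * wrapper that is used as the ->queuecommand entry point; the generated
 * function takes the Scsi_Host lock around the call to megaraid_queue_lck()
 * above.
 */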

/**
 * mega_allocate_scb()
 * @adapter: pointer to our soft state
 * @cmd: scsi command from the mid-layer
 *
 * Allocate a SCB structure. This is the central structure for controller
 * commands.
 */
static inline scb_t *
mega_allocate_scb(adapter_t *adapter, struct scsi_cmnd *cmd)
{
	struct list_head *head = &adapter->free_list;
	scb_t	*scb;

	/* Unlink command from Free List */
	if( !list_empty(head) ) {

		scb = list_entry(head->next, scb_t, list);

		list_del_init(head->next);

		scb->state = SCB_ACTIVE;
		scb->cmd = cmd;
		scb->dma_type = MEGA_DMA_TYPE_NONE;

		return scb;
	}

	return NULL;
}

/**
 * mega_get_ldrv_num()
 * @adapter: pointer to our soft state
 * @cmd: scsi mid layer command
 * @channel: channel on the controller
 *
 * Calculate the logical drive number based on the information in the scsi
 * command and the channel number.
 */
static inline int
mega_get_ldrv_num(adapter_t *adapter, struct scsi_cmnd *cmd, int channel)
{
	int	tgt;
	int	ldrv_num;

	tgt = cmd->device->id;

	if ( tgt > adapter->this_id )
		tgt--;	/* we do not get inquiries for initiator id */

	ldrv_num = (channel * 15) + tgt;


	/*
	 * If we have a logical drive with boot enabled, present it first
	 */
	if( adapter->boot_ldrv_enabled ) {
		if( ldrv_num == 0 ) {
			ldrv_num = adapter->boot_ldrv;
		}
		else {
			if( ldrv_num <= adapter->boot_ldrv ) {
				ldrv_num--;
			}
		}
	}

	/*
	 * If the "delete logical drive" feature is enabled on this
	 * controller, do this only if at least one delete logical drive
	 * operation was done.
	 *
	 * Also, after a logical drive deletion, instead of the logical drive
	 * number, the value returned should be 0x80 + logical drive id.
	 *
	 * This is valid only for IO commands.
	 */

	if (adapter->support_random_del && adapter->read_ldidmap )
		switch (cmd->cmnd[0]) {
		case READ_6:
		case WRITE_6:
		case READ_10:
		case WRITE_10:
			ldrv_num += 0x80;
		}

	return ldrv_num;
}
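
/*
 * Worked example for mega_get_ldrv_num() above, with hypothetical values:
 * if adapter->this_id == 7 and no boot logical drive is configured, a
 * command for channel 1 / target 9 gives tgt = 8 (the initiator id is
 * skipped), so ldrv_num = 1 * 15 + 8 = 23.
 */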

/**
 * mega_build_cmd()
 * @adapter: pointer to our soft state
 * @cmd: Prepare using this scsi command
 * @busy: busy flag if no resources
 *
 * Prepares a command and a scatter gather list for the controller. This
 * routine also finds out if the command is intended for a logical drive or
 * a physical device and prepares the controller command accordingly.
 *
 * We also re-order the logical drives and physical devices based on their
 * boot settings.
 */
static scb_t *
mega_build_cmd(adapter_t *adapter, struct scsi_cmnd *cmd, int *busy)
{
	mega_passthru	*pthru;
	scb_t	*scb;
	mbox_t	*mbox;
	u32	seg;
	char	islogical;
	int	max_ldrv_num;
	int	channel = 0;
	int	target = 0;
	int	ldrv_num = 0;	/* logical drive number */

	/*
	 * We know what channels our logical drives are on - mega_find_card()
	 */
	islogical = adapter->logdrv_chan[cmd->device->channel];

	/*
	 * The theory: if a physical drive is chosen for boot, all the
	 * physical devices are exported before the logical drives, otherwise
	 * physical devices are pushed after logical drives, in which case
	 * the kernel sees the physical devices on a virtual channel, which
	 * is then converted to the actual channel on the HBA.
	 */
	if( adapter->boot_pdrv_enabled ) {
		if( islogical ) {
			/* logical channel */
			channel = cmd->device->channel -
				adapter->product_info.nchannels;
		}
		else {
			/* this is physical channel */
			channel = cmd->device->channel;
			target = cmd->device->id;

			/*
			 * When booting from a physical disk, that disk needs
			 * to be exposed first. If both channels are SCSI,
			 * booting from the second channel is not allowed.
			 */
			if( target == 0 ) {
				target = adapter->boot_pdrv_tgt;
			}
			else if( target == adapter->boot_pdrv_tgt ) {
				target = 0;
			}
		}
	}
	else {
		if( islogical ) {
			/* this is the logical channel */
			channel = cmd->device->channel;
		}
		else {
			/* physical channel */
			channel = cmd->device->channel - NVIRT_CHAN;
			target = cmd->device->id;
		}
	}


	if(islogical) {

		/* have just LUN 0 for each target on virtual channels */
		if (cmd->device->lun) {
			cmd->result = (DID_BAD_TARGET << 16);
			scsi_done(cmd);
			return NULL;
		}

		ldrv_num = mega_get_ldrv_num(adapter, cmd, channel);


		max_ldrv_num = (adapter->flag & BOARD_40LD) ?
			MAX_LOGICAL_DRIVES_40LD : MAX_LOGICAL_DRIVES_8LD;

		/*
		 * max_ldrv_num increases by 0x80 if some logical drive was
		 * deleted.
		 */
		if(adapter->read_ldidmap)
			max_ldrv_num += 0x80;

		if(ldrv_num > max_ldrv_num ) {
			cmd->result = (DID_BAD_TARGET << 16);
			scsi_done(cmd);
			return NULL;
		}

	}
	else {
		if( cmd->device->lun > 7) {
			/*
			 * Do not support lun >7 for physically accessed
			 * devices
			 */
			cmd->result = (DID_BAD_TARGET << 16);
			scsi_done(cmd);
			return NULL;
		}
	}

	/*
	 *
	 * Logical drive commands
	 *
	 */
	if(islogical) {
		switch (cmd->cmnd[0]) {
		case TEST_UNIT_READY:
#if MEGA_HAVE_CLUSTERING
			/*
			 * Do we support clustering and is the support
			 * enabled? If not, return success always.
			 */
			if( !adapter->has_cluster ) {
				cmd->result = (DID_OK << 16);
				scsi_done(cmd);
				return NULL;
			}

			if(!(scb = mega_allocate_scb(adapter, cmd))) {
				*busy = 1;
				return NULL;
			}

			scb->raw_mbox[0] = MEGA_CLUSTER_CMD;
			scb->raw_mbox[2] = MEGA_RESERVATION_STATUS;
			scb->raw_mbox[3] = ldrv_num;

			scb->dma_direction = DMA_NONE;

			return scb;
#else
			cmd->result = (DID_OK << 16);
			scsi_done(cmd);
			return NULL;
#endif

		case MODE_SENSE: {
			char *buf;
			struct scatterlist *sg;

			sg = scsi_sglist(cmd);
			buf = kmap_atomic(sg_page(sg)) + sg->offset;

			memset(buf, 0, cmd->cmnd[4]);
			kunmap_atomic(buf - sg->offset);

			cmd->result = (DID_OK << 16);
			scsi_done(cmd);
			return NULL;
		}

		case READ_CAPACITY:
		case INQUIRY:

			if(!(adapter->flag & (1L << cmd->device->channel))) {

				dev_notice(&adapter->dev->dev,
					   "scsi%d: scanning scsi channel %d "
					   "for logical drives\n",
					   adapter->host->host_no,
					   cmd->device->channel);

				adapter->flag |= (1L << cmd->device->channel);
			}

			/* Allocate a SCB and initialize passthru */
			if(!(scb = mega_allocate_scb(adapter, cmd))) {
				*busy = 1;
				return NULL;
			}
			pthru = scb->pthru;

			mbox = (mbox_t *)scb->raw_mbox;
			memset(mbox, 0, sizeof(scb->raw_mbox));
			memset(pthru, 0, sizeof(mega_passthru));

			pthru->timeout = 0;
			pthru->ars = 1;
			pthru->reqsenselen = 14;
			pthru->islogical = 1;
			pthru->logdrv = ldrv_num;
			pthru->cdblen = cmd->cmd_len;
			memcpy(pthru->cdb, cmd->cmnd, cmd->cmd_len);

			if( adapter->has_64bit_addr ) {
				mbox->m_out.cmd = MEGA_MBOXCMD_PASSTHRU64;
			}
			else {
				mbox->m_out.cmd = MEGA_MBOXCMD_PASSTHRU;
			}

			scb->dma_direction = DMA_FROM_DEVICE;

			pthru->numsgelements = mega_build_sglist(adapter, scb,
					&pthru->dataxferaddr, &pthru->dataxferlen);

			mbox->m_out.xferaddr = scb->pthru_dma_addr;

			return scb;

		case READ_6:
		case WRITE_6:
		case READ_10:
		case WRITE_10:
		case READ_12:
		case WRITE_12:

			/* Allocate a SCB and initialize mailbox */
			if(!(scb = mega_allocate_scb(adapter, cmd))) {
				*busy = 1;
				return NULL;
			}
			mbox = (mbox_t *)scb->raw_mbox;

			memset(mbox, 0, sizeof(scb->raw_mbox));
			mbox->m_out.logdrv = ldrv_num;

			/*
			 * A little hack: 2nd bit is zero for all scsi read
			 * commands and is set for all scsi write commands
			 */
			if( adapter->has_64bit_addr ) {
				mbox->m_out.cmd = (*cmd->cmnd & 0x02) ?
					MEGA_MBOXCMD_LWRITE64:
					MEGA_MBOXCMD_LREAD64 ;
			}
			else {
				mbox->m_out.cmd = (*cmd->cmnd & 0x02) ?
					MEGA_MBOXCMD_LWRITE:
					MEGA_MBOXCMD_LREAD ;
			}

			/*
			 * 6-byte READ(0x08) or WRITE(0x0A) cdb
			 */
			if( cmd->cmd_len == 6 ) {
				mbox->m_out.numsectors = (u32)cmd->cmnd[4];
				mbox->m_out.lba =
					((u32)cmd->cmnd[1] << 16) |
					((u32)cmd->cmnd[2] << 8) |
					(u32)cmd->cmnd[3];

				mbox->m_out.lba &= 0x1FFFFF;

#if MEGA_HAVE_STATS
				/*
				 * Take modulo 0x80, since the logical drive
				 * number increases by 0x80 when a logical
				 * drive was deleted
				 */
				if (*cmd->cmnd == READ_6) {
					adapter->nreads[ldrv_num%0x80]++;
					adapter->nreadblocks[ldrv_num%0x80] +=
						mbox->m_out.numsectors;
				} else {
					adapter->nwrites[ldrv_num%0x80]++;
					adapter->nwriteblocks[ldrv_num%0x80] +=
						mbox->m_out.numsectors;
				}
#endif
			}

			/*
			 * 10-byte READ(0x28) or WRITE(0x2A) cdb
			 */
			if( cmd->cmd_len == 10 ) {
				mbox->m_out.numsectors =
					(u32)cmd->cmnd[8] |
					((u32)cmd->cmnd[7] << 8);
				mbox->m_out.lba =
					((u32)cmd->cmnd[2] << 24) |
					((u32)cmd->cmnd[3] << 16) |
					((u32)cmd->cmnd[4] << 8) |
					(u32)cmd->cmnd[5];

#if MEGA_HAVE_STATS
				if (*cmd->cmnd == READ_10) {
					adapter->nreads[ldrv_num%0x80]++;
					adapter->nreadblocks[ldrv_num%0x80] +=
						mbox->m_out.numsectors;
				} else {
					adapter->nwrites[ldrv_num%0x80]++;
					adapter->nwriteblocks[ldrv_num%0x80] +=
						mbox->m_out.numsectors;
				}
#endif
			}

			/*
			 * 12-byte READ(0xA8) or WRITE(0xAA) cdb
			 */
			if( cmd->cmd_len == 12 ) {
				mbox->m_out.lba =
					((u32)cmd->cmnd[2] << 24) |
					((u32)cmd->cmnd[3] << 16) |
					((u32)cmd->cmnd[4] << 8) |
					(u32)cmd->cmnd[5];

				mbox->m_out.numsectors =
					((u32)cmd->cmnd[6] << 24) |
					((u32)cmd->cmnd[7] << 16) |
					((u32)cmd->cmnd[8] << 8) |
					(u32)cmd->cmnd[9];

#if MEGA_HAVE_STATS
				if (*cmd->cmnd == READ_12) {
					adapter->nreads[ldrv_num%0x80]++;
					adapter->nreadblocks[ldrv_num%0x80] +=
						mbox->m_out.numsectors;
				} else {
					adapter->nwrites[ldrv_num%0x80]++;
					adapter->nwriteblocks[ldrv_num%0x80] +=
						mbox->m_out.numsectors;
				}
#endif
			}

			/*
			 * If it is a read command
			 */
			if( (*cmd->cmnd & 0x0F) == 0x08 ) {
				scb->dma_direction = DMA_FROM_DEVICE;
			}
			else {
				scb->dma_direction = DMA_TO_DEVICE;
			}

			/* Calculate Scatter-Gather info */
			mbox->m_out.numsgelements = mega_build_sglist(adapter, scb,
					(u32 *)&mbox->m_out.xferaddr, &seg);

			return scb;

#if MEGA_HAVE_CLUSTERING
		case RESERVE:
		case RELEASE:

			/*
			 * Do we support clustering and is the support enabled
			 */
			if( ! adapter->has_cluster ) {

				cmd->result = (DID_BAD_TARGET << 16);
				scsi_done(cmd);
				return NULL;
			}

			/* Allocate a SCB and initialize mailbox */
			if(!(scb = mega_allocate_scb(adapter, cmd))) {
				*busy = 1;
				return NULL;
			}

			scb->raw_mbox[0] = MEGA_CLUSTER_CMD;
			scb->raw_mbox[2] = ( *cmd->cmnd == RESERVE ) ?
				MEGA_RESERVE_LD : MEGA_RELEASE_LD;

			scb->raw_mbox[3] = ldrv_num;

			scb->dma_direction = DMA_NONE;

			return scb;
#endif

		default:
			cmd->result = (DID_BAD_TARGET << 16);
			scsi_done(cmd);
			return NULL;
		}
	}

	/*
	 * Passthru drive commands
	 */
	else {
		/* Allocate a SCB and initialize passthru */
		if(!(scb = mega_allocate_scb(adapter, cmd))) {
			*busy = 1;
			return NULL;
		}

		mbox = (mbox_t *)scb->raw_mbox;
		memset(mbox, 0, sizeof(scb->raw_mbox));

		if( adapter->support_ext_cdb ) {

			mega_prepare_extpassthru(adapter, scb, cmd,
						 channel, target);

			mbox->m_out.cmd = MEGA_MBOXCMD_EXTPTHRU;

			mbox->m_out.xferaddr = scb->epthru_dma_addr;

		}
		else {

			pthru = mega_prepare_passthru(adapter, scb, cmd,
						      channel, target);

			/* Initialize mailbox */
			if( adapter->has_64bit_addr ) {
				mbox->m_out.cmd = MEGA_MBOXCMD_PASSTHRU64;
			}
			else {
				mbox->m_out.cmd = MEGA_MBOXCMD_PASSTHRU;
			}

			mbox->m_out.xferaddr = scb->pthru_dma_addr;

		}
		return scb;
	}
	return NULL;
}
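
/*
 * Example of the READ/WRITE CDB decoding in mega_build_cmd() above, using a
 * hypothetical 10-byte READ(10) CDB of 28 00 00 00 12 34 00 00 08 00:
 * the shifts yield mbox->m_out.lba = 0x1234 and mbox->m_out.numsectors = 8.
 */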

/**
 * mega_prepare_passthru()
 * @adapter: pointer to our soft state
 * @scb: our scsi control block
 * @cmd: scsi command from the mid-layer
 * @channel: actual channel on the controller
 * @target: actual id on the controller.
 *
 * Prepare a command for the scsi physical devices.
 */
static mega_passthru *
mega_prepare_passthru(adapter_t *adapter, scb_t *scb, struct scsi_cmnd *cmd,
		      int channel, int target)
{
	mega_passthru *pthru;

	pthru = scb->pthru;
	memset(pthru, 0, sizeof (mega_passthru));

	/* 0=6sec/1=60sec/2=10min/3=3hrs */
	pthru->timeout = 2;

	pthru->ars = 1;
	pthru->reqsenselen = 14;
	pthru->islogical = 0;

	pthru->channel = (adapter->flag & BOARD_40LD) ? 0 : channel;

	pthru->target = (adapter->flag & BOARD_40LD) ?
		(channel << 4) | target : target;

	pthru->cdblen = cmd->cmd_len;
	pthru->logdrv = cmd->device->lun;

	memcpy(pthru->cdb, cmd->cmnd, cmd->cmd_len);

	/* Not sure about the direction */
	scb->dma_direction = DMA_BIDIRECTIONAL;

	/* Special Code for Handling READ_CAPA/ INQ using bounce buffers */
	switch (cmd->cmnd[0]) {
	case INQUIRY:
	case READ_CAPACITY:
		if(!(adapter->flag & (1L << cmd->device->channel))) {

			dev_notice(&adapter->dev->dev,
				   "scsi%d: scanning scsi channel %d [P%d] "
				   "for physical devices\n",
				   adapter->host->host_no,
				   cmd->device->channel, channel);

			adapter->flag |= (1L << cmd->device->channel);
		}
		fallthrough;
	default:
		pthru->numsgelements = mega_build_sglist(adapter, scb,
				&pthru->dataxferaddr, &pthru->dataxferlen);
		break;
	}
	return pthru;
}


/**
 * mega_prepare_extpassthru()
 * @adapter: pointer to our soft state
 * @scb: our scsi control block
 * @cmd: scsi command from the mid-layer
 * @channel: actual channel on the controller
 * @target: actual id on the controller.
 *
 * Prepare a command for the scsi physical devices. This routine prepares
 * commands for devices which can take extended CDBs (>10 bytes).
 */
static mega_ext_passthru *
mega_prepare_extpassthru(adapter_t *adapter, scb_t *scb,
			 struct scsi_cmnd *cmd,
			 int channel, int target)
{
	mega_ext_passthru	*epthru;

	epthru = scb->epthru;
	memset(epthru, 0, sizeof(mega_ext_passthru));

	/* 0=6sec/1=60sec/2=10min/3=3hrs */
	epthru->timeout = 2;

	epthru->ars = 1;
	epthru->reqsenselen = 14;
	epthru->islogical = 0;

	epthru->channel = (adapter->flag & BOARD_40LD) ? 0 : channel;
	epthru->target = (adapter->flag & BOARD_40LD) ?
		(channel << 4) | target : target;

	epthru->cdblen = cmd->cmd_len;
	epthru->logdrv = cmd->device->lun;

	memcpy(epthru->cdb, cmd->cmnd, cmd->cmd_len);

	/* Not sure about the direction */
	scb->dma_direction = DMA_BIDIRECTIONAL;

	switch(cmd->cmnd[0]) {
	case INQUIRY:
	case READ_CAPACITY:
		if(!(adapter->flag & (1L << cmd->device->channel))) {

			dev_notice(&adapter->dev->dev,
				   "scsi%d: scanning scsi channel %d [P%d] "
				   "for physical devices\n",
				   adapter->host->host_no,
				   cmd->device->channel, channel);

			adapter->flag |= (1L << cmd->device->channel);
		}
		fallthrough;
	default:
		epthru->numsgelements = mega_build_sglist(adapter, scb,
				&epthru->dataxferaddr, &epthru->dataxferlen);
		break;
	}

	return epthru;
}

static void
__mega_runpendq(adapter_t *adapter)
{
	scb_t *scb;
	struct list_head *pos, *next;

	/* Issue any pending commands to the card */
	list_for_each_safe(pos, next, &adapter->pending_list) {

		scb = list_entry(pos, scb_t, list);

		if( !(scb->state & SCB_ISSUED) ) {

			if( issue_scb(adapter, scb) != 0 )
				return;
		}
	}

	return;
}


/**
 * issue_scb()
 * @adapter: pointer to our soft state
 * @scb: scsi control block
 *
 * Post a command to the card if the mailbox is available, otherwise return
 * busy. We also take the scb from the pending list if the mailbox is
 * available.
 */
static int
issue_scb(adapter_t *adapter, scb_t *scb)
{
	volatile mbox64_t	*mbox64 = adapter->mbox64;
	volatile mbox_t		*mbox = adapter->mbox;
	unsigned int	i = 0;

	if(unlikely(mbox->m_in.busy)) {
		do {
			udelay(1);
			i++;
		} while( mbox->m_in.busy && (i < max_mbox_busy_wait) );

		if(mbox->m_in.busy) return -1;
	}

	/* Copy mailbox data into host structure */
	memcpy((char *)&mbox->m_out, (char *)scb->raw_mbox,
	       sizeof(struct mbox_out));

	mbox->m_out.cmdid = scb->idx;	/* Set cmdid */
	mbox->m_in.busy = 1;		/* Set busy */


	/*
	 * Increment the pending queue counter
	 */
	atomic_inc(&adapter->pend_cmds);

	switch (mbox->m_out.cmd) {
	case MEGA_MBOXCMD_LREAD64:
	case MEGA_MBOXCMD_LWRITE64:
	case MEGA_MBOXCMD_PASSTHRU64:
	case MEGA_MBOXCMD_EXTPTHRU:
		mbox64->xfer_segment_lo = mbox->m_out.xferaddr;
		mbox64->xfer_segment_hi = 0;
		mbox->m_out.xferaddr = 0xFFFFFFFF;
		break;
	default:
		mbox64->xfer_segment_lo = 0;
		mbox64->xfer_segment_hi = 0;
	}

	/*
	 * post the command
	 */
	scb->state |= SCB_ISSUED;

	if( likely(adapter->flag & BOARD_MEMMAP) ) {
		mbox->m_in.poll = 0;
		mbox->m_in.ack = 0;
		WRINDOOR(adapter, adapter->mbox_dma | 0x1);
	}
	else {
		irq_enable(adapter);
		issue_command(adapter);
	}

	return 0;
}
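
/*
 * For the 64-bit and extended passthru commands, issue_scb() above (and
 * issue_scb_block() below) place the real transfer address in
 * mbox64->xfer_segment_lo and set mbox->m_out.xferaddr to 0xFFFFFFFF, which
 * apparently tells the firmware to take the address from the 64-bit mailbox
 * extension instead.
 */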

/*
 * Wait until the controller's mailbox is available
 */
static inline int
mega_busywait_mbox (adapter_t *adapter)
{
	if (adapter->mbox->m_in.busy)
		return __mega_busywait_mbox(adapter);
	return 0;
}

/**
 * issue_scb_block()
 * @adapter: pointer to our soft state
 * @raw_mbox: the mailbox
 *
 * Issue a scb in synchronous and non-interrupt mode
 */
static int
issue_scb_block(adapter_t *adapter, u_char *raw_mbox)
{
	volatile mbox64_t *mbox64 = adapter->mbox64;
	volatile mbox_t *mbox = adapter->mbox;
	u8	byte;

	/* Wait until mailbox is free */
	if(mega_busywait_mbox (adapter))
		goto bug_blocked_mailbox;

	/* Copy mailbox data into host structure */
	memcpy((char *) mbox, raw_mbox, sizeof(struct mbox_out));
	mbox->m_out.cmdid = 0xFE;
	mbox->m_in.busy = 1;

	switch (raw_mbox[0]) {
	case MEGA_MBOXCMD_LREAD64:
	case MEGA_MBOXCMD_LWRITE64:
	case MEGA_MBOXCMD_PASSTHRU64:
	case MEGA_MBOXCMD_EXTPTHRU:
		mbox64->xfer_segment_lo = mbox->m_out.xferaddr;
		mbox64->xfer_segment_hi = 0;
		mbox->m_out.xferaddr = 0xFFFFFFFF;
		break;
	default:
		mbox64->xfer_segment_lo = 0;
		mbox64->xfer_segment_hi = 0;
	}

	if( likely(adapter->flag & BOARD_MEMMAP) ) {
		mbox->m_in.poll = 0;
		mbox->m_in.ack = 0;
		mbox->m_in.numstatus = 0xFF;
		mbox->m_in.status = 0xFF;
		WRINDOOR(adapter, adapter->mbox_dma | 0x1);

		while((volatile u8)mbox->m_in.numstatus == 0xFF)
			cpu_relax();

		mbox->m_in.numstatus = 0xFF;

		while( (volatile u8)mbox->m_in.poll != 0x77 )
			cpu_relax();

		mbox->m_in.poll = 0;
		mbox->m_in.ack = 0x77;

		WRINDOOR(adapter, adapter->mbox_dma | 0x2);

		while(RDINDOOR(adapter) & 0x2)
			cpu_relax();
	}
	else {
		irq_disable(adapter);
		issue_command(adapter);

		while (!((byte = irq_state(adapter)) & INTR_VALID))
			cpu_relax();

		set_irq_state(adapter, byte);
		irq_enable(adapter);
		irq_ack(adapter);
	}

	return mbox->m_in.status;

bug_blocked_mailbox:
	dev_warn(&adapter->dev->dev, "Blocked mailbox......!!\n");
	udelay (1000);
	return -1;
}


/**
 * megaraid_isr_iomapped()
 * @irq: irq
 * @devp: pointer to our soft state
 *
 * Interrupt service routine for io-mapped controllers.
 * Find out if our device is interrupting. If yes, acknowledge the interrupt
 * and service the completed commands.
 */
static irqreturn_t
megaraid_isr_iomapped(int irq, void *devp)
{
	adapter_t	*adapter = devp;
	unsigned long	flags;
	u8	status;
	u8	nstatus;
	u8	completed[MAX_FIRMWARE_STATUS];
	u8	byte;
	int	handled = 0;


	/*
	 * Loop until the F/W has no more completed commands for us.
	 */
	spin_lock_irqsave(&adapter->lock, flags);

	do {
		/* Check if a valid interrupt is pending */
		byte = irq_state(adapter);
		if( (byte & VALID_INTR_BYTE) == 0 ) {
			/*
			 * No more pending commands
			 */
			goto out_unlock;
		}
		set_irq_state(adapter, byte);

		while((nstatus = (volatile u8)adapter->mbox->m_in.numstatus)
		      == 0xFF)
			cpu_relax();
		adapter->mbox->m_in.numstatus = 0xFF;

		status = adapter->mbox->m_in.status;

		/*
		 * decrement the pending queue counter
		 */
		atomic_sub(nstatus, &adapter->pend_cmds);

		memcpy(completed, (void *)adapter->mbox->m_in.completed,
		       nstatus);

		/* Acknowledge interrupt */
		irq_ack(adapter);

		mega_cmd_done(adapter, completed, nstatus, status);

		mega_rundoneq(adapter);

		handled = 1;

		/* Loop through any pending requests */
		if(atomic_read(&adapter->quiescent) == 0) {
			mega_runpendq(adapter);
		}

	} while(1);

 out_unlock:

	spin_unlock_irqrestore(&adapter->lock, flags);

	return IRQ_RETVAL(handled);
}


/**
 * megaraid_isr_memmapped()
 * @irq: irq
 * @devp: pointer to our soft state
 *
 * Interrupt service routine for memory-mapped controllers.
 * Find out if our device is interrupting. If yes, acknowledge the interrupt
 * and service the completed commands.
 */
static irqreturn_t
megaraid_isr_memmapped(int irq, void *devp)
{
	adapter_t	*adapter = devp;
	unsigned long	flags;
	u8	status;
	u32	dword = 0;
	u8	nstatus;
	u8	completed[MAX_FIRMWARE_STATUS];
	int	handled = 0;


	/*
	 * Loop until the F/W has no more completed commands for us.
	 */
	spin_lock_irqsave(&adapter->lock, flags);

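	/*
	 * An outbound doorbell value of 0x10001234 appears to be the
	 * firmware's "commands completed" signature; writing the same value
	 * back acknowledges it before the completed command ids are read.
	 */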
	do {
		/* Check if a valid interrupt is pending */
		dword = RDOUTDOOR(adapter);
		if(dword != 0x10001234) {
			/*
			 * No more pending commands
			 */
			goto out_unlock;
		}
		WROUTDOOR(adapter, 0x10001234);

		while((nstatus = (volatile u8)adapter->mbox->m_in.numstatus)
		      == 0xFF) {
			cpu_relax();
		}
		adapter->mbox->m_in.numstatus = 0xFF;

		status = adapter->mbox->m_in.status;

		/*
		 * decrement the pending queue counter
		 */
		atomic_sub(nstatus, &adapter->pend_cmds);

		memcpy(completed, (void *)adapter->mbox->m_in.completed,
		       nstatus);

		/* Acknowledge interrupt */
		WRINDOOR(adapter, 0x2);

		handled = 1;

		while( RDINDOOR(adapter) & 0x02 )
			cpu_relax();

		mega_cmd_done(adapter, completed, nstatus, status);

		mega_rundoneq(adapter);

		/* Loop through any pending requests */
		if(atomic_read(&adapter->quiescent) == 0) {
			mega_runpendq(adapter);
		}

	} while(1);

 out_unlock:

	spin_unlock_irqrestore(&adapter->lock, flags);

	return IRQ_RETVAL(handled);
}

/**
 * mega_cmd_done()
 * @adapter: pointer to our soft state
 * @completed: array of ids of completed commands
 * @nstatus: number of completed commands
 * @status: status of the last command completed
 *
 * Complete the commands and call the scsi mid-layer callback hooks.
 */
static void
mega_cmd_done(adapter_t *adapter, u8 completed[], int nstatus, int status)
{
	mega_ext_passthru	*epthru = NULL;
	struct scatterlist	*sgl;
	struct scsi_cmnd	*cmd = NULL;
	mega_passthru		*pthru = NULL;
	mbox_t			*mbox = NULL;
	u8	c;
	scb_t	*scb;
	int	islogical;
	int	cmdid;
	int	i;

	/*
	 * For all the commands completed, call the mid-layer callback
	 * routine and free the scb.
	 */
	for( i = 0; i < nstatus; i++ ) {

		cmdid = completed[i];

		/*
		 * Only free SCBs for the commands coming down from the
		 * mid-layer, not for those issued internally.
		 *
		 * For internal commands, restore the status returned by the
		 * firmware so that the user can interpret it.
		 */
		if (cmdid == CMDID_INT_CMDS) {
			scb = &adapter->int_scb;

			list_del_init(&scb->list);
			scb->state = SCB_FREE;

			adapter->int_status = status;
			complete(&adapter->int_waitq);
		} else {
			scb = &adapter->scb_list[cmdid];

			/*
			 * Make sure f/w has completed a valid command
			 */
			if( !(scb->state & SCB_ISSUED) || scb->cmd == NULL ) {
				dev_crit(&adapter->dev->dev, "invalid command "
					 "Id %d, scb->state:%x, scsi cmd:%p\n",
					 cmdid, scb->state, scb->cmd);

				continue;
			}

			/*
			 * Was an abort issued for this command?
			 */
			if( scb->state & SCB_ABORT ) {

				dev_warn(&adapter->dev->dev,
					 "aborted cmd [%x] complete\n",
					 scb->idx);

				scb->cmd->result = (DID_ABORT << 16);

				list_add_tail(SCSI_LIST(scb->cmd),
					      &adapter->completed_list);

				mega_free_scb(adapter, scb);

				continue;
			}

			/*
			 * Was a reset issued for this command?
			 */
			if( scb->state & SCB_RESET ) {

				dev_warn(&adapter->dev->dev,
					 "reset cmd [%x] complete\n",
					 scb->idx);

				scb->cmd->result = (DID_RESET << 16);

				list_add_tail(SCSI_LIST(scb->cmd),
					      &adapter->completed_list);

				mega_free_scb (adapter, scb);

				continue;
			}

			cmd = scb->cmd;
			pthru = scb->pthru;
			epthru = scb->epthru;
			mbox = (mbox_t *)scb->raw_mbox;

#if MEGA_HAVE_STATS
			{

			int	logdrv = mbox->m_out.logdrv;

			islogical = adapter->logdrv_chan[cmd->channel];
			/*
			 * Maintain an error counter for the logical drive.
			 * Some applications, like an SNMP agent, need such
			 * statistics.
			 */
			if( status && islogical && (cmd->cmnd[0] == READ_6 ||
						cmd->cmnd[0] == READ_10 ||
						cmd->cmnd[0] == READ_12)) {
				/*
				 * Logical drive number increases by 0x80 when
				 * a logical drive is deleted
				 */
				adapter->rd_errors[logdrv%0x80]++;
			}

			if( status && islogical && (cmd->cmnd[0] == WRITE_6 ||
						cmd->cmnd[0] == WRITE_10 ||
						cmd->cmnd[0] == WRITE_12)) {
				/*
				 * Logical drive number increases by 0x80 when
				 * a logical drive is deleted
				 */
				adapter->wr_errors[logdrv%0x80]++;
			}

			}
#endif
		}

		/*
		 * Do not report the presence of hard disks on a RAID
		 * channel: if an INQUIRY was sent and the returned data
		 * indicates a (removable) hard disk that is not a logical
		 * drive, the request should return failure! - PJ
		 */
		islogical = adapter->logdrv_chan[cmd->device->channel];
		if( cmd->cmnd[0] == INQUIRY && !islogical ) {

			sgl = scsi_sglist(cmd);
			if( sg_page(sgl) ) {
				c = *(unsigned char *) sg_virt(&sgl[0]);
			} else {
				dev_warn(&adapter->dev->dev, "invalid sg\n");
				c = 0;
			}

			if(IS_RAID_CH(adapter, cmd->device->channel) &&
			   ((c & 0x1F ) == TYPE_DISK)) {
				status = 0xF0;
			}
		}

		/* clear result; otherwise, success returns corrupt value */
		cmd->result = 0;

		/* Convert MegaRAID status to Linux error code */
		switch (status) {
		case 0x00:	/* SUCCESS, i.e. SCSI_STATUS_GOOD */
			cmd->result |= (DID_OK << 16);
			break;

		case 0x02:	/* ERROR_ABORTED, i.e.
				   SCSI_STATUS_CHECK_CONDITION */

			/* set sense_buffer and result fields */
			if( mbox->m_out.cmd == MEGA_MBOXCMD_PASSTHRU ||
			    mbox->m_out.cmd == MEGA_MBOXCMD_PASSTHRU64 ) {

				memcpy(cmd->sense_buffer, pthru->reqsensearea,
				       14);

				cmd->result = SAM_STAT_CHECK_CONDITION;
			}
			else {
				if (mbox->m_out.cmd == MEGA_MBOXCMD_EXTPTHRU) {

					memcpy(cmd->sense_buffer,
					       epthru->reqsensearea, 14);

					cmd->result = SAM_STAT_CHECK_CONDITION;
				} else
					scsi_build_sense(cmd, 0,
							 ABORTED_COMMAND, 0, 0);
			}
			break;

		case 0x08:	/* ERR_DEST_DRIVE_FAILED, i.e.
				   SCSI_STATUS_BUSY */
			cmd->result |= (DID_BUS_BUSY << 16) | status;
			break;

		default:
#if MEGA_HAVE_CLUSTERING
			/*
			 * If TEST_UNIT_READY fails, we know
			 * MEGA_RESERVATION_STATUS failed
			 */
			if( cmd->cmnd[0] == TEST_UNIT_READY ) {
				cmd->result |= (DID_ERROR << 16) |
					SAM_STAT_RESERVATION_CONFLICT;
			}
			else
			/*
			 * Error code returned is 1 if Reserve or Release
			 * failed or the input parameter is invalid
			 */
			if( status == 1 &&
			    (cmd->cmnd[0] == RESERVE ||
			     cmd->cmnd[0] == RELEASE) ) {

				cmd->result |= (DID_ERROR << 16) |
					SAM_STAT_RESERVATION_CONFLICT;
			}
			else
#endif
				cmd->result |= (DID_BAD_TARGET << 16)|status;
		}

		mega_free_scb(adapter, scb);

		/* Add Scsi_Command to end of completed queue */
		list_add_tail(SCSI_LIST(cmd), &adapter->completed_list);
	}
}

/*
 * mega_rundoneq()
 *
 * Run through the list of completed requests and finish them.
 */
static void
mega_rundoneq (adapter_t *adapter)
{
	struct scsi_cmnd *cmd;
	struct list_head *pos;

	list_for_each(pos, &adapter->completed_list) {

		struct scsi_pointer *spos = (struct scsi_pointer *)pos;

		cmd = list_entry(spos, struct scsi_cmnd, SCp);
		scsi_done(cmd);
	}

	INIT_LIST_HEAD(&adapter->completed_list);
}


/*
 * Free a SCB structure
 * Note: We assume the scsi command associated with this scb is not freed yet.
 */
static void
mega_free_scb(adapter_t *adapter, scb_t *scb)
{
	switch( scb->dma_type ) {

	case MEGA_DMA_TYPE_NONE:
		break;

	case MEGA_SGLIST:
		scsi_dma_unmap(scb->cmd);
		break;
	default:
		break;
	}

	/*
	 * Remove from the pending list
	 */
	list_del_init(&scb->list);

	/* Link the scb back into free list */
	scb->state = SCB_FREE;
	scb->cmd = NULL;

	list_add(&scb->list, &adapter->free_list);
}


static int
__mega_busywait_mbox (adapter_t *adapter)
{
	volatile mbox_t *mbox = adapter->mbox;
	long counter;

	for (counter = 0; counter < 10000; counter++) {
		if (!mbox->m_in.busy)
			return 0;
		udelay(100);
		cond_resched();
	}
	return -1;	/* give up after 1 second */
}

/*
 * Copies data to SGLIST
 * Note: For 64 bit cards, we need a minimum of one SG element for read/write
 */
static int
mega_build_sglist(adapter_t *adapter, scb_t *scb, u32 *buf, u32 *len)
{
	struct scatterlist *sg;
	struct scsi_cmnd	*cmd;
	int	sgcnt;
	int	idx;

	cmd = scb->cmd;

	/*
	 * Copy Scatter-Gather list info into controller structure.
	 *
	 * The number of sg elements returned must not exceed our limit
	 */
	sgcnt = scsi_dma_map(cmd);

	scb->dma_type = MEGA_SGLIST;

	BUG_ON(sgcnt > adapter->sglen || sgcnt < 0);

	*len = 0;

	if (scsi_sg_count(cmd) == 1 && !adapter->has_64bit_addr) {
		sg = scsi_sglist(cmd);
		scb->dma_h_bulkdata = sg_dma_address(sg);
		*buf = (u32)scb->dma_h_bulkdata;
		*len = sg_dma_len(sg);
		return 0;
	}

	scsi_for_each_sg(cmd, sg, sgcnt, idx) {
		if (adapter->has_64bit_addr) {
			scb->sgl64[idx].address = sg_dma_address(sg);
			*len += scb->sgl64[idx].length = sg_dma_len(sg);
		} else {
			scb->sgl[idx].address = sg_dma_address(sg);
			*len += scb->sgl[idx].length = sg_dma_len(sg);
		}
	}

	/* Reset pointer and length fields */
	*buf = scb->sgl_dma_addr;

	/* Return count of SG requests */
	return sgcnt;
}


/*
 * mega_8_to_40ld()
 *
 * takes all info in AdapterInquiry structure and puts it into ProductInfo and
 * Enquiry3 structures for later use
 */
static void
mega_8_to_40ld(mraid_inquiry *inquiry, mega_inquiry3 *enquiry3,
	       mega_product_info *product_info)
{
	int i;

	product_info->max_commands = inquiry->adapter_info.max_commands;
	enquiry3->rebuild_rate = inquiry->adapter_info.rebuild_rate;
	product_info->nchannels = inquiry->adapter_info.nchannels;

	for (i = 0; i < 4; i++) {
		product_info->fw_version[i] =
			inquiry->adapter_info.fw_version[i];

		product_info->bios_version[i] =
			inquiry->adapter_info.bios_version[i];
	}
	enquiry3->cache_flush_interval =
		inquiry->adapter_info.cache_flush_interval;

	product_info->dram_size = inquiry->adapter_info.dram_size;

	enquiry3->num_ldrv = inquiry->logdrv_info.num_ldrv;

	for (i = 0; i < MAX_LOGICAL_DRIVES_8LD; i++) {
		enquiry3->ldrv_size[i] = inquiry->logdrv_info.ldrv_size[i];
		enquiry3->ldrv_prop[i] = inquiry->logdrv_info.ldrv_prop[i];
		enquiry3->ldrv_state[i] = inquiry->logdrv_info.ldrv_state[i];
	}

	for (i = 0; i < (MAX_PHYSICAL_DRIVES); i++)
		enquiry3->pdrv_state[i] = inquiry->pdrv_info.pdrv_state[i];
}

static inline void
mega_free_sgl(adapter_t *adapter)
{
	scb_t	*scb;
	int	i;

	for(i = 0; i < adapter->max_cmds; i++) {

		scb = &adapter->scb_list[i];

		if( scb->sgl64 ) {
			dma_free_coherent(&adapter->dev->dev,
					  sizeof(mega_sgl64) * adapter->sglen,
					  scb->sgl64, scb->sgl_dma_addr);

			scb->sgl64 = NULL;
		}

		if( scb->pthru ) {
			dma_free_coherent(&adapter->dev->dev,
					  sizeof(mega_passthru), scb->pthru,
					  scb->pthru_dma_addr);

			scb->pthru = NULL;
		}

		if( scb->epthru ) {
			dma_free_coherent(&adapter->dev->dev,
					  sizeof(mega_ext_passthru),
					  scb->epthru, scb->epthru_dma_addr);

			scb->epthru = NULL;
		}

	}
}


/*
 * Get information about the card/driver
 */
const char *
megaraid_info(struct Scsi_Host *host)
{
	static char buffer[512];
	adapter_t	*adapter;

	adapter = (adapter_t *)host->hostdata;

	sprintf (buffer,
		 "LSI Logic MegaRAID %s %d commands %d targs %d chans %d luns",
		 adapter->fw_version, adapter->product_info.max_commands,
		 adapter->host->max_id, adapter->host->max_channel,
		 (u32)adapter->host->max_lun);
	return buffer;
}

/*
 * Abort a previous SCSI request. Only commands on the pending list can be
 * aborted. All the commands issued to the F/W must complete.
 */
static int
megaraid_abort(struct scsi_cmnd *cmd)
{
	adapter_t	*adapter;
	int		rval;

	adapter = (adapter_t *)cmd->device->host->hostdata;

	rval = megaraid_abort_and_reset(adapter, cmd, SCB_ABORT);

	/*
	 * This is required here to complete any completed requests
	 * to be communicated over to the mid layer.
	 */
	mega_rundoneq(adapter);

	return rval;
}


static int
megaraid_reset(struct scsi_cmnd *cmd)
{
	adapter_t	*adapter;
	megacmd_t	mc;
	int		rval;

	adapter = (adapter_t *)cmd->device->host->hostdata;

#if MEGA_HAVE_CLUSTERING
	mc.cmd = MEGA_CLUSTER_CMD;
	mc.opcode = MEGA_RESET_RESERVATIONS;

	if( mega_internal_command(adapter, &mc, NULL) != 0 ) {
		dev_warn(&adapter->dev->dev, "reservation reset failed\n");
	}
	else {
		dev_info(&adapter->dev->dev, "reservation reset\n");
	}
#endif

	spin_lock_irq(&adapter->lock);

	rval = megaraid_abort_and_reset(adapter, cmd, SCB_RESET);

	/*
	 * This is required here to complete any completed requests
	 * to be communicated over to the mid layer.
	 */
	mega_rundoneq(adapter);
	spin_unlock_irq(&adapter->lock);

	return rval;
}

/**
 * megaraid_abort_and_reset()
 * @adapter: megaraid soft state
 * @cmd: scsi command to be aborted or reset
 * @aor: abort or reset flag
 *
 * Try to locate the scsi command in the pending queue. If it is found and
 * has not been issued to the controller, abort/reset it; otherwise return
 * failure.
 */
static int
megaraid_abort_and_reset(adapter_t *adapter, struct scsi_cmnd *cmd, int aor)
{
	struct list_head	*pos, *next;
	scb_t			*scb;

	dev_warn(&adapter->dev->dev, "%s cmd=%x <c=%d t=%d l=%d>\n",
		 (aor == SCB_ABORT)? "ABORTING":"RESET",
		 cmd->cmnd[0], cmd->device->channel,
		 cmd->device->id, (u32)cmd->device->lun);

	if(list_empty(&adapter->pending_list))
		return FAILED;

	list_for_each_safe(pos, next, &adapter->pending_list) {

		scb = list_entry(pos, scb_t, list);

		if (scb->cmd == cmd) { /* Found command */

			scb->state |= aor;

			/*
			 * Check if this command has firmware ownership. If
			 * yes, we cannot reset this command. Whenever f/w
			 * completes this command, we will return appropriate
			 * status from ISR.
			 */
			if( scb->state & SCB_ISSUED ) {

				dev_warn(&adapter->dev->dev,
					 "%s[%x], fw owner\n",
					 (aor==SCB_ABORT) ? "ABORTING":"RESET",
					 scb->idx);

				return FAILED;
			}
			else {

				/*
				 * Not yet issued! Remove from the pending
				 * list
				 */
				dev_warn(&adapter->dev->dev,
					 "%s-[%x], driver owner\n",
					 (aor==SCB_ABORT) ? "ABORTING":"RESET",
					 scb->idx);

				mega_free_scb(adapter, scb);

				if( aor == SCB_ABORT ) {
					cmd->result = (DID_ABORT << 16);
				}
				else {
					cmd->result = (DID_RESET << 16);
				}

				list_add_tail(SCSI_LIST(cmd),
					      &adapter->completed_list);

				return SUCCESS;
			}
		}
	}

	return FAILED;
}

/*
 * The ioctl and /proc handlers clone the adapter's pci_dev and force a
 * 32-bit DMA mask on the copy, evidently so that their temporary DMA
 * buffers are allocated below 4GB without disturbing the DMA mask of the
 * real device.
 */
static inline int
make_local_pdev(adapter_t *adapter, struct pci_dev **pdev)
{
	*pdev = pci_alloc_dev(NULL);

	if( *pdev == NULL ) return -1;

	memcpy(*pdev, adapter->dev, sizeof(struct pci_dev));

	if (dma_set_mask(&(*pdev)->dev, DMA_BIT_MASK(32)) != 0) {
		kfree(*pdev);
		return -1;
	}

	return 0;
}

static inline void
free_local_pdev(struct pci_dev *pdev)
{
	kfree(pdev);
}

/**
 * mega_allocate_inquiry()
 * @dma_handle: handle returned for dma address
 * @pdev: handle to pci device
 *
 * allocates memory for inquiry structure
 */
static inline void *
mega_allocate_inquiry(dma_addr_t *dma_handle, struct pci_dev *pdev)
{
	return dma_alloc_coherent(&pdev->dev, sizeof(mega_inquiry3),
				  dma_handle, GFP_KERNEL);
}


static inline void
mega_free_inquiry(void *inquiry, dma_addr_t dma_handle, struct pci_dev *pdev)
{
	dma_free_coherent(&pdev->dev, sizeof(mega_inquiry3), inquiry,
			  dma_handle);
}


#ifdef CONFIG_PROC_FS
/* Following code handles /proc fs */

/**
 * proc_show_config()
 * @m: Synthetic file construction data
 * @v: File iterator
 *
 * Display configuration information about the controller.
 */
static int
proc_show_config(struct seq_file *m, void *v)
{

	adapter_t *adapter = m->private;

	seq_puts(m, MEGARAID_VERSION);
	if(adapter->product_info.product_name[0])
		seq_printf(m, "%s\n", adapter->product_info.product_name);

	seq_puts(m, "Controller Type: ");

	if( adapter->flag & BOARD_MEMMAP )
		seq_puts(m, "438/466/467/471/493/518/520/531/532\n");
	else
		seq_puts(m, "418/428/434\n");

	if(adapter->flag & BOARD_40LD)
		seq_puts(m, "Controller Supports 40 Logical Drives\n");

	if(adapter->flag & BOARD_64BIT)
		seq_puts(m, "Controller capable of 64-bit memory addressing\n");
	if( adapter->has_64bit_addr )
		seq_puts(m, "Controller using 64-bit memory addressing\n");
	else
		seq_puts(m, "Controller is not using 64-bit memory addressing\n");

	seq_printf(m, "Base = %08lx, Irq = %d, ",
		   adapter->base, adapter->host->irq);

	seq_printf(m, "Logical Drives = %d, Channels = %d\n",
		   adapter->numldrv, adapter->product_info.nchannels);

	seq_printf(m, "Version =%s:%s, DRAM = %dMb\n",
		   adapter->fw_version, adapter->bios_version,
		   adapter->product_info.dram_size);

	seq_printf(m, "Controller Queue Depth = %d, Driver Queue Depth = %d\n",
		   adapter->product_info.max_commands, adapter->max_cmds);

	seq_printf(m, "support_ext_cdb = %d\n", adapter->support_ext_cdb);
	seq_printf(m, "support_random_del = %d\n", adapter->support_random_del);
	seq_printf(m, "boot_ldrv_enabled = %d\n", adapter->boot_ldrv_enabled);
	seq_printf(m, "boot_ldrv = %d\n", adapter->boot_ldrv);
	seq_printf(m, "boot_pdrv_enabled = %d\n", adapter->boot_pdrv_enabled);
	seq_printf(m, "boot_pdrv_ch = %d\n", adapter->boot_pdrv_ch);
	seq_printf(m, "boot_pdrv_tgt = %d\n", adapter->boot_pdrv_tgt);
	seq_printf(m, "quiescent = %d\n",
		   atomic_read(&adapter->quiescent));
	seq_printf(m, "has_cluster = %d\n", adapter->has_cluster);

	seq_puts(m, "\nModule Parameters:\n");
	seq_printf(m, "max_cmd_per_lun = %d\n", max_cmd_per_lun);
	seq_printf(m, "max_sectors_per_io = %d\n", max_sectors_per_io);
	return 0;
}

/**
 * proc_show_stat()
 * @m: Synthetic file construction data
 * @v: File iterator
 *
 * Display statistical information about the I/O activity.
 */
static int
proc_show_stat(struct seq_file *m, void *v)
{
	adapter_t *adapter = m->private;
#if MEGA_HAVE_STATS
	int	i;
#endif

	seq_puts(m, "Statistical Information for this controller\n");
	seq_printf(m, "pend_cmds = %d\n", atomic_read(&adapter->pend_cmds));
#if MEGA_HAVE_STATS
	for(i = 0; i < adapter->numldrv; i++) {
		seq_printf(m, "Logical Drive %d:\n", i);
		seq_printf(m, "\tReads Issued = %lu, Writes Issued = %lu\n",
			   adapter->nreads[i], adapter->nwrites[i]);
		seq_printf(m, "\tSectors Read = %lu, Sectors Written = %lu\n",
			   adapter->nreadblocks[i], adapter->nwriteblocks[i]);
		seq_printf(m, "\tRead errors = %lu, Write errors = %lu\n\n",
			   adapter->rd_errors[i], adapter->wr_errors[i]);
	}
#else
	seq_puts(m, "IO and error counters not compiled in driver.\n");
#endif
	return 0;
}


/**
 * proc_show_mbox()
 * @m: Synthetic file construction data
 * @v: File iterator
 *
 * Display mailbox information for the last command issued. This information
 * is good for debugging.
 */
static int
proc_show_mbox(struct seq_file *m, void *v)
{
	adapter_t	*adapter = m->private;
	volatile mbox_t	*mbox = adapter->mbox;

	seq_puts(m, "Contents of Mail Box Structure\n");
	seq_printf(m, " Fw Command = 0x%02x\n", mbox->m_out.cmd);
	seq_printf(m, " Cmd Sequence = 0x%02x\n", mbox->m_out.cmdid);
	seq_printf(m, " No of Sectors= %04d\n", mbox->m_out.numsectors);
	seq_printf(m, " LBA = 0x%02x\n", mbox->m_out.lba);
	seq_printf(m, " DTA = 0x%08x\n", mbox->m_out.xferaddr);
	seq_printf(m, " Logical Drive= 0x%02x\n", mbox->m_out.logdrv);
	seq_printf(m, " No of SG Elmt= 0x%02x\n", mbox->m_out.numsgelements);
	seq_printf(m, " Busy = %01x\n", mbox->m_in.busy);
	seq_printf(m, " Status = 0x%02x\n", mbox->m_in.status);
	return 0;
}


/**
 * proc_show_rebuild_rate()
 * @m: Synthetic file construction data
 * @v: File iterator
 *
 * Display current rebuild rate
 */
static int
proc_show_rebuild_rate(struct seq_file *m, void *v)
{
	adapter_t	*adapter = m->private;
	dma_addr_t	dma_handle;
	caddr_t		inquiry;
	struct pci_dev	*pdev;

	if( make_local_pdev(adapter, &pdev) != 0 )
		return 0;

	if( (inquiry = mega_allocate_inquiry(&dma_handle, pdev)) == NULL )
		goto free_pdev;

	if( mega_adapinq(adapter, dma_handle) != 0 ) {
		seq_puts(m, "Adapter inquiry failed.\n");
		dev_warn(&adapter->dev->dev, "inquiry failed\n");
		goto free_inquiry;
	}

	if( adapter->flag & BOARD_40LD )
		seq_printf(m, "Rebuild Rate: [%d%%]\n",
			   ((mega_inquiry3 *)inquiry)->rebuild_rate);
	else
		seq_printf(m, "Rebuild Rate: [%d%%]\n",
			   ((mraid_ext_inquiry *)
			    inquiry)->raid_inq.adapter_info.rebuild_rate);

free_inquiry:
	mega_free_inquiry(inquiry, dma_handle, pdev);
free_pdev:
	free_local_pdev(pdev);
	return 0;
}
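
/*
 * The remaining /proc handlers follow the same pattern as
 * proc_show_rebuild_rate() above: build a local pdev, issue an adapter
 * inquiry into a DMA buffer, then parse either the mega_inquiry3 (40LD
 * firmware) or the mraid_ext_inquiry (8LD firmware) layout.
 */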
mega_free_inquiry(inquiry, dma_handle, pdev); 2205 free_pdev: 2206 free_local_pdev(pdev); 2207 return 0; 2208 } 2209 2210 2211 /** 2212 * proc_show_battery() 2213 * @m: Synthetic file construction data 2214 * @v: File iterator 2215 * 2216 * Display information about the battery module on the controller. 2217 */ 2218 static int 2219 proc_show_battery(struct seq_file *m, void *v) 2220 { 2221 adapter_t *adapter = m->private; 2222 dma_addr_t dma_handle; 2223 caddr_t inquiry; 2224 struct pci_dev *pdev; 2225 u8 battery_status; 2226 2227 if( make_local_pdev(adapter, &pdev) != 0 ) 2228 return 0; 2229 2230 if( (inquiry = mega_allocate_inquiry(&dma_handle, pdev)) == NULL ) 2231 goto free_pdev; 2232 2233 if( mega_adapinq(adapter, dma_handle) != 0 ) { 2234 seq_puts(m, "Adapter inquiry failed.\n"); 2235 dev_warn(&adapter->dev->dev, "inquiry failed\n"); 2236 goto free_inquiry; 2237 } 2238 2239 if( adapter->flag & BOARD_40LD ) { 2240 battery_status = ((mega_inquiry3 *)inquiry)->battery_status; 2241 } 2242 else { 2243 battery_status = ((mraid_ext_inquiry *)inquiry)-> 2244 raid_inq.adapter_info.battery_status; 2245 } 2246 2247 /* 2248 * Decode the battery status 2249 */ 2250 seq_printf(m, "Battery Status:[%d]", battery_status); 2251 2252 if(battery_status == MEGA_BATT_CHARGE_DONE) 2253 seq_puts(m, " Charge Done"); 2254 2255 if(battery_status & MEGA_BATT_MODULE_MISSING) 2256 seq_puts(m, " Module Missing"); 2257 2258 if(battery_status & MEGA_BATT_LOW_VOLTAGE) 2259 seq_puts(m, " Low Voltage"); 2260 2261 if(battery_status & MEGA_BATT_TEMP_HIGH) 2262 seq_puts(m, " Temperature High"); 2263 2264 if(battery_status & MEGA_BATT_PACK_MISSING) 2265 seq_puts(m, " Pack Missing"); 2266 2267 if(battery_status & MEGA_BATT_CHARGE_INPROG) 2268 seq_puts(m, " Charge In-progress"); 2269 2270 if(battery_status & MEGA_BATT_CHARGE_FAIL) 2271 seq_puts(m, " Charge Fail"); 2272 2273 if(battery_status & MEGA_BATT_CYCLES_EXCEEDED) 2274 seq_puts(m, " Cycles Exceeded"); 2275 2276 seq_putc(m, '\n'); 2277 2278 free_inquiry: 2279 mega_free_inquiry(inquiry, dma_handle, pdev); 2280 free_pdev: 2281 free_local_pdev(pdev); 2282 return 0; 2283 } 2284 2285 2286 /* 2287 * Display scsi inquiry 2288 */ 2289 static void 2290 mega_print_inquiry(struct seq_file *m, char *scsi_inq) 2291 { 2292 int i; 2293 2294 seq_puts(m, " Vendor: "); 2295 seq_write(m, scsi_inq + 8, 8); 2296 seq_puts(m, " Model: "); 2297 seq_write(m, scsi_inq + 16, 16); 2298 seq_puts(m, " Rev: "); 2299 seq_write(m, scsi_inq + 32, 4); 2300 seq_putc(m, '\n'); 2301 2302 i = scsi_inq[0] & 0x1f; 2303 seq_printf(m, " Type: %s ", scsi_device_type(i)); 2304 2305 seq_printf(m, " ANSI SCSI revision: %02x", 2306 scsi_inq[2] & 0x07); 2307 2308 if( (scsi_inq[2] & 0x07) == 1 && (scsi_inq[3] & 0x0f) == 1 ) 2309 seq_puts(m, " CCS\n"); 2310 else 2311 seq_putc(m, '\n'); 2312 } 2313 2314 /** 2315 * proc_show_pdrv() 2316 * @m: Synthetic file construction data 2317 * @adapter: pointer to our soft state 2318 * @channel: channel 2319 * 2320 * Display information about the physical drives. 
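 *
 * A note on the indexing used below: the per-device state array is laid
 * out with sixteen slots per channel, so the state byte for a device is
 *
 *	state = pdrv_state[channel * 16 + tgt];
 *
 * and only the low nibble (state & 0x0F) selects one of the PDRV_* states
 * decoded in the switch statement. For example, channel 1, target 3 reads
 * pdrv_state[19].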
2321 */ 2322 static int 2323 proc_show_pdrv(struct seq_file *m, adapter_t *adapter, int channel) 2324 { 2325 dma_addr_t dma_handle; 2326 char *scsi_inq; 2327 dma_addr_t scsi_inq_dma_handle; 2328 caddr_t inquiry; 2329 struct pci_dev *pdev; 2330 u8 *pdrv_state; 2331 u8 state; 2332 int tgt; 2333 int max_channels; 2334 int i; 2335 2336 if( make_local_pdev(adapter, &pdev) != 0 ) 2337 return 0; 2338 2339 if( (inquiry = mega_allocate_inquiry(&dma_handle, pdev)) == NULL ) 2340 goto free_pdev; 2341 2342 if( mega_adapinq(adapter, dma_handle) != 0 ) { 2343 seq_puts(m, "Adapter inquiry failed.\n"); 2344 dev_warn(&adapter->dev->dev, "inquiry failed\n"); 2345 goto free_inquiry; 2346 } 2347 2348 2349 scsi_inq = dma_alloc_coherent(&pdev->dev, 256, &scsi_inq_dma_handle, 2350 GFP_KERNEL); 2351 if( scsi_inq == NULL ) { 2352 seq_puts(m, "memory not available for scsi inq.\n"); 2353 goto free_inquiry; 2354 } 2355 2356 if( adapter->flag & BOARD_40LD ) { 2357 pdrv_state = ((mega_inquiry3 *)inquiry)->pdrv_state; 2358 } 2359 else { 2360 pdrv_state = ((mraid_ext_inquiry *)inquiry)-> 2361 raid_inq.pdrv_info.pdrv_state; 2362 } 2363 2364 max_channels = adapter->product_info.nchannels; 2365 2366 if( channel >= max_channels ) { 2367 goto free_pci; 2368 } 2369 2370 for( tgt = 0; tgt <= MAX_TARGET; tgt++ ) { 2371 2372 i = channel*16 + tgt; 2373 2374 state = *(pdrv_state + i); 2375 switch( state & 0x0F ) { 2376 case PDRV_ONLINE: 2377 seq_printf(m, "Channel:%2d Id:%2d State: Online", 2378 channel, tgt); 2379 break; 2380 2381 case PDRV_FAILED: 2382 seq_printf(m, "Channel:%2d Id:%2d State: Failed", 2383 channel, tgt); 2384 break; 2385 2386 case PDRV_RBLD: 2387 seq_printf(m, "Channel:%2d Id:%2d State: Rebuild", 2388 channel, tgt); 2389 break; 2390 2391 case PDRV_HOTSPARE: 2392 seq_printf(m, "Channel:%2d Id:%2d State: Hot spare", 2393 channel, tgt); 2394 break; 2395 2396 default: 2397 seq_printf(m, "Channel:%2d Id:%2d State: Un-configured", 2398 channel, tgt); 2399 break; 2400 } 2401 2402 /* 2403 * This interface displays inquiries for disk drives 2404 * only. Inquries for logical drives and non-disk 2405 * devices are available through /proc/scsi/scsi 2406 */ 2407 memset(scsi_inq, 0, 256); 2408 if( mega_internal_dev_inquiry(adapter, channel, tgt, 2409 scsi_inq_dma_handle) || 2410 (scsi_inq[0] & 0x1F) != TYPE_DISK ) { 2411 continue; 2412 } 2413 2414 /* 2415 * Check for overflow. We print less than 240 2416 * characters for inquiry 2417 */ 2418 seq_puts(m, ".\n"); 2419 mega_print_inquiry(m, scsi_inq); 2420 } 2421 2422 free_pci: 2423 dma_free_coherent(&pdev->dev, 256, scsi_inq, scsi_inq_dma_handle); 2424 free_inquiry: 2425 mega_free_inquiry(inquiry, dma_handle, pdev); 2426 free_pdev: 2427 free_local_pdev(pdev); 2428 return 0; 2429 } 2430 2431 /** 2432 * proc_show_pdrv_ch0() 2433 * @m: Synthetic file construction data 2434 * @v: File iterator 2435 * 2436 * Display information about the physical drives on physical channel 0. 2437 */ 2438 static int 2439 proc_show_pdrv_ch0(struct seq_file *m, void *v) 2440 { 2441 return proc_show_pdrv(m, m->private, 0); 2442 } 2443 2444 2445 /** 2446 * proc_show_pdrv_ch1() 2447 * @m: Synthetic file construction data 2448 * @v: File iterator 2449 * 2450 * Display information about the physical drives on physical channel 1. 
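 *
 * These thin per-channel wrappers back the "diskdrives-chN" proc entries
 * created in mega_create_proc_entry(). With the names used there, the
 * output can be read from userspace with, for example,
 *
 *	# cat /proc/megaraid/hba0/diskdrives-ch1
 *
 * assuming this controller registered as SCSI host 0 and hence "hba0".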
2451 */ 2452 static int 2453 proc_show_pdrv_ch1(struct seq_file *m, void *v) 2454 { 2455 return proc_show_pdrv(m, m->private, 1); 2456 } 2457 2458 2459 /** 2460 * proc_show_pdrv_ch2() 2461 * @m: Synthetic file construction data 2462 * @v: File iterator 2463 * 2464 * Display information about the physical drives on physical channel 2. 2465 */ 2466 static int 2467 proc_show_pdrv_ch2(struct seq_file *m, void *v) 2468 { 2469 return proc_show_pdrv(m, m->private, 2); 2470 } 2471 2472 2473 /** 2474 * proc_show_pdrv_ch3() 2475 * @m: Synthetic file construction data 2476 * @v: File iterator 2477 * 2478 * Display information about the physical drives on physical channel 3. 2479 */ 2480 static int 2481 proc_show_pdrv_ch3(struct seq_file *m, void *v) 2482 { 2483 return proc_show_pdrv(m, m->private, 3); 2484 } 2485 2486 2487 /** 2488 * proc_show_rdrv() 2489 * @m: Synthetic file construction data 2490 * @adapter: pointer to our soft state 2491 * @start: starting logical drive to display 2492 * @end: ending logical drive to display 2493 * 2494 * We do not print the inquiry information since its already available through 2495 * /proc/scsi/scsi interface 2496 */ 2497 static int 2498 proc_show_rdrv(struct seq_file *m, adapter_t *adapter, int start, int end ) 2499 { 2500 dma_addr_t dma_handle; 2501 logdrv_param *lparam; 2502 megacmd_t mc; 2503 char *disk_array; 2504 dma_addr_t disk_array_dma_handle; 2505 caddr_t inquiry; 2506 struct pci_dev *pdev; 2507 u8 *rdrv_state; 2508 int num_ldrv; 2509 u32 array_sz; 2510 int i; 2511 2512 if( make_local_pdev(adapter, &pdev) != 0 ) 2513 return 0; 2514 2515 if( (inquiry = mega_allocate_inquiry(&dma_handle, pdev)) == NULL ) 2516 goto free_pdev; 2517 2518 if( mega_adapinq(adapter, dma_handle) != 0 ) { 2519 seq_puts(m, "Adapter inquiry failed.\n"); 2520 dev_warn(&adapter->dev->dev, "inquiry failed\n"); 2521 goto free_inquiry; 2522 } 2523 2524 memset(&mc, 0, sizeof(megacmd_t)); 2525 2526 if( adapter->flag & BOARD_40LD ) { 2527 array_sz = sizeof(disk_array_40ld); 2528 2529 rdrv_state = ((mega_inquiry3 *)inquiry)->ldrv_state; 2530 2531 num_ldrv = ((mega_inquiry3 *)inquiry)->num_ldrv; 2532 } 2533 else { 2534 array_sz = sizeof(disk_array_8ld); 2535 2536 rdrv_state = ((mraid_ext_inquiry *)inquiry)-> 2537 raid_inq.logdrv_info.ldrv_state; 2538 2539 num_ldrv = ((mraid_ext_inquiry *)inquiry)-> 2540 raid_inq.logdrv_info.num_ldrv; 2541 } 2542 2543 disk_array = dma_alloc_coherent(&pdev->dev, array_sz, 2544 &disk_array_dma_handle, GFP_KERNEL); 2545 2546 if( disk_array == NULL ) { 2547 seq_puts(m, "memory not available.\n"); 2548 goto free_inquiry; 2549 } 2550 2551 mc.xferaddr = (u32)disk_array_dma_handle; 2552 2553 if( adapter->flag & BOARD_40LD ) { 2554 mc.cmd = FC_NEW_CONFIG; 2555 mc.opcode = OP_DCMD_READ_CONFIG; 2556 2557 if( mega_internal_command(adapter, &mc, NULL) ) { 2558 seq_puts(m, "40LD read config failed.\n"); 2559 goto free_pci; 2560 } 2561 2562 } 2563 else { 2564 mc.cmd = NEW_READ_CONFIG_8LD; 2565 2566 if( mega_internal_command(adapter, &mc, NULL) ) { 2567 mc.cmd = READ_CONFIG_8LD; 2568 if( mega_internal_command(adapter, &mc, NULL) ) { 2569 seq_puts(m, "8LD read config failed.\n"); 2570 goto free_pci; 2571 } 2572 } 2573 } 2574 2575 for( i = start; i < ( (end+1 < num_ldrv) ? end+1 : num_ldrv ); i++ ) { 2576 2577 if( adapter->flag & BOARD_40LD ) { 2578 lparam = 2579 &((disk_array_40ld *)disk_array)->ldrv[i].lparam; 2580 } 2581 else { 2582 lparam = 2583 &((disk_array_8ld *)disk_array)->ldrv[i].lparam; 2584 } 2585 2586 /* 2587 * Check for overflow. 
We print less than 240 characters for 2588 * information about each logical drive. 2589 */ 2590 seq_printf(m, "Logical drive:%2d:, ", i); 2591 2592 switch( rdrv_state[i] & 0x0F ) { 2593 case RDRV_OFFLINE: 2594 seq_puts(m, "state: offline"); 2595 break; 2596 case RDRV_DEGRADED: 2597 seq_puts(m, "state: degraded"); 2598 break; 2599 case RDRV_OPTIMAL: 2600 seq_puts(m, "state: optimal"); 2601 break; 2602 case RDRV_DELETED: 2603 seq_puts(m, "state: deleted"); 2604 break; 2605 default: 2606 seq_puts(m, "state: unknown"); 2607 break; 2608 } 2609 2610 /* 2611 * Check if check consistency or initialization is going on 2612 * for this logical drive. 2613 */ 2614 if( (rdrv_state[i] & 0xF0) == 0x20 ) 2615 seq_puts(m, ", check-consistency in progress"); 2616 else if( (rdrv_state[i] & 0xF0) == 0x10 ) 2617 seq_puts(m, ", initialization in progress"); 2618 2619 seq_putc(m, '\n'); 2620 2621 seq_printf(m, "Span depth:%3d, ", lparam->span_depth); 2622 seq_printf(m, "RAID level:%3d, ", lparam->level); 2623 seq_printf(m, "Stripe size:%3d, ", 2624 lparam->stripe_sz ? lparam->stripe_sz/2: 128); 2625 seq_printf(m, "Row size:%3d\n", lparam->row_size); 2626 2627 seq_puts(m, "Read Policy: "); 2628 switch(lparam->read_ahead) { 2629 case NO_READ_AHEAD: 2630 seq_puts(m, "No read ahead, "); 2631 break; 2632 case READ_AHEAD: 2633 seq_puts(m, "Read ahead, "); 2634 break; 2635 case ADAP_READ_AHEAD: 2636 seq_puts(m, "Adaptive, "); 2637 break; 2638 2639 } 2640 2641 seq_puts(m, "Write Policy: "); 2642 switch(lparam->write_mode) { 2643 case WRMODE_WRITE_THRU: 2644 seq_puts(m, "Write thru, "); 2645 break; 2646 case WRMODE_WRITE_BACK: 2647 seq_puts(m, "Write back, "); 2648 break; 2649 } 2650 2651 seq_puts(m, "Cache Policy: "); 2652 switch(lparam->direct_io) { 2653 case CACHED_IO: 2654 seq_puts(m, "Cached IO\n\n"); 2655 break; 2656 case DIRECT_IO: 2657 seq_puts(m, "Direct IO\n\n"); 2658 break; 2659 } 2660 } 2661 2662 free_pci: 2663 dma_free_coherent(&pdev->dev, array_sz, disk_array, 2664 disk_array_dma_handle); 2665 free_inquiry: 2666 mega_free_inquiry(inquiry, dma_handle, pdev); 2667 free_pdev: 2668 free_local_pdev(pdev); 2669 return 0; 2670 } 2671 2672 /** 2673 * proc_show_rdrv_10() 2674 * @m: Synthetic file construction data 2675 * @v: File iterator 2676 * 2677 * Display real time information about the logical drives 0 through 9. 2678 */ 2679 static int 2680 proc_show_rdrv_10(struct seq_file *m, void *v) 2681 { 2682 return proc_show_rdrv(m, m->private, 0, 9); 2683 } 2684 2685 2686 /** 2687 * proc_show_rdrv_20() 2688 * @m: Synthetic file construction data 2689 * @v: File iterator 2690 * 2691 * Display real time information about the logical drives 0 through 9. 2692 */ 2693 static int 2694 proc_show_rdrv_20(struct seq_file *m, void *v) 2695 { 2696 return proc_show_rdrv(m, m->private, 10, 19); 2697 } 2698 2699 2700 /** 2701 * proc_show_rdrv_30() 2702 * @m: Synthetic file construction data 2703 * @v: File iterator 2704 * 2705 * Display real time information about the logical drives 0 through 9. 2706 */ 2707 static int 2708 proc_show_rdrv_30(struct seq_file *m, void *v) 2709 { 2710 return proc_show_rdrv(m, m->private, 20, 29); 2711 } 2712 2713 2714 /** 2715 * proc_show_rdrv_40() 2716 * @m: Synthetic file construction data 2717 * @v: File iterator 2718 * 2719 * Display real time information about the logical drives 0 through 9. 
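 * (This wrapper actually covers logical drives 30 through 39, matching the
 * proc_show_rdrv(m, m->private, 30, 39) call below; likewise the -20 and
 * -30 variants above cover drives 10-19 and 20-29 respectively.)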
2720 */ 2721 static int 2722 proc_show_rdrv_40(struct seq_file *m, void *v) 2723 { 2724 return proc_show_rdrv(m, m->private, 30, 39); 2725 } 2726 2727 /** 2728 * mega_create_proc_entry() 2729 * @index: index in soft state array 2730 * @parent: parent node for this /proc entry 2731 * 2732 * Creates /proc entries for our controllers. 2733 */ 2734 static void 2735 mega_create_proc_entry(int index, struct proc_dir_entry *parent) 2736 { 2737 adapter_t *adapter = hba_soft_state[index]; 2738 struct proc_dir_entry *dir; 2739 u8 string[16]; 2740 2741 sprintf(string, "hba%d", adapter->host->host_no); 2742 dir = proc_mkdir_data(string, 0, parent, adapter); 2743 if (!dir) { 2744 dev_warn(&adapter->dev->dev, "proc_mkdir failed\n"); 2745 return; 2746 } 2747 2748 proc_create_single_data("config", S_IRUSR, dir, 2749 proc_show_config, adapter); 2750 proc_create_single_data("stat", S_IRUSR, dir, 2751 proc_show_stat, adapter); 2752 proc_create_single_data("mailbox", S_IRUSR, dir, 2753 proc_show_mbox, adapter); 2754 #if MEGA_HAVE_ENH_PROC 2755 proc_create_single_data("rebuild-rate", S_IRUSR, dir, 2756 proc_show_rebuild_rate, adapter); 2757 proc_create_single_data("battery-status", S_IRUSR, dir, 2758 proc_show_battery, adapter); 2759 proc_create_single_data("diskdrives-ch0", S_IRUSR, dir, 2760 proc_show_pdrv_ch0, adapter); 2761 proc_create_single_data("diskdrives-ch1", S_IRUSR, dir, 2762 proc_show_pdrv_ch1, adapter); 2763 proc_create_single_data("diskdrives-ch2", S_IRUSR, dir, 2764 proc_show_pdrv_ch2, adapter); 2765 proc_create_single_data("diskdrives-ch3", S_IRUSR, dir, 2766 proc_show_pdrv_ch3, adapter); 2767 proc_create_single_data("raiddrives-0-9", S_IRUSR, dir, 2768 proc_show_rdrv_10, adapter); 2769 proc_create_single_data("raiddrives-10-19", S_IRUSR, dir, 2770 proc_show_rdrv_20, adapter); 2771 proc_create_single_data("raiddrives-20-29", S_IRUSR, dir, 2772 proc_show_rdrv_30, adapter); 2773 proc_create_single_data("raiddrives-30-39", S_IRUSR, dir, 2774 proc_show_rdrv_40, adapter); 2775 #endif 2776 } 2777 2778 #else 2779 static inline void mega_create_proc_entry(int index, struct proc_dir_entry *parent) 2780 { 2781 } 2782 #endif 2783 2784 2785 /* 2786 * megaraid_biosparam() 2787 * 2788 * Return the disk geometry for a particular disk 2789 */ 2790 static int 2791 megaraid_biosparam(struct scsi_device *sdev, struct block_device *bdev, 2792 sector_t capacity, int geom[]) 2793 { 2794 adapter_t *adapter; 2795 int heads; 2796 int sectors; 2797 int cylinders; 2798 2799 /* Get pointer to host config structure */ 2800 adapter = (adapter_t *)sdev->host->hostdata; 2801 2802 if (IS_RAID_CH(adapter, sdev->channel)) { 2803 /* Default heads (64) & sectors (32) */ 2804 heads = 64; 2805 sectors = 32; 2806 cylinders = (ulong)capacity / (heads * sectors); 2807 2808 /* 2809 * Handle extended translation size for logical drives 2810 * > 1Gb 2811 */ 2812 if ((ulong)capacity >= 0x200000) { 2813 heads = 255; 2814 sectors = 63; 2815 cylinders = (ulong)capacity / (heads * sectors); 2816 } 2817 2818 /* return result */ 2819 geom[0] = heads; 2820 geom[1] = sectors; 2821 geom[2] = cylinders; 2822 } 2823 else { 2824 if (scsi_partsize(bdev, capacity, geom)) 2825 return 0; 2826 2827 dev_info(&adapter->dev->dev, 2828 "invalid partition on this disk on channel %d\n", 2829 sdev->channel); 2830 2831 /* Default heads (64) & sectors (32) */ 2832 heads = 64; 2833 sectors = 32; 2834 cylinders = (ulong)capacity / (heads * sectors); 2835 2836 /* Handle extended translation size for logical drives > 1Gb */ 2837 if ((ulong)capacity >= 0x200000) { 2838 
heads = 255; 2839 sectors = 63; 2840 cylinders = (ulong)capacity / (heads * sectors); 2841 } 2842 2843 /* return result */ 2844 geom[0] = heads; 2845 geom[1] = sectors; 2846 geom[2] = cylinders; 2847 } 2848 2849 return 0; 2850 } 2851 2852 /** 2853 * mega_init_scb() 2854 * @adapter: pointer to our soft state 2855 * 2856 * Allocate memory for the various pointers in the scb structures: 2857 * scatter-gather list pointer, passthru and extended passthru structure 2858 * pointers. 2859 */ 2860 static int 2861 mega_init_scb(adapter_t *adapter) 2862 { 2863 scb_t *scb; 2864 int i; 2865 2866 for( i = 0; i < adapter->max_cmds; i++ ) { 2867 2868 scb = &adapter->scb_list[i]; 2869 2870 scb->sgl64 = NULL; 2871 scb->sgl = NULL; 2872 scb->pthru = NULL; 2873 scb->epthru = NULL; 2874 } 2875 2876 for( i = 0; i < adapter->max_cmds; i++ ) { 2877 2878 scb = &adapter->scb_list[i]; 2879 2880 scb->idx = i; 2881 2882 scb->sgl64 = dma_alloc_coherent(&adapter->dev->dev, 2883 sizeof(mega_sgl64) * adapter->sglen, 2884 &scb->sgl_dma_addr, GFP_KERNEL); 2885 2886 scb->sgl = (mega_sglist *)scb->sgl64; 2887 2888 if( !scb->sgl ) { 2889 dev_warn(&adapter->dev->dev, "RAID: Can't allocate sglist\n"); 2890 mega_free_sgl(adapter); 2891 return -1; 2892 } 2893 2894 scb->pthru = dma_alloc_coherent(&adapter->dev->dev, 2895 sizeof(mega_passthru), 2896 &scb->pthru_dma_addr, GFP_KERNEL); 2897 2898 if( !scb->pthru ) { 2899 dev_warn(&adapter->dev->dev, "RAID: Can't allocate passthru\n"); 2900 mega_free_sgl(adapter); 2901 return -1; 2902 } 2903 2904 scb->epthru = dma_alloc_coherent(&adapter->dev->dev, 2905 sizeof(mega_ext_passthru), 2906 &scb->epthru_dma_addr, GFP_KERNEL); 2907 2908 if( !scb->epthru ) { 2909 dev_warn(&adapter->dev->dev, 2910 "Can't allocate extended passthru\n"); 2911 mega_free_sgl(adapter); 2912 return -1; 2913 } 2914 2915 2916 scb->dma_type = MEGA_DMA_TYPE_NONE; 2917 2918 /* 2919 * Link to free list 2920 * lock not required since we are loading the driver, so no 2921 * commands possible right now. 2922 */ 2923 scb->state = SCB_FREE; 2924 scb->cmd = NULL; 2925 list_add(&scb->list, &adapter->free_list); 2926 } 2927 2928 return 0; 2929 } 2930 2931 2932 /** 2933 * megadev_open() 2934 * @inode: unused 2935 * @filep: unused 2936 * 2937 * Routines for the character/ioctl interface to the driver. Find out if this 2938 * is a valid open. 2939 */ 2940 static int 2941 megadev_open (struct inode *inode, struct file *filep) 2942 { 2943 /* 2944 * Only allow superuser to access private ioctl interface 2945 */ 2946 if( !capable(CAP_SYS_ADMIN) ) return -EACCES; 2947 2948 return 0; 2949 } 2950 2951 2952 /** 2953 * megadev_ioctl() 2954 * @filep: Our device file 2955 * @cmd: ioctl command 2956 * @arg: user buffer 2957 * 2958 * ioctl entry point for our private ioctl interface. We move the data in from 2959 * the user space, prepare the command (if necessary, convert the old MIMD 2960 * ioctl to new ioctl command), and issue a synchronous command to the 2961 * controller. 
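 *
 * Illustrative userspace sketch (not part of the driver) of querying the
 * adapter count through the older MIMD path handled below. Field names
 * follow the uioctl_t conversion in mega_m_to_n(); the exact structure
 * layout and ioctl command number live in megaraid.h and are assumed here:
 *
 *	struct uioctl_t uioc;
 *	int nadap = 0;
 *
 *	memset(&uioc, 0, sizeof(uioc));
 *	uioc.ui.fcs.opcode    = 0x82;
 *	uioc.ui.fcs.subopcode = MEGAIOC_QNADAP;
 *	uioc.data             = (caddr_t)&nadap;	/* filled via put_user() */
 *	ioctl(fd, cmd, &uioc);	/* fd: node for the "megadev_legacy" major */
 *
 * For GET_N_ADAP the ioctl also returns hba_count as a positive value, for
 * compatibility with existing MIMD applications.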
2962 */ 2963 static int 2964 megadev_ioctl(struct file *filep, unsigned int cmd, unsigned long arg) 2965 { 2966 adapter_t *adapter; 2967 nitioctl_t uioc; 2968 int adapno; 2969 int rval; 2970 mega_passthru __user *upthru; /* user address for passthru */ 2971 mega_passthru *pthru; /* copy user passthru here */ 2972 dma_addr_t pthru_dma_hndl; 2973 void *data = NULL; /* data to be transferred */ 2974 dma_addr_t data_dma_hndl; /* dma handle for data xfer area */ 2975 megacmd_t mc; 2976 #if MEGA_HAVE_STATS 2977 megastat_t __user *ustats = NULL; 2978 int num_ldrv = 0; 2979 #endif 2980 u32 uxferaddr = 0; 2981 struct pci_dev *pdev; 2982 2983 /* 2984 * Make sure only USCSICMD are issued through this interface. 2985 * MIMD application would still fire different command. 2986 */ 2987 if( (_IOC_TYPE(cmd) != MEGAIOC_MAGIC) && (cmd != USCSICMD) ) { 2988 return -EINVAL; 2989 } 2990 2991 /* 2992 * Check and convert a possible MIMD command to NIT command. 2993 * mega_m_to_n() copies the data from the user space, so we do not 2994 * have to do it here. 2995 * NOTE: We will need some user address to copyout the data, therefore 2996 * the inteface layer will also provide us with the required user 2997 * addresses. 2998 */ 2999 memset(&uioc, 0, sizeof(nitioctl_t)); 3000 if( (rval = mega_m_to_n( (void __user *)arg, &uioc)) != 0 ) 3001 return rval; 3002 3003 3004 switch( uioc.opcode ) { 3005 3006 case GET_DRIVER_VER: 3007 if( put_user(driver_ver, (u32 __user *)uioc.uioc_uaddr) ) 3008 return (-EFAULT); 3009 3010 break; 3011 3012 case GET_N_ADAP: 3013 if( put_user(hba_count, (u32 __user *)uioc.uioc_uaddr) ) 3014 return (-EFAULT); 3015 3016 /* 3017 * Shucks. MIMD interface returns a positive value for number 3018 * of adapters. TODO: Change it to return 0 when there is no 3019 * applicatio using mimd interface. 
3020 */ 3021 return hba_count; 3022 3023 case GET_ADAP_INFO: 3024 3025 /* 3026 * Which adapter 3027 */ 3028 if( (adapno = GETADAP(uioc.adapno)) >= hba_count ) 3029 return (-ENODEV); 3030 3031 if( copy_to_user(uioc.uioc_uaddr, mcontroller+adapno, 3032 sizeof(struct mcontroller)) ) 3033 return (-EFAULT); 3034 break; 3035 3036 #if MEGA_HAVE_STATS 3037 3038 case GET_STATS: 3039 /* 3040 * Which adapter 3041 */ 3042 if( (adapno = GETADAP(uioc.adapno)) >= hba_count ) 3043 return (-ENODEV); 3044 3045 adapter = hba_soft_state[adapno]; 3046 3047 ustats = uioc.uioc_uaddr; 3048 3049 if( copy_from_user(&num_ldrv, &ustats->num_ldrv, sizeof(int)) ) 3050 return (-EFAULT); 3051 3052 /* 3053 * Check for the validity of the logical drive number 3054 */ 3055 if( num_ldrv >= MAX_LOGICAL_DRIVES_40LD ) return -EINVAL; 3056 3057 if( copy_to_user(ustats->nreads, adapter->nreads, 3058 num_ldrv*sizeof(u32)) ) 3059 return -EFAULT; 3060 3061 if( copy_to_user(ustats->nreadblocks, adapter->nreadblocks, 3062 num_ldrv*sizeof(u32)) ) 3063 return -EFAULT; 3064 3065 if( copy_to_user(ustats->nwrites, adapter->nwrites, 3066 num_ldrv*sizeof(u32)) ) 3067 return -EFAULT; 3068 3069 if( copy_to_user(ustats->nwriteblocks, adapter->nwriteblocks, 3070 num_ldrv*sizeof(u32)) ) 3071 return -EFAULT; 3072 3073 if( copy_to_user(ustats->rd_errors, adapter->rd_errors, 3074 num_ldrv*sizeof(u32)) ) 3075 return -EFAULT; 3076 3077 if( copy_to_user(ustats->wr_errors, adapter->wr_errors, 3078 num_ldrv*sizeof(u32)) ) 3079 return -EFAULT; 3080 3081 return 0; 3082 3083 #endif 3084 case MBOX_CMD: 3085 3086 /* 3087 * Which adapter 3088 */ 3089 if( (adapno = GETADAP(uioc.adapno)) >= hba_count ) 3090 return (-ENODEV); 3091 3092 adapter = hba_soft_state[adapno]; 3093 3094 /* 3095 * Deletion of logical drive is a special case. The adapter 3096 * should be quiescent before this command is issued. 3097 */ 3098 if( uioc.uioc_rmbox[0] == FC_DEL_LOGDRV && 3099 uioc.uioc_rmbox[2] == OP_DEL_LOGDRV ) { 3100 3101 /* 3102 * Do we support this feature 3103 */ 3104 if( !adapter->support_random_del ) { 3105 dev_warn(&adapter->dev->dev, "logdrv " 3106 "delete on non-supporting F/W\n"); 3107 3108 return (-EINVAL); 3109 } 3110 3111 rval = mega_del_logdrv( adapter, uioc.uioc_rmbox[3] ); 3112 3113 if( rval == 0 ) { 3114 memset(&mc, 0, sizeof(megacmd_t)); 3115 3116 mc.status = rval; 3117 3118 rval = mega_n_to_m((void __user *)arg, &mc); 3119 } 3120 3121 return rval; 3122 } 3123 /* 3124 * This interface only support the regular passthru commands. 3125 * Reject extended passthru and 64-bit passthru 3126 */ 3127 if( uioc.uioc_rmbox[0] == MEGA_MBOXCMD_PASSTHRU64 || 3128 uioc.uioc_rmbox[0] == MEGA_MBOXCMD_EXTPTHRU ) { 3129 3130 dev_warn(&adapter->dev->dev, "rejected passthru\n"); 3131 3132 return (-EINVAL); 3133 } 3134 3135 /* 3136 * For all internal commands, the buffer must be allocated in 3137 * <4GB address range 3138 */ 3139 if( make_local_pdev(adapter, &pdev) != 0 ) 3140 return -EIO; 3141 3142 /* Is it a passthru command or a DCMD */ 3143 if( uioc.uioc_rmbox[0] == MEGA_MBOXCMD_PASSTHRU ) { 3144 /* Passthru commands */ 3145 3146 pthru = dma_alloc_coherent(&pdev->dev, 3147 sizeof(mega_passthru), 3148 &pthru_dma_hndl, GFP_KERNEL); 3149 3150 if( pthru == NULL ) { 3151 free_local_pdev(pdev); 3152 return (-ENOMEM); 3153 } 3154 3155 /* 3156 * The user passthru structure 3157 */ 3158 upthru = (mega_passthru __user *)(unsigned long)MBOX(uioc)->xferaddr; 3159 3160 /* 3161 * Copy in the user passthru here. 
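			 * (pthru itself comes from dma_alloc_coherent() on the
			 * 32-bit-masked local pdev above, so the firmware can
			 * reach it; further down, dataxferaddr is rewritten to
			 * point at a kernel bounce buffer before the command
			 * is issued.)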
3162 */ 3163 if( copy_from_user(pthru, upthru, 3164 sizeof(mega_passthru)) ) { 3165 3166 dma_free_coherent(&pdev->dev, 3167 sizeof(mega_passthru), 3168 pthru, pthru_dma_hndl); 3169 3170 free_local_pdev(pdev); 3171 3172 return (-EFAULT); 3173 } 3174 3175 /* 3176 * Is there a data transfer 3177 */ 3178 if( pthru->dataxferlen ) { 3179 data = dma_alloc_coherent(&pdev->dev, 3180 pthru->dataxferlen, 3181 &data_dma_hndl, 3182 GFP_KERNEL); 3183 3184 if( data == NULL ) { 3185 dma_free_coherent(&pdev->dev, 3186 sizeof(mega_passthru), 3187 pthru, 3188 pthru_dma_hndl); 3189 3190 free_local_pdev(pdev); 3191 3192 return (-ENOMEM); 3193 } 3194 3195 /* 3196 * Save the user address and point the kernel 3197 * address at just allocated memory 3198 */ 3199 uxferaddr = pthru->dataxferaddr; 3200 pthru->dataxferaddr = data_dma_hndl; 3201 } 3202 3203 3204 /* 3205 * Is data coming down-stream 3206 */ 3207 if( pthru->dataxferlen && (uioc.flags & UIOC_WR) ) { 3208 /* 3209 * Get the user data 3210 */ 3211 if( copy_from_user(data, (char __user *)(unsigned long) uxferaddr, 3212 pthru->dataxferlen) ) { 3213 rval = (-EFAULT); 3214 goto freemem_and_return; 3215 } 3216 } 3217 3218 memset(&mc, 0, sizeof(megacmd_t)); 3219 3220 mc.cmd = MEGA_MBOXCMD_PASSTHRU; 3221 mc.xferaddr = (u32)pthru_dma_hndl; 3222 3223 /* 3224 * Issue the command 3225 */ 3226 mega_internal_command(adapter, &mc, pthru); 3227 3228 rval = mega_n_to_m((void __user *)arg, &mc); 3229 3230 if( rval ) goto freemem_and_return; 3231 3232 3233 /* 3234 * Is data going up-stream 3235 */ 3236 if( pthru->dataxferlen && (uioc.flags & UIOC_RD) ) { 3237 if( copy_to_user((char __user *)(unsigned long) uxferaddr, data, 3238 pthru->dataxferlen) ) { 3239 rval = (-EFAULT); 3240 } 3241 } 3242 3243 /* 3244 * Send the request sense data also, irrespective of 3245 * whether the user has asked for it or not. 
3246 */ 3247 if (copy_to_user(upthru->reqsensearea, 3248 pthru->reqsensearea, 14)) 3249 rval = -EFAULT; 3250 3251 freemem_and_return: 3252 if( pthru->dataxferlen ) { 3253 dma_free_coherent(&pdev->dev, 3254 pthru->dataxferlen, data, 3255 data_dma_hndl); 3256 } 3257 3258 dma_free_coherent(&pdev->dev, sizeof(mega_passthru), 3259 pthru, pthru_dma_hndl); 3260 3261 free_local_pdev(pdev); 3262 3263 return rval; 3264 } 3265 else { 3266 /* DCMD commands */ 3267 3268 /* 3269 * Is there a data transfer 3270 */ 3271 if( uioc.xferlen ) { 3272 data = dma_alloc_coherent(&pdev->dev, 3273 uioc.xferlen, 3274 &data_dma_hndl, 3275 GFP_KERNEL); 3276 3277 if( data == NULL ) { 3278 free_local_pdev(pdev); 3279 return (-ENOMEM); 3280 } 3281 3282 uxferaddr = MBOX(uioc)->xferaddr; 3283 } 3284 3285 /* 3286 * Is data coming down-stream 3287 */ 3288 if( uioc.xferlen && (uioc.flags & UIOC_WR) ) { 3289 /* 3290 * Get the user data 3291 */ 3292 if( copy_from_user(data, (char __user *)(unsigned long) uxferaddr, 3293 uioc.xferlen) ) { 3294 3295 dma_free_coherent(&pdev->dev, 3296 uioc.xferlen, data, 3297 data_dma_hndl); 3298 3299 free_local_pdev(pdev); 3300 3301 return (-EFAULT); 3302 } 3303 } 3304 3305 memcpy(&mc, MBOX(uioc), sizeof(megacmd_t)); 3306 3307 mc.xferaddr = (u32)data_dma_hndl; 3308 3309 /* 3310 * Issue the command 3311 */ 3312 mega_internal_command(adapter, &mc, NULL); 3313 3314 rval = mega_n_to_m((void __user *)arg, &mc); 3315 3316 if( rval ) { 3317 if( uioc.xferlen ) { 3318 dma_free_coherent(&pdev->dev, 3319 uioc.xferlen, data, 3320 data_dma_hndl); 3321 } 3322 3323 free_local_pdev(pdev); 3324 3325 return rval; 3326 } 3327 3328 /* 3329 * Is data going up-stream 3330 */ 3331 if( uioc.xferlen && (uioc.flags & UIOC_RD) ) { 3332 if( copy_to_user((char __user *)(unsigned long) uxferaddr, data, 3333 uioc.xferlen) ) { 3334 3335 rval = (-EFAULT); 3336 } 3337 } 3338 3339 if( uioc.xferlen ) { 3340 dma_free_coherent(&pdev->dev, uioc.xferlen, 3341 data, data_dma_hndl); 3342 } 3343 3344 free_local_pdev(pdev); 3345 3346 return rval; 3347 } 3348 3349 default: 3350 return (-EINVAL); 3351 } 3352 3353 return 0; 3354 } 3355 3356 static long 3357 megadev_unlocked_ioctl(struct file *filep, unsigned int cmd, unsigned long arg) 3358 { 3359 int ret; 3360 3361 mutex_lock(&megadev_mutex); 3362 ret = megadev_ioctl(filep, cmd, arg); 3363 mutex_unlock(&megadev_mutex); 3364 3365 return ret; 3366 } 3367 3368 /** 3369 * mega_m_to_n() 3370 * @arg: user address 3371 * @uioc: new ioctl structure 3372 * 3373 * A thin layer to convert older mimd interface ioctl structure to NIT ioctl 3374 * structure 3375 * 3376 * Converts the older mimd ioctl structure to newer NIT structure 3377 */ 3378 static int 3379 mega_m_to_n(void __user *arg, nitioctl_t *uioc) 3380 { 3381 struct uioctl_t uioc_mimd; 3382 char signature[8] = {0}; 3383 u8 opcode; 3384 u8 subopcode; 3385 3386 3387 /* 3388 * check is the application conforms to NIT. We do not have to do much 3389 * in that case. 3390 * We exploit the fact that the signature is stored in the very 3391 * beginning of the structure. 3392 */ 3393 3394 if( copy_from_user(signature, arg, 7) ) 3395 return (-EFAULT); 3396 3397 if( memcmp(signature, "MEGANIT", 7) == 0 ) { 3398 3399 /* 3400 * NOTE NOTE: The nit ioctl is still under flux because of 3401 * change of mailbox definition, in HPE. No applications yet 3402 * use this interface and let's not have applications use this 3403 * interface till the new specifitions are in place. 
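	 *
	 * For reference, the MIMD-to-NIT conversion performed by the switch
	 * below maps:
	 *
	 *	opcode 0x82, MEGAIOC_QDRVRVER  -> GET_DRIVER_VER
	 *	opcode 0x82, MEGAIOC_QNADAP    -> GET_N_ADAP
	 *	opcode 0x82, MEGAIOC_QADAPINFO -> GET_ADAP_INFO
	 *	opcode 0x81 or 0x80            -> MBOX_CMD (18-byte raw mailbox)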
3404 */ 3405 return -EINVAL; 3406 #if 0 3407 if( copy_from_user(uioc, arg, sizeof(nitioctl_t)) ) 3408 return (-EFAULT); 3409 return 0; 3410 #endif 3411 } 3412 3413 /* 3414 * Else assume we have mimd uioctl_t as arg. Convert to nitioctl_t 3415 * 3416 * Get the user ioctl structure 3417 */ 3418 if( copy_from_user(&uioc_mimd, arg, sizeof(struct uioctl_t)) ) 3419 return (-EFAULT); 3420 3421 3422 /* 3423 * Get the opcode and subopcode for the commands 3424 */ 3425 opcode = uioc_mimd.ui.fcs.opcode; 3426 subopcode = uioc_mimd.ui.fcs.subopcode; 3427 3428 switch (opcode) { 3429 case 0x82: 3430 3431 switch (subopcode) { 3432 3433 case MEGAIOC_QDRVRVER: /* Query driver version */ 3434 uioc->opcode = GET_DRIVER_VER; 3435 uioc->uioc_uaddr = uioc_mimd.data; 3436 break; 3437 3438 case MEGAIOC_QNADAP: /* Get # of adapters */ 3439 uioc->opcode = GET_N_ADAP; 3440 uioc->uioc_uaddr = uioc_mimd.data; 3441 break; 3442 3443 case MEGAIOC_QADAPINFO: /* Get adapter information */ 3444 uioc->opcode = GET_ADAP_INFO; 3445 uioc->adapno = uioc_mimd.ui.fcs.adapno; 3446 uioc->uioc_uaddr = uioc_mimd.data; 3447 break; 3448 3449 default: 3450 return(-EINVAL); 3451 } 3452 3453 break; 3454 3455 3456 case 0x81: 3457 3458 uioc->opcode = MBOX_CMD; 3459 uioc->adapno = uioc_mimd.ui.fcs.adapno; 3460 3461 memcpy(uioc->uioc_rmbox, uioc_mimd.mbox, 18); 3462 3463 uioc->xferlen = uioc_mimd.ui.fcs.length; 3464 3465 if( uioc_mimd.outlen ) uioc->flags = UIOC_RD; 3466 if( uioc_mimd.inlen ) uioc->flags |= UIOC_WR; 3467 3468 break; 3469 3470 case 0x80: 3471 3472 uioc->opcode = MBOX_CMD; 3473 uioc->adapno = uioc_mimd.ui.fcs.adapno; 3474 3475 memcpy(uioc->uioc_rmbox, uioc_mimd.mbox, 18); 3476 3477 /* 3478 * Choose the xferlen bigger of input and output data 3479 */ 3480 uioc->xferlen = uioc_mimd.outlen > uioc_mimd.inlen ? 3481 uioc_mimd.outlen : uioc_mimd.inlen; 3482 3483 if( uioc_mimd.outlen ) uioc->flags = UIOC_RD; 3484 if( uioc_mimd.inlen ) uioc->flags |= UIOC_WR; 3485 3486 break; 3487 3488 default: 3489 return (-EINVAL); 3490 3491 } 3492 3493 return 0; 3494 } 3495 3496 /* 3497 * mega_n_to_m() 3498 * @arg: user address 3499 * @mc: mailbox command 3500 * 3501 * Updates the status information to the application, depending on application 3502 * conforms to older mimd ioctl interface or newer NIT ioctl interface 3503 */ 3504 static int 3505 mega_n_to_m(void __user *arg, megacmd_t *mc) 3506 { 3507 nitioctl_t __user *uiocp; 3508 megacmd_t __user *umc; 3509 mega_passthru __user *upthru; 3510 struct uioctl_t __user *uioc_mimd; 3511 char signature[8] = {0}; 3512 3513 /* 3514 * check is the application conforms to NIT. 
3515 */ 3516 if( copy_from_user(signature, arg, 7) ) 3517 return -EFAULT; 3518 3519 if( memcmp(signature, "MEGANIT", 7) == 0 ) { 3520 3521 uiocp = arg; 3522 3523 if( put_user(mc->status, (u8 __user *)&MBOX_P(uiocp)->status) ) 3524 return (-EFAULT); 3525 3526 if( mc->cmd == MEGA_MBOXCMD_PASSTHRU ) { 3527 3528 umc = MBOX_P(uiocp); 3529 3530 if (get_user(upthru, (mega_passthru __user * __user *)&umc->xferaddr)) 3531 return -EFAULT; 3532 3533 if( put_user(mc->status, (u8 __user *)&upthru->scsistatus)) 3534 return (-EFAULT); 3535 } 3536 } 3537 else { 3538 uioc_mimd = arg; 3539 3540 if( put_user(mc->status, (u8 __user *)&uioc_mimd->mbox[17]) ) 3541 return (-EFAULT); 3542 3543 if( mc->cmd == MEGA_MBOXCMD_PASSTHRU ) { 3544 3545 umc = (megacmd_t __user *)uioc_mimd->mbox; 3546 3547 if (get_user(upthru, (mega_passthru __user * __user *)&umc->xferaddr)) 3548 return (-EFAULT); 3549 3550 if( put_user(mc->status, (u8 __user *)&upthru->scsistatus) ) 3551 return (-EFAULT); 3552 } 3553 } 3554 3555 return 0; 3556 } 3557 3558 3559 /* 3560 * MEGARAID 'FW' commands. 3561 */ 3562 3563 /** 3564 * mega_is_bios_enabled() 3565 * @adapter: pointer to our soft state 3566 * 3567 * issue command to find out if the BIOS is enabled for this controller 3568 */ 3569 static int 3570 mega_is_bios_enabled(adapter_t *adapter) 3571 { 3572 unsigned char raw_mbox[sizeof(struct mbox_out)]; 3573 mbox_t *mbox; 3574 3575 mbox = (mbox_t *)raw_mbox; 3576 3577 memset(&mbox->m_out, 0, sizeof(raw_mbox)); 3578 3579 memset((void *)adapter->mega_buffer, 0, MEGA_BUFFER_SIZE); 3580 3581 mbox->m_out.xferaddr = (u32)adapter->buf_dma_handle; 3582 3583 raw_mbox[0] = IS_BIOS_ENABLED; 3584 raw_mbox[2] = GET_BIOS; 3585 3586 issue_scb_block(adapter, raw_mbox); 3587 3588 return *(char *)adapter->mega_buffer; 3589 } 3590 3591 3592 /** 3593 * mega_enum_raid_scsi() 3594 * @adapter: pointer to our soft state 3595 * 3596 * Find out what channels are RAID/SCSI. This information is used to 3597 * differentiate the virtual channels and physical channels and to support 3598 * ROMB feature and non-disk devices. 3599 */ 3600 static void 3601 mega_enum_raid_scsi(adapter_t *adapter) 3602 { 3603 unsigned char raw_mbox[sizeof(struct mbox_out)]; 3604 mbox_t *mbox; 3605 int i; 3606 3607 mbox = (mbox_t *)raw_mbox; 3608 3609 memset(&mbox->m_out, 0, sizeof(raw_mbox)); 3610 3611 /* 3612 * issue command to find out what channels are raid/scsi 3613 */ 3614 raw_mbox[0] = CHNL_CLASS; 3615 raw_mbox[2] = GET_CHNL_CLASS; 3616 3617 memset((void *)adapter->mega_buffer, 0, MEGA_BUFFER_SIZE); 3618 3619 mbox->m_out.xferaddr = (u32)adapter->buf_dma_handle; 3620 3621 /* 3622 * Non-ROMB firmware fail this command, so all channels 3623 * must be shown RAID 3624 */ 3625 adapter->mega_ch_class = 0xFF; 3626 3627 if(!issue_scb_block(adapter, raw_mbox)) { 3628 adapter->mega_ch_class = *((char *)adapter->mega_buffer); 3629 3630 } 3631 3632 for( i = 0; i < adapter->product_info.nchannels; i++ ) { 3633 if( (adapter->mega_ch_class >> i) & 0x01 ) { 3634 dev_info(&adapter->dev->dev, "channel[%d] is raid\n", 3635 i); 3636 } 3637 else { 3638 dev_info(&adapter->dev->dev, "channel[%d] is scsi\n", 3639 i); 3640 } 3641 } 3642 3643 return; 3644 } 3645 3646 3647 /** 3648 * mega_get_boot_drv() 3649 * @adapter: pointer to our soft state 3650 * 3651 * Find out which device is the boot device. Note, any logical drive or any 3652 * phyical device (e.g., a CDROM) can be designated as a boot device. 
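 *
 * Encoding of the private BIOS data decoded below: if the MSB of boot_drv
 * is set, the remaining seven bits name a physical device, with
 * channel = (boot_drv & 0x7F) / 16 and target = (boot_drv & 0x7F) % 16;
 * otherwise boot_drv is simply the boot logical drive number. For example,
 * boot_drv == 0x93 selects channel 1, target 3, while 0x02 selects logical
 * drive 2. The block is trusted only if the 16-bit sum of its first 14
 * bytes matches the stored two's complement checksum.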
3653 */ 3654 static void 3655 mega_get_boot_drv(adapter_t *adapter) 3656 { 3657 struct private_bios_data *prv_bios_data; 3658 unsigned char raw_mbox[sizeof(struct mbox_out)]; 3659 mbox_t *mbox; 3660 u16 cksum = 0; 3661 u8 *cksum_p; 3662 u8 boot_pdrv; 3663 int i; 3664 3665 mbox = (mbox_t *)raw_mbox; 3666 3667 memset(&mbox->m_out, 0, sizeof(raw_mbox)); 3668 3669 raw_mbox[0] = BIOS_PVT_DATA; 3670 raw_mbox[2] = GET_BIOS_PVT_DATA; 3671 3672 memset((void *)adapter->mega_buffer, 0, MEGA_BUFFER_SIZE); 3673 3674 mbox->m_out.xferaddr = (u32)adapter->buf_dma_handle; 3675 3676 adapter->boot_ldrv_enabled = 0; 3677 adapter->boot_ldrv = 0; 3678 3679 adapter->boot_pdrv_enabled = 0; 3680 adapter->boot_pdrv_ch = 0; 3681 adapter->boot_pdrv_tgt = 0; 3682 3683 if(issue_scb_block(adapter, raw_mbox) == 0) { 3684 prv_bios_data = 3685 (struct private_bios_data *)adapter->mega_buffer; 3686 3687 cksum = 0; 3688 cksum_p = (char *)prv_bios_data; 3689 for (i = 0; i < 14; i++ ) { 3690 cksum += (u16)(*cksum_p++); 3691 } 3692 3693 if (prv_bios_data->cksum == (u16)(0-cksum) ) { 3694 3695 /* 3696 * If MSB is set, a physical drive is set as boot 3697 * device 3698 */ 3699 if( prv_bios_data->boot_drv & 0x80 ) { 3700 adapter->boot_pdrv_enabled = 1; 3701 boot_pdrv = prv_bios_data->boot_drv & 0x7F; 3702 adapter->boot_pdrv_ch = boot_pdrv / 16; 3703 adapter->boot_pdrv_tgt = boot_pdrv % 16; 3704 } 3705 else { 3706 adapter->boot_ldrv_enabled = 1; 3707 adapter->boot_ldrv = prv_bios_data->boot_drv; 3708 } 3709 } 3710 } 3711 3712 } 3713 3714 /** 3715 * mega_support_random_del() 3716 * @adapter: pointer to our soft state 3717 * 3718 * Find out if this controller supports random deletion and addition of 3719 * logical drives 3720 */ 3721 static int 3722 mega_support_random_del(adapter_t *adapter) 3723 { 3724 unsigned char raw_mbox[sizeof(struct mbox_out)]; 3725 mbox_t *mbox; 3726 int rval; 3727 3728 mbox = (mbox_t *)raw_mbox; 3729 3730 memset(&mbox->m_out, 0, sizeof(raw_mbox)); 3731 3732 /* 3733 * issue command 3734 */ 3735 raw_mbox[0] = FC_DEL_LOGDRV; 3736 raw_mbox[2] = OP_SUP_DEL_LOGDRV; 3737 3738 rval = issue_scb_block(adapter, raw_mbox); 3739 3740 return !rval; 3741 } 3742 3743 3744 /** 3745 * mega_support_ext_cdb() 3746 * @adapter: pointer to our soft state 3747 * 3748 * Find out if this firmware support cdblen > 10 3749 */ 3750 static int 3751 mega_support_ext_cdb(adapter_t *adapter) 3752 { 3753 unsigned char raw_mbox[sizeof(struct mbox_out)]; 3754 mbox_t *mbox; 3755 int rval; 3756 3757 mbox = (mbox_t *)raw_mbox; 3758 3759 memset(&mbox->m_out, 0, sizeof(raw_mbox)); 3760 /* 3761 * issue command to find out if controller supports extended CDBs. 3762 */ 3763 raw_mbox[0] = 0xA4; 3764 raw_mbox[2] = 0x16; 3765 3766 rval = issue_scb_block(adapter, raw_mbox); 3767 3768 return !rval; 3769 } 3770 3771 3772 /** 3773 * mega_del_logdrv() 3774 * @adapter: pointer to our soft state 3775 * @logdrv: logical drive to be deleted 3776 * 3777 * Delete the specified logical drive. It is the responsibility of the user 3778 * app to let the OS know about this operation. 3779 */ 3780 static int 3781 mega_del_logdrv(adapter_t *adapter, int logdrv) 3782 { 3783 unsigned long flags; 3784 scb_t *scb; 3785 int rval; 3786 3787 /* 3788 * Stop sending commands to the controller, queue them internally. 3789 * When deletion is complete, ISR will flush the queue. 
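	 *
	 * The sequence below is therefore: mark the adapter quiescent, wait
	 * for pend_cmds to drain and pending_list to empty, issue the delete
	 * through mega_do_del_logdrv(), add 0x80 to the logical drive id of
	 * any passthru commands still queued once read_ldidmap is set, then
	 * clear the quiescent flag and restart the pending queue.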
3790 */ 3791 atomic_set(&adapter->quiescent, 1); 3792 3793 /* 3794 * Wait till all the issued commands are complete and there are no 3795 * commands in the pending queue 3796 */ 3797 while (atomic_read(&adapter->pend_cmds) > 0 || 3798 !list_empty(&adapter->pending_list)) 3799 msleep(1000); /* sleep for 1s */ 3800 3801 rval = mega_do_del_logdrv(adapter, logdrv); 3802 3803 spin_lock_irqsave(&adapter->lock, flags); 3804 3805 /* 3806 * If delete operation was successful, add 0x80 to the logical drive 3807 * ids for commands in the pending queue. 3808 */ 3809 if (adapter->read_ldidmap) { 3810 struct list_head *pos; 3811 list_for_each(pos, &adapter->pending_list) { 3812 scb = list_entry(pos, scb_t, list); 3813 if (scb->pthru->logdrv < 0x80 ) 3814 scb->pthru->logdrv += 0x80; 3815 } 3816 } 3817 3818 atomic_set(&adapter->quiescent, 0); 3819 3820 mega_runpendq(adapter); 3821 3822 spin_unlock_irqrestore(&adapter->lock, flags); 3823 3824 return rval; 3825 } 3826 3827 3828 static int 3829 mega_do_del_logdrv(adapter_t *adapter, int logdrv) 3830 { 3831 megacmd_t mc; 3832 int rval; 3833 3834 memset( &mc, 0, sizeof(megacmd_t)); 3835 3836 mc.cmd = FC_DEL_LOGDRV; 3837 mc.opcode = OP_DEL_LOGDRV; 3838 mc.subopcode = logdrv; 3839 3840 rval = mega_internal_command(adapter, &mc, NULL); 3841 3842 /* log this event */ 3843 if(rval) { 3844 dev_warn(&adapter->dev->dev, "Delete LD-%d failed", logdrv); 3845 return rval; 3846 } 3847 3848 /* 3849 * After deleting first logical drive, the logical drives must be 3850 * addressed by adding 0x80 to the logical drive id. 3851 */ 3852 adapter->read_ldidmap = 1; 3853 3854 return rval; 3855 } 3856 3857 3858 /** 3859 * mega_get_max_sgl() 3860 * @adapter: pointer to our soft state 3861 * 3862 * Find out the maximum number of scatter-gather elements supported by this 3863 * version of the firmware 3864 */ 3865 static void 3866 mega_get_max_sgl(adapter_t *adapter) 3867 { 3868 unsigned char raw_mbox[sizeof(struct mbox_out)]; 3869 mbox_t *mbox; 3870 3871 mbox = (mbox_t *)raw_mbox; 3872 3873 memset(mbox, 0, sizeof(raw_mbox)); 3874 3875 memset((void *)adapter->mega_buffer, 0, MEGA_BUFFER_SIZE); 3876 3877 mbox->m_out.xferaddr = (u32)adapter->buf_dma_handle; 3878 3879 raw_mbox[0] = MAIN_MISC_OPCODE; 3880 raw_mbox[2] = GET_MAX_SG_SUPPORT; 3881 3882 3883 if( issue_scb_block(adapter, raw_mbox) ) { 3884 /* 3885 * f/w does not support this command. Choose the default value 3886 */ 3887 adapter->sglen = MIN_SGLIST; 3888 } 3889 else { 3890 adapter->sglen = *((char *)adapter->mega_buffer); 3891 3892 /* 3893 * Make sure this is not more than the resources we are 3894 * planning to allocate 3895 */ 3896 if ( adapter->sglen > MAX_SGLIST ) 3897 adapter->sglen = MAX_SGLIST; 3898 } 3899 3900 return; 3901 } 3902 3903 3904 /** 3905 * mega_support_cluster() 3906 * @adapter: pointer to our soft state 3907 * 3908 * Find out if this firmware support cluster calls. 3909 */ 3910 static int 3911 mega_support_cluster(adapter_t *adapter) 3912 { 3913 unsigned char raw_mbox[sizeof(struct mbox_out)]; 3914 mbox_t *mbox; 3915 3916 mbox = (mbox_t *)raw_mbox; 3917 3918 memset(mbox, 0, sizeof(raw_mbox)); 3919 3920 memset((void *)adapter->mega_buffer, 0, MEGA_BUFFER_SIZE); 3921 3922 mbox->m_out.xferaddr = (u32)adapter->buf_dma_handle; 3923 3924 /* 3925 * Try to get the initiator id. This command will succeed iff the 3926 * clustering is available on this HBA. 3927 */ 3928 raw_mbox[0] = MEGA_GET_TARGET_ID; 3929 3930 if( issue_scb_block(adapter, raw_mbox) == 0 ) { 3931 3932 /* 3933 * Cluster support available. 
Get the initiator target id. 3934 * Tell our id to mid-layer too. 3935 */ 3936 adapter->this_id = *(u32 *)adapter->mega_buffer; 3937 adapter->host->this_id = adapter->this_id; 3938 3939 return 1; 3940 } 3941 3942 return 0; 3943 } 3944 3945 #ifdef CONFIG_PROC_FS 3946 /** 3947 * mega_adapinq() 3948 * @adapter: pointer to our soft state 3949 * @dma_handle: DMA address of the buffer 3950 * 3951 * Issue internal commands while interrupts are available. 3952 * We only issue direct mailbox commands from within the driver. ioctl() 3953 * interface using these routines can issue passthru commands. 3954 */ 3955 static int 3956 mega_adapinq(adapter_t *adapter, dma_addr_t dma_handle) 3957 { 3958 megacmd_t mc; 3959 3960 memset(&mc, 0, sizeof(megacmd_t)); 3961 3962 if( adapter->flag & BOARD_40LD ) { 3963 mc.cmd = FC_NEW_CONFIG; 3964 mc.opcode = NC_SUBOP_ENQUIRY3; 3965 mc.subopcode = ENQ3_GET_SOLICITED_FULL; 3966 } 3967 else { 3968 mc.cmd = MEGA_MBOXCMD_ADPEXTINQ; 3969 } 3970 3971 mc.xferaddr = (u32)dma_handle; 3972 3973 if ( mega_internal_command(adapter, &mc, NULL) != 0 ) { 3974 return -1; 3975 } 3976 3977 return 0; 3978 } 3979 3980 3981 /** 3982 * mega_internal_dev_inquiry() 3983 * @adapter: pointer to our soft state 3984 * @ch: channel for this device 3985 * @tgt: ID of this device 3986 * @buf_dma_handle: DMA address of the buffer 3987 * 3988 * Issue the scsi inquiry for the specified device. 3989 */ 3990 static int 3991 mega_internal_dev_inquiry(adapter_t *adapter, u8 ch, u8 tgt, 3992 dma_addr_t buf_dma_handle) 3993 { 3994 mega_passthru *pthru; 3995 dma_addr_t pthru_dma_handle; 3996 megacmd_t mc; 3997 int rval; 3998 struct pci_dev *pdev; 3999 4000 4001 /* 4002 * For all internal commands, the buffer must be allocated in <4GB 4003 * address range 4004 */ 4005 if( make_local_pdev(adapter, &pdev) != 0 ) return -1; 4006 4007 pthru = dma_alloc_coherent(&pdev->dev, sizeof(mega_passthru), 4008 &pthru_dma_handle, GFP_KERNEL); 4009 4010 if( pthru == NULL ) { 4011 free_local_pdev(pdev); 4012 return -1; 4013 } 4014 4015 pthru->timeout = 2; 4016 pthru->ars = 1; 4017 pthru->reqsenselen = 14; 4018 pthru->islogical = 0; 4019 4020 pthru->channel = (adapter->flag & BOARD_40LD) ? 0 : ch; 4021 4022 pthru->target = (adapter->flag & BOARD_40LD) ? (ch << 4)|tgt : tgt; 4023 4024 pthru->cdblen = 6; 4025 4026 pthru->cdb[0] = INQUIRY; 4027 pthru->cdb[1] = 0; 4028 pthru->cdb[2] = 0; 4029 pthru->cdb[3] = 0; 4030 pthru->cdb[4] = 255; 4031 pthru->cdb[5] = 0; 4032 4033 4034 pthru->dataxferaddr = (u32)buf_dma_handle; 4035 pthru->dataxferlen = 256; 4036 4037 memset(&mc, 0, sizeof(megacmd_t)); 4038 4039 mc.cmd = MEGA_MBOXCMD_PASSTHRU; 4040 mc.xferaddr = (u32)pthru_dma_handle; 4041 4042 rval = mega_internal_command(adapter, &mc, pthru); 4043 4044 dma_free_coherent(&pdev->dev, sizeof(mega_passthru), pthru, 4045 pthru_dma_handle); 4046 4047 free_local_pdev(pdev); 4048 4049 return rval; 4050 } 4051 #endif 4052 4053 /** 4054 * mega_internal_command() 4055 * @adapter: pointer to our soft state 4056 * @mc: the mailbox command 4057 * @pthru: Passthru structure for DCDB commands 4058 * 4059 * Issue the internal commands in interrupt mode. 4060 * The last argument is the address of the passthru structure if the command 4061 * to be fired is a passthru command 4062 * 4063 * Note: parameter 'pthru' is null for non-passthru commands. 
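 *
 * Typical non-passthru usage, following the BOARD_40LD branch of
 * mega_adapinq() above:
 *
 *	megacmd_t mc;
 *
 *	memset(&mc, 0, sizeof(megacmd_t));
 *	mc.cmd       = FC_NEW_CONFIG;
 *	mc.opcode    = NC_SUBOP_ENQUIRY3;
 *	mc.subopcode = ENQ3_GET_SOLICITED_FULL;
 *	mc.xferaddr  = (u32)dma_handle;	/* coherent buffer below 4GB */
 *	if (mega_internal_command(adapter, &mc, NULL) != 0)
 *		return -1;	/* mc.status carries the firmware status */
 *
 * Passthru callers additionally pass the kernel-virtual address of the
 * passthru structure, as mega_internal_dev_inquiry() above does.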
4064 */ 4065 static int 4066 mega_internal_command(adapter_t *adapter, megacmd_t *mc, mega_passthru *pthru) 4067 { 4068 unsigned long flags; 4069 scb_t *scb; 4070 int rval; 4071 4072 /* 4073 * The internal commands share one command id and hence are 4074 * serialized. This is so because we want to reserve maximum number of 4075 * available command ids for the I/O commands. 4076 */ 4077 mutex_lock(&adapter->int_mtx); 4078 4079 scb = &adapter->int_scb; 4080 memset(scb, 0, sizeof(scb_t)); 4081 4082 scb->idx = CMDID_INT_CMDS; 4083 scb->state |= SCB_ACTIVE | SCB_PENDQ; 4084 4085 memcpy(scb->raw_mbox, mc, sizeof(megacmd_t)); 4086 4087 /* 4088 * Is it a passthru command 4089 */ 4090 if (mc->cmd == MEGA_MBOXCMD_PASSTHRU) 4091 scb->pthru = pthru; 4092 4093 spin_lock_irqsave(&adapter->lock, flags); 4094 list_add_tail(&scb->list, &adapter->pending_list); 4095 /* 4096 * Check if the HBA is in quiescent state, e.g., during a 4097 * delete logical drive opertion. If it is, don't run 4098 * the pending_list. 4099 */ 4100 if (atomic_read(&adapter->quiescent) == 0) 4101 mega_runpendq(adapter); 4102 spin_unlock_irqrestore(&adapter->lock, flags); 4103 4104 wait_for_completion(&adapter->int_waitq); 4105 4106 mc->status = rval = adapter->int_status; 4107 4108 /* 4109 * Print a debug message for all failed commands. Applications can use 4110 * this information. 4111 */ 4112 if (rval && trace_level) { 4113 dev_info(&adapter->dev->dev, "cmd [%x, %x, %x] status:[%x]\n", 4114 mc->cmd, mc->opcode, mc->subopcode, rval); 4115 } 4116 4117 mutex_unlock(&adapter->int_mtx); 4118 return rval; 4119 } 4120 4121 static struct scsi_host_template megaraid_template = { 4122 .module = THIS_MODULE, 4123 .name = "MegaRAID", 4124 .proc_name = "megaraid_legacy", 4125 .info = megaraid_info, 4126 .queuecommand = megaraid_queue, 4127 .bios_param = megaraid_biosparam, 4128 .max_sectors = MAX_SECTORS_PER_IO, 4129 .can_queue = MAX_COMMANDS, 4130 .this_id = DEFAULT_INITIATOR_ID, 4131 .sg_tablesize = MAX_SGLIST, 4132 .cmd_per_lun = DEF_CMD_PER_LUN, 4133 .eh_abort_handler = megaraid_abort, 4134 .eh_device_reset_handler = megaraid_reset, 4135 .eh_bus_reset_handler = megaraid_reset, 4136 .eh_host_reset_handler = megaraid_reset, 4137 .no_write_same = 1, 4138 }; 4139 4140 static int 4141 megaraid_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) 4142 { 4143 struct Scsi_Host *host; 4144 adapter_t *adapter; 4145 unsigned long mega_baseport, tbase, flag = 0; 4146 u16 subsysid, subsysvid; 4147 u8 pci_bus, pci_dev_func; 4148 int irq, i, j; 4149 int error = -ENODEV; 4150 4151 if (hba_count >= MAX_CONTROLLERS) 4152 goto out; 4153 4154 if (pci_enable_device(pdev)) 4155 goto out; 4156 pci_set_master(pdev); 4157 4158 pci_bus = pdev->bus->number; 4159 pci_dev_func = pdev->devfn; 4160 4161 /* 4162 * The megaraid3 stuff reports the ID of the Intel part which is not 4163 * remotely specific to the megaraid 4164 */ 4165 if (pdev->vendor == PCI_VENDOR_ID_INTEL) { 4166 u16 magic; 4167 /* 4168 * Don't fall over the Compaq management cards using the same 4169 * PCI identifier 4170 */ 4171 if (pdev->subsystem_vendor == PCI_VENDOR_ID_COMPAQ && 4172 pdev->subsystem_device == 0xC000) 4173 goto out_disable_device; 4174 /* Now check the magic signature byte */ 4175 pci_read_config_word(pdev, PCI_CONF_AMISIG, &magic); 4176 if (magic != HBA_SIGNATURE_471 && magic != HBA_SIGNATURE) 4177 goto out_disable_device; 4178 /* Ok it is probably a megaraid */ 4179 } 4180 4181 /* 4182 * For these vendor and device ids, signature offsets are not 4183 * valid and 64 bit 
is implicit 4184 */ 4185 if (id->driver_data & BOARD_64BIT) 4186 flag |= BOARD_64BIT; 4187 else { 4188 u32 magic64; 4189 4190 pci_read_config_dword(pdev, PCI_CONF_AMISIG64, &magic64); 4191 if (magic64 == HBA_SIGNATURE_64BIT) 4192 flag |= BOARD_64BIT; 4193 } 4194 4195 subsysvid = pdev->subsystem_vendor; 4196 subsysid = pdev->subsystem_device; 4197 4198 dev_notice(&pdev->dev, "found 0x%4.04x:0x%4.04x\n", 4199 id->vendor, id->device); 4200 4201 /* Read the base port and IRQ from PCI */ 4202 mega_baseport = pci_resource_start(pdev, 0); 4203 irq = pdev->irq; 4204 4205 tbase = mega_baseport; 4206 if (pci_resource_flags(pdev, 0) & IORESOURCE_MEM) { 4207 flag |= BOARD_MEMMAP; 4208 4209 if (!request_mem_region(mega_baseport, 128, "megaraid")) { 4210 dev_warn(&pdev->dev, "mem region busy!\n"); 4211 goto out_disable_device; 4212 } 4213 4214 mega_baseport = (unsigned long)ioremap(mega_baseport, 128); 4215 if (!mega_baseport) { 4216 dev_warn(&pdev->dev, "could not map hba memory\n"); 4217 goto out_release_region; 4218 } 4219 } else { 4220 flag |= BOARD_IOMAP; 4221 mega_baseport += 0x10; 4222 4223 if (!request_region(mega_baseport, 16, "megaraid")) 4224 goto out_disable_device; 4225 } 4226 4227 /* Initialize SCSI Host structure */ 4228 host = scsi_host_alloc(&megaraid_template, sizeof(adapter_t)); 4229 if (!host) 4230 goto out_iounmap; 4231 4232 adapter = (adapter_t *)host->hostdata; 4233 memset(adapter, 0, sizeof(adapter_t)); 4234 4235 dev_notice(&pdev->dev, 4236 "scsi%d:Found MegaRAID controller at 0x%lx, IRQ:%d\n", 4237 host->host_no, mega_baseport, irq); 4238 4239 adapter->base = mega_baseport; 4240 if (flag & BOARD_MEMMAP) 4241 adapter->mmio_base = (void __iomem *) mega_baseport; 4242 4243 INIT_LIST_HEAD(&adapter->free_list); 4244 INIT_LIST_HEAD(&adapter->pending_list); 4245 INIT_LIST_HEAD(&adapter->completed_list); 4246 4247 adapter->flag = flag; 4248 spin_lock_init(&adapter->lock); 4249 4250 host->cmd_per_lun = max_cmd_per_lun; 4251 host->max_sectors = max_sectors_per_io; 4252 4253 adapter->dev = pdev; 4254 adapter->host = host; 4255 4256 adapter->host->irq = irq; 4257 4258 if (flag & BOARD_MEMMAP) 4259 adapter->host->base = tbase; 4260 else { 4261 adapter->host->io_port = tbase; 4262 adapter->host->n_io_port = 16; 4263 } 4264 4265 adapter->host->unique_id = (pci_bus << 8) | pci_dev_func; 4266 4267 /* 4268 * Allocate buffer to issue internal commands. 4269 */ 4270 adapter->mega_buffer = dma_alloc_coherent(&adapter->dev->dev, 4271 MEGA_BUFFER_SIZE, 4272 &adapter->buf_dma_handle, 4273 GFP_KERNEL); 4274 if (!adapter->mega_buffer) { 4275 dev_warn(&pdev->dev, "out of RAM\n"); 4276 goto out_host_put; 4277 } 4278 4279 adapter->scb_list = kmalloc_array(MAX_COMMANDS, sizeof(scb_t), 4280 GFP_KERNEL); 4281 if (!adapter->scb_list) { 4282 dev_warn(&pdev->dev, "out of RAM\n"); 4283 goto out_free_cmd_buffer; 4284 } 4285 4286 if (request_irq(irq, (adapter->flag & BOARD_MEMMAP) ? 
4287 megaraid_isr_memmapped : megaraid_isr_iomapped, 4288 IRQF_SHARED, "megaraid", adapter)) { 4289 dev_warn(&pdev->dev, "Couldn't register IRQ %d!\n", irq); 4290 goto out_free_scb_list; 4291 } 4292 4293 if (mega_setup_mailbox(adapter)) 4294 goto out_free_irq; 4295 4296 if (mega_query_adapter(adapter)) 4297 goto out_free_mbox; 4298 4299 /* 4300 * Have checks for some buggy f/w 4301 */ 4302 if ((subsysid == 0x1111) && (subsysvid == 0x1111)) { 4303 /* 4304 * Which firmware 4305 */ 4306 if (!strcmp(adapter->fw_version, "3.00") || 4307 !strcmp(adapter->fw_version, "3.01")) { 4308 4309 dev_warn(&pdev->dev, 4310 "Your card is a Dell PERC " 4311 "2/SC RAID controller with " 4312 "firmware\nmegaraid: 3.00 or 3.01. " 4313 "This driver is known to have " 4314 "corruption issues\nmegaraid: with " 4315 "those firmware versions on this " 4316 "specific card. In order\nmegaraid: " 4317 "to protect your data, please upgrade " 4318 "your firmware to version\nmegaraid: " 4319 "3.10 or later, available from the " 4320 "Dell Technical Support web\n" 4321 "megaraid: site at\nhttp://support." 4322 "dell.com/us/en/filelib/download/" 4323 "index.asp?fileid=2940\n" 4324 ); 4325 } 4326 } 4327 4328 /* 4329 * If we have a HP 1M(0x60E7)/2M(0x60E8) controller with 4330 * firmware H.01.07, H.01.08, and H.01.09 disable 64 bit 4331 * support, since this firmware cannot handle 64 bit 4332 * addressing 4333 */ 4334 if ((subsysvid == PCI_VENDOR_ID_HP) && 4335 ((subsysid == 0x60E7) || (subsysid == 0x60E8))) { 4336 /* 4337 * which firmware 4338 */ 4339 if (!strcmp(adapter->fw_version, "H01.07") || 4340 !strcmp(adapter->fw_version, "H01.08") || 4341 !strcmp(adapter->fw_version, "H01.09") ) { 4342 dev_warn(&pdev->dev, 4343 "Firmware H.01.07, " 4344 "H.01.08, and H.01.09 on 1M/2M " 4345 "controllers\n" 4346 "do not support 64 bit " 4347 "addressing.\nDISABLING " 4348 "64 bit support.\n"); 4349 adapter->flag &= ~BOARD_64BIT; 4350 } 4351 } 4352 4353 if (mega_is_bios_enabled(adapter)) 4354 mega_hbas[hba_count].is_bios_enabled = 1; 4355 mega_hbas[hba_count].hostdata_addr = adapter; 4356 4357 /* 4358 * Find out which channel is raid and which is scsi. This is 4359 * for ROMB support. 4360 */ 4361 mega_enum_raid_scsi(adapter); 4362 4363 /* 4364 * Find out if a logical drive is set as the boot drive. If 4365 * there is one, will make that as the first logical drive. 4366 * ROMB: Do we have to boot from a physical drive. Then all 4367 * the physical drives would appear before the logical disks. 4368 * Else, all the physical drives would be exported to the mid 4369 * layer after logical drives. 
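	 *
	 * Concretely (see the logdrv_chan[] setup just below): when booting
	 * from a physical drive the first product_info.nchannels entries are
	 * marked physical and the NVIRT_CHAN virtual channels follow;
	 * otherwise the virtual channels come first and mega_ch_class is
	 * shifted left by NVIRT_CHAN so the RAID/SCSI classification still
	 * lines up with the renumbered channels.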
4370 */ 4371 mega_get_boot_drv(adapter); 4372 4373 if (adapter->boot_pdrv_enabled) { 4374 j = adapter->product_info.nchannels; 4375 for( i = 0; i < j; i++ ) 4376 adapter->logdrv_chan[i] = 0; 4377 for( i = j; i < NVIRT_CHAN + j; i++ ) 4378 adapter->logdrv_chan[i] = 1; 4379 } else { 4380 for (i = 0; i < NVIRT_CHAN; i++) 4381 adapter->logdrv_chan[i] = 1; 4382 for (i = NVIRT_CHAN; i < MAX_CHANNELS+NVIRT_CHAN; i++) 4383 adapter->logdrv_chan[i] = 0; 4384 adapter->mega_ch_class <<= NVIRT_CHAN; 4385 } 4386 4387 /* 4388 * Do we support random deletion and addition of logical 4389 * drives 4390 */ 4391 adapter->read_ldidmap = 0; /* set it after first logdrv 4392 delete cmd */ 4393 adapter->support_random_del = mega_support_random_del(adapter); 4394 4395 /* Initialize SCBs */ 4396 if (mega_init_scb(adapter)) 4397 goto out_free_mbox; 4398 4399 /* 4400 * Reset the pending commands counter 4401 */ 4402 atomic_set(&adapter->pend_cmds, 0); 4403 4404 /* 4405 * Reset the adapter quiescent flag 4406 */ 4407 atomic_set(&adapter->quiescent, 0); 4408 4409 hba_soft_state[hba_count] = adapter; 4410 4411 /* 4412 * Fill in the structure which needs to be passed back to the 4413 * application when it does an ioctl() for controller related 4414 * information. 4415 */ 4416 i = hba_count; 4417 4418 mcontroller[i].base = mega_baseport; 4419 mcontroller[i].irq = irq; 4420 mcontroller[i].numldrv = adapter->numldrv; 4421 mcontroller[i].pcibus = pci_bus; 4422 mcontroller[i].pcidev = id->device; 4423 mcontroller[i].pcifun = PCI_FUNC (pci_dev_func); 4424 mcontroller[i].pciid = -1; 4425 mcontroller[i].pcivendor = id->vendor; 4426 mcontroller[i].pcislot = PCI_SLOT(pci_dev_func); 4427 mcontroller[i].uid = (pci_bus << 8) | pci_dev_func; 4428 4429 4430 /* Set the Mode of addressing to 64 bit if we can */ 4431 if ((adapter->flag & BOARD_64BIT) && (sizeof(dma_addr_t) == 8)) { 4432 dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)); 4433 adapter->has_64bit_addr = 1; 4434 } else { 4435 dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); 4436 adapter->has_64bit_addr = 0; 4437 } 4438 4439 mutex_init(&adapter->int_mtx); 4440 init_completion(&adapter->int_waitq); 4441 4442 adapter->this_id = DEFAULT_INITIATOR_ID; 4443 adapter->host->this_id = DEFAULT_INITIATOR_ID; 4444 4445 #if MEGA_HAVE_CLUSTERING 4446 /* 4447 * Is cluster support enabled on this controller 4448 * Note: In a cluster the HBAs ( the initiators ) will have 4449 * different target IDs and we cannot assume it to be 7. 
out_free_mbox:
	dma_free_coherent(&adapter->dev->dev, sizeof(mbox64_t),
			adapter->una_mbox64, adapter->una_mbox64_dma);
out_free_irq:
	free_irq(adapter->host->irq, adapter);
out_free_scb_list:
	kfree(adapter->scb_list);
out_free_cmd_buffer:
	dma_free_coherent(&adapter->dev->dev, MEGA_BUFFER_SIZE,
			adapter->mega_buffer, adapter->buf_dma_handle);
out_host_put:
	scsi_host_put(host);
out_iounmap:
	if (flag & BOARD_MEMMAP)
		iounmap((void *)mega_baseport);
out_release_region:
	if (flag & BOARD_MEMMAP)
		release_mem_region(tbase, 128);
	else
		release_region(mega_baseport, 16);
out_disable_device:
	pci_disable_device(pdev);
out:
	return error;
}

static void
__megaraid_shutdown(adapter_t *adapter)
{
	u_char	raw_mbox[sizeof(struct mbox_out)];
	mbox_t	*mbox = (mbox_t *)raw_mbox;
	int	i;

	/* Flush adapter cache */
	memset(&mbox->m_out, 0, sizeof(raw_mbox));
	raw_mbox[0] = FLUSH_ADAPTER;

	free_irq(adapter->host->irq, adapter);

	/* Issue a blocking (interrupts disabled) command to the card */
	issue_scb_block(adapter, raw_mbox);

	/* Flush disks cache */
	memset(&mbox->m_out, 0, sizeof(raw_mbox));
	raw_mbox[0] = FLUSH_SYSTEM;

	/* Issue a blocking (interrupts disabled) command to the card */
	issue_scb_block(adapter, raw_mbox);

	if (atomic_read(&adapter->pend_cmds) > 0)
		dev_warn(&adapter->dev->dev, "pending commands!!\n");

	/*
	 * Have a deliberate delay to make sure all the caches are
	 * actually flushed.
	 */
	for (i = 0; i <= 10; i++)
		mdelay(1000);
}

static void
megaraid_remove_one(struct pci_dev *pdev)
{
	struct Scsi_Host *host = pci_get_drvdata(pdev);
	adapter_t *adapter = (adapter_t *)host->hostdata;
	char buf[12] = { 0 };

	scsi_remove_host(host);

	__megaraid_shutdown(adapter);

	/* Free our resources */
	if (adapter->flag & BOARD_MEMMAP) {
		iounmap((void *)adapter->base);
		release_mem_region(adapter->host->base, 128);
	} else
		release_region(adapter->base, 16);

	mega_free_sgl(adapter);

	sprintf(buf, "hba%d", adapter->host->host_no);
	remove_proc_subtree(buf, mega_proc_dir_entry);

	dma_free_coherent(&adapter->dev->dev, MEGA_BUFFER_SIZE,
			adapter->mega_buffer, adapter->buf_dma_handle);
	kfree(adapter->scb_list);
	dma_free_coherent(&adapter->dev->dev, sizeof(mbox64_t),
			adapter->una_mbox64, adapter->una_mbox64_dma);

	scsi_host_put(host);
	pci_disable_device(pdev);

	hba_count--;
}

static void
megaraid_shutdown(struct pci_dev *pdev)
{
	struct Scsi_Host *host = pci_get_drvdata(pdev);
	adapter_t *adapter = (adapter_t *)host->hostdata;

	__megaraid_shutdown(adapter);
}
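
/*
 * Note that the PCI .shutdown hook above only flushes the adapter and
 * disk caches through __megaraid_shutdown(); unlike the .remove path
 * it does not release regions, DMA buffers or the Scsi_Host, since the
 * machine is going down anyway.
 */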

static struct pci_device_id megaraid_pci_tbl[] = {
	{PCI_VENDOR_ID_AMI, PCI_DEVICE_ID_AMI_MEGARAID,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
	{PCI_VENDOR_ID_AMI, PCI_DEVICE_ID_AMI_MEGARAID2,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
	{PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_AMI_MEGARAID3,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
	{0,}
};
MODULE_DEVICE_TABLE(pci, megaraid_pci_tbl);

static struct pci_driver megaraid_pci_driver = {
	.name		= "megaraid_legacy",
	.id_table	= megaraid_pci_tbl,
	.probe		= megaraid_probe_one,
	.remove		= megaraid_remove_one,
	.shutdown	= megaraid_shutdown,
};

static int __init megaraid_init(void)
{
	int error;

	if ((max_cmd_per_lun <= 0) || (max_cmd_per_lun > MAX_CMD_PER_LUN))
		max_cmd_per_lun = MAX_CMD_PER_LUN;
	if (max_mbox_busy_wait > MBOX_BUSY_WAIT)
		max_mbox_busy_wait = MBOX_BUSY_WAIT;

#ifdef CONFIG_PROC_FS
	mega_proc_dir_entry = proc_mkdir("megaraid", NULL);
	if (!mega_proc_dir_entry) {
		printk(KERN_WARNING
				"megaraid: failed to create megaraid root\n");
	}
#endif
	error = pci_register_driver(&megaraid_pci_driver);
	if (error) {
#ifdef CONFIG_PROC_FS
		remove_proc_entry("megaraid", NULL);
#endif
		return error;
	}

	/*
	 * Register the driver as a character device, for applications
	 * to access it for ioctls. Passing 0 as the first (major)
	 * argument to register_chrdev requests dynamic major number
	 * allocation; on success the allocated major is returned, on
	 * failure a negative errno.
	 */
	major = register_chrdev(0, "megadev_legacy", &megadev_fops);
	if (major < 0) {
		printk(KERN_WARNING
				"megaraid: failed to register char device\n");
	}

	return 0;
}

static void __exit megaraid_exit(void)
{
	/*
	 * Unregister the character device interface to the driver.
	 */
	unregister_chrdev(major, "megadev_legacy");

	pci_unregister_driver(&megaraid_pci_driver);

#ifdef CONFIG_PROC_FS
	remove_proc_entry("megaraid", NULL);
#endif
}

module_init(megaraid_init);
module_exit(megaraid_exit);

/* vi: set ts=8 sw=8 tw=78: */
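
/*
 * Usage note (the device node name below is only an example; the
 * driver itself does not create it): the ioctl interface registered
 * in megaraid_init() uses a dynamically allocated character major.
 * The number assigned to "megadev_legacy" can be read from
 * /proc/devices, after which a node can be created by hand, e.g.
 *
 *	mknod /dev/megadev_legacy c <major> 0
 *
 * for management applications to open.
 */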