1 // SPDX-License-Identifier: GPL-2.0-or-later 2 /* 3 * 4 * Linux MegaRAID device driver 5 * 6 * Copyright (c) 2002 LSI Logic Corporation. 7 * 8 * Copyright (c) 2002 Red Hat, Inc. All rights reserved. 9 * - fixes 10 * - speed-ups (list handling fixes, issued_list, optimizations.) 11 * - lots of cleanups. 12 * 13 * Copyright (c) 2003 Christoph Hellwig <hch@lst.de> 14 * - new-style, hotplug-aware pci probing and scsi registration 15 * 16 * Version : v2.00.4 Mon Nov 14 14:02:43 EST 2005 - Seokmann Ju 17 * <Seokmann.Ju@lsil.com> 18 * 19 * Description: Linux device driver for LSI Logic MegaRAID controller 20 * 21 * Supported controllers: MegaRAID 418, 428, 438, 466, 762, 467, 471, 490, 493 22 * 518, 520, 531, 532 23 * 24 * This driver is supported by LSI Logic, with assistance from Red Hat, Dell, 25 * and others. Please send updates to the mailing list 26 * linux-scsi@vger.kernel.org . 27 */ 28 29 #include <linux/mm.h> 30 #include <linux/fs.h> 31 #include <linux/blkdev.h> 32 #include <linux/uaccess.h> 33 #include <asm/io.h> 34 #include <linux/completion.h> 35 #include <linux/delay.h> 36 #include <linux/proc_fs.h> 37 #include <linux/seq_file.h> 38 #include <linux/reboot.h> 39 #include <linux/module.h> 40 #include <linux/list.h> 41 #include <linux/interrupt.h> 42 #include <linux/pci.h> 43 #include <linux/init.h> 44 #include <linux/dma-mapping.h> 45 #include <linux/mutex.h> 46 #include <linux/slab.h> 47 #include <scsi/scsicam.h> 48 49 #include "scsi.h" 50 #include <scsi/scsi_host.h> 51 52 #include "megaraid.h" 53 54 #define MEGARAID_MODULE_VERSION "2.00.4" 55 56 MODULE_AUTHOR ("sju@lsil.com"); 57 MODULE_DESCRIPTION ("LSI Logic MegaRAID legacy driver"); 58 MODULE_LICENSE ("GPL"); 59 MODULE_VERSION(MEGARAID_MODULE_VERSION); 60 61 static DEFINE_MUTEX(megadev_mutex); 62 static unsigned int max_cmd_per_lun = DEF_CMD_PER_LUN; 63 module_param(max_cmd_per_lun, uint, 0); 64 MODULE_PARM_DESC(max_cmd_per_lun, "Maximum number of commands which can be issued to a single LUN (default=DEF_CMD_PER_LUN=63)"); 65 66 static unsigned short int max_sectors_per_io = MAX_SECTORS_PER_IO; 67 module_param(max_sectors_per_io, ushort, 0); 68 MODULE_PARM_DESC(max_sectors_per_io, "Maximum number of sectors per I/O request (default=MAX_SECTORS_PER_IO=128)"); 69 70 71 static unsigned short int max_mbox_busy_wait = MBOX_BUSY_WAIT; 72 module_param(max_mbox_busy_wait, ushort, 0); 73 MODULE_PARM_DESC(max_mbox_busy_wait, "Maximum wait for mailbox in microseconds if busy (default=MBOX_BUSY_WAIT=10)"); 74 75 #define RDINDOOR(adapter) readl((adapter)->mmio_base + 0x20) 76 #define RDOUTDOOR(adapter) readl((adapter)->mmio_base + 0x2C) 77 #define WRINDOOR(adapter,value) writel(value, (adapter)->mmio_base + 0x20) 78 #define WROUTDOOR(adapter,value) writel(value, (adapter)->mmio_base + 0x2C) 79 80 /* 81 * Global variables 82 */ 83 84 static int hba_count; 85 static adapter_t *hba_soft_state[MAX_CONTROLLERS]; 86 static struct proc_dir_entry *mega_proc_dir_entry; 87 88 /* For controller re-ordering */ 89 static struct mega_hbas mega_hbas[MAX_CONTROLLERS]; 90 91 static long 92 megadev_unlocked_ioctl(struct file *filep, unsigned int cmd, unsigned long arg); 93 94 /* 95 * The File Operations structure for the serial/ioctl interface of the driver 96 */ 97 static const struct file_operations megadev_fops = { 98 .owner = THIS_MODULE, 99 .unlocked_ioctl = megadev_unlocked_ioctl, 100 .open = megadev_open, 101 .llseek = noop_llseek, 102 }; 103 104 /* 105 * Array to structures for storing the information about the controllers. 
This 106 * information is sent to the user level applications, when they do an ioctl 107 * for this information. 108 */ 109 static struct mcontroller mcontroller[MAX_CONTROLLERS]; 110 111 /* The current driver version */ 112 static u32 driver_ver = 0x02000000; 113 114 /* major number used by the device for character interface */ 115 static int major; 116 117 #define IS_RAID_CH(hba, ch) (((hba)->mega_ch_class >> (ch)) & 0x01) 118 119 120 /* 121 * Debug variable to print some diagnostic messages 122 */ 123 static int trace_level; 124 125 /** 126 * mega_setup_mailbox() 127 * @adapter: pointer to our soft state 128 * 129 * Allocates a 8 byte aligned memory for the handshake mailbox. 130 */ 131 static int 132 mega_setup_mailbox(adapter_t *adapter) 133 { 134 unsigned long align; 135 136 adapter->una_mbox64 = dma_alloc_coherent(&adapter->dev->dev, 137 sizeof(mbox64_t), 138 &adapter->una_mbox64_dma, 139 GFP_KERNEL); 140 141 if( !adapter->una_mbox64 ) return -1; 142 143 adapter->mbox = &adapter->una_mbox64->mbox; 144 145 adapter->mbox = (mbox_t *)((((unsigned long) adapter->mbox) + 15) & 146 (~0UL ^ 0xFUL)); 147 148 adapter->mbox64 = (mbox64_t *)(((unsigned long)adapter->mbox) - 8); 149 150 align = ((void *)adapter->mbox) - ((void *)&adapter->una_mbox64->mbox); 151 152 adapter->mbox_dma = adapter->una_mbox64_dma + 8 + align; 153 154 /* 155 * Register the mailbox if the controller is an io-mapped controller 156 */ 157 if( adapter->flag & BOARD_IOMAP ) { 158 159 outb(adapter->mbox_dma & 0xFF, 160 adapter->host->io_port + MBOX_PORT0); 161 162 outb((adapter->mbox_dma >> 8) & 0xFF, 163 adapter->host->io_port + MBOX_PORT1); 164 165 outb((adapter->mbox_dma >> 16) & 0xFF, 166 adapter->host->io_port + MBOX_PORT2); 167 168 outb((adapter->mbox_dma >> 24) & 0xFF, 169 adapter->host->io_port + MBOX_PORT3); 170 171 outb(ENABLE_MBOX_BYTE, 172 adapter->host->io_port + ENABLE_MBOX_REGION); 173 174 irq_ack(adapter); 175 176 irq_enable(adapter); 177 } 178 179 return 0; 180 } 181 182 183 /* 184 * mega_query_adapter() 185 * @adapter - pointer to our soft state 186 * 187 * Issue the adapter inquiry commands to the controller and find out 188 * information and parameter about the devices attached 189 */ 190 static int 191 mega_query_adapter(adapter_t *adapter) 192 { 193 dma_addr_t prod_info_dma_handle; 194 mega_inquiry3 *inquiry3; 195 u8 raw_mbox[sizeof(struct mbox_out)]; 196 mbox_t *mbox; 197 int retval; 198 199 /* Initialize adapter inquiry mailbox */ 200 201 mbox = (mbox_t *)raw_mbox; 202 203 memset((void *)adapter->mega_buffer, 0, MEGA_BUFFER_SIZE); 204 memset(&mbox->m_out, 0, sizeof(raw_mbox)); 205 206 /* 207 * Try to issue Inquiry3 command 208 * if not succeeded, then issue MEGA_MBOXCMD_ADAPTERINQ command and 209 * update enquiry3 structure 210 */ 211 mbox->m_out.xferaddr = (u32)adapter->buf_dma_handle; 212 213 inquiry3 = (mega_inquiry3 *)adapter->mega_buffer; 214 215 raw_mbox[0] = FC_NEW_CONFIG; /* i.e. mbox->cmd=0xA1 */ 216 raw_mbox[2] = NC_SUBOP_ENQUIRY3; /* i.e. 0x0F */ 217 raw_mbox[3] = ENQ3_GET_SOLICITED_FULL; /* i.e. 
0x02 */ 218 219 /* Issue a blocking command to the card */ 220 if ((retval = issue_scb_block(adapter, raw_mbox))) { 221 /* the adapter does not support 40ld */ 222 223 mraid_ext_inquiry *ext_inq; 224 mraid_inquiry *inq; 225 dma_addr_t dma_handle; 226 227 ext_inq = dma_alloc_coherent(&adapter->dev->dev, 228 sizeof(mraid_ext_inquiry), 229 &dma_handle, GFP_KERNEL); 230 231 if( ext_inq == NULL ) return -1; 232 233 inq = &ext_inq->raid_inq; 234 235 mbox->m_out.xferaddr = (u32)dma_handle; 236 237 /*issue old 0x04 command to adapter */ 238 mbox->m_out.cmd = MEGA_MBOXCMD_ADPEXTINQ; 239 240 issue_scb_block(adapter, raw_mbox); 241 242 /* 243 * update Enquiry3 and ProductInfo structures with 244 * mraid_inquiry structure 245 */ 246 mega_8_to_40ld(inq, inquiry3, 247 (mega_product_info *)&adapter->product_info); 248 249 dma_free_coherent(&adapter->dev->dev, 250 sizeof(mraid_ext_inquiry), ext_inq, 251 dma_handle); 252 253 } else { /*adapter supports 40ld */ 254 adapter->flag |= BOARD_40LD; 255 256 /* 257 * get product_info, which is static information and will be 258 * unchanged 259 */ 260 prod_info_dma_handle = dma_map_single(&adapter->dev->dev, 261 (void *)&adapter->product_info, 262 sizeof(mega_product_info), 263 DMA_FROM_DEVICE); 264 265 mbox->m_out.xferaddr = prod_info_dma_handle; 266 267 raw_mbox[0] = FC_NEW_CONFIG; /* i.e. mbox->cmd=0xA1 */ 268 raw_mbox[2] = NC_SUBOP_PRODUCT_INFO; /* i.e. 0x0E */ 269 270 if ((retval = issue_scb_block(adapter, raw_mbox))) 271 dev_warn(&adapter->dev->dev, 272 "Product_info cmd failed with error: %d\n", 273 retval); 274 275 dma_unmap_single(&adapter->dev->dev, prod_info_dma_handle, 276 sizeof(mega_product_info), DMA_FROM_DEVICE); 277 } 278 279 280 /* 281 * kernel scans the channels from 0 to <= max_channel 282 */ 283 adapter->host->max_channel = 284 adapter->product_info.nchannels + NVIRT_CHAN -1; 285 286 adapter->host->max_id = 16; /* max targets per channel */ 287 288 adapter->host->max_lun = 7; /* Up to 7 luns for non disk devices */ 289 290 adapter->host->cmd_per_lun = max_cmd_per_lun; 291 292 adapter->numldrv = inquiry3->num_ldrv; 293 294 adapter->max_cmds = adapter->product_info.max_commands; 295 296 if(adapter->max_cmds > MAX_COMMANDS) 297 adapter->max_cmds = MAX_COMMANDS; 298 299 adapter->host->can_queue = adapter->max_cmds - 1; 300 301 /* 302 * Get the maximum number of scatter-gather elements supported by this 303 * firmware 304 */ 305 mega_get_max_sgl(adapter); 306 307 adapter->host->sg_tablesize = adapter->sglen; 308 309 /* use HP firmware and bios version encoding 310 Note: fw_version[0|1] and bios_version[0|1] were originally shifted 311 right 8 bits making them zero. This 0 value was hardcoded to fix 312 sparse warnings. 
*/ 313 if (adapter->product_info.subsysvid == PCI_VENDOR_ID_HP) { 314 snprintf(adapter->fw_version, sizeof(adapter->fw_version), 315 "%c%d%d.%d%d", 316 adapter->product_info.fw_version[2], 317 0, 318 adapter->product_info.fw_version[1] & 0x0f, 319 0, 320 adapter->product_info.fw_version[0] & 0x0f); 321 snprintf(adapter->bios_version, sizeof(adapter->fw_version), 322 "%c%d%d.%d%d", 323 adapter->product_info.bios_version[2], 324 0, 325 adapter->product_info.bios_version[1] & 0x0f, 326 0, 327 adapter->product_info.bios_version[0] & 0x0f); 328 } else { 329 memcpy(adapter->fw_version, 330 (char *)adapter->product_info.fw_version, 4); 331 adapter->fw_version[4] = 0; 332 333 memcpy(adapter->bios_version, 334 (char *)adapter->product_info.bios_version, 4); 335 336 adapter->bios_version[4] = 0; 337 } 338 339 dev_notice(&adapter->dev->dev, "[%s:%s] detected %d logical drives\n", 340 adapter->fw_version, adapter->bios_version, adapter->numldrv); 341 342 /* 343 * Do we support extended (>10 bytes) cdbs 344 */ 345 adapter->support_ext_cdb = mega_support_ext_cdb(adapter); 346 if (adapter->support_ext_cdb) 347 dev_notice(&adapter->dev->dev, "supports extended CDBs\n"); 348 349 350 return 0; 351 } 352 353 /** 354 * mega_runpendq() 355 * @adapter: pointer to our soft state 356 * 357 * Runs through the list of pending requests. 358 */ 359 static inline void 360 mega_runpendq(adapter_t *adapter) 361 { 362 if(!list_empty(&adapter->pending_list)) 363 __mega_runpendq(adapter); 364 } 365 366 /* 367 * megaraid_queue() 368 * @scmd - Issue this scsi command 369 * @done - the callback hook into the scsi mid-layer 370 * 371 * The command queuing entry point for the mid-layer. 372 */ 373 static int 374 megaraid_queue_lck(struct scsi_cmnd *scmd, void (*done)(struct scsi_cmnd *)) 375 { 376 adapter_t *adapter; 377 scb_t *scb; 378 int busy=0; 379 unsigned long flags; 380 381 adapter = (adapter_t *)scmd->device->host->hostdata; 382 383 /* 384 * Allocate and build a SCB request 385 * busy flag will be set if mega_build_cmd() command could not 386 * allocate scb. We will return non-zero status in that case. 387 * NOTE: scb can be null even though certain commands completed 388 * successfully, e.g., MODE_SENSE and TEST_UNIT_READY, we would 389 * return 0 in that case. 390 */ 391 392 spin_lock_irqsave(&adapter->lock, flags); 393 scb = mega_build_cmd(adapter, scmd, &busy); 394 if (!scb) 395 goto out; 396 397 scb->state |= SCB_PENDQ; 398 list_add_tail(&scb->list, &adapter->pending_list); 399 400 /* 401 * Check if the HBA is in quiescent state, e.g., during a 402 * delete logical drive opertion. If it is, don't run 403 * the pending_list. 404 */ 405 if (atomic_read(&adapter->quiescent) == 0) 406 mega_runpendq(adapter); 407 408 busy = 0; 409 out: 410 spin_unlock_irqrestore(&adapter->lock, flags); 411 return busy; 412 } 413 414 static DEF_SCSI_QCMD(megaraid_queue) 415 416 /** 417 * mega_allocate_scb() 418 * @adapter: pointer to our soft state 419 * @cmd: scsi command from the mid-layer 420 * 421 * Allocate a SCB structure. This is the central structure for controller 422 * commands. 
423 */ 424 static inline scb_t * 425 mega_allocate_scb(adapter_t *adapter, struct scsi_cmnd *cmd) 426 { 427 struct list_head *head = &adapter->free_list; 428 scb_t *scb; 429 430 /* Unlink command from Free List */ 431 if( !list_empty(head) ) { 432 433 scb = list_entry(head->next, scb_t, list); 434 435 list_del_init(head->next); 436 437 scb->state = SCB_ACTIVE; 438 scb->cmd = cmd; 439 scb->dma_type = MEGA_DMA_TYPE_NONE; 440 441 return scb; 442 } 443 444 return NULL; 445 } 446 447 /** 448 * mega_get_ldrv_num() 449 * @adapter: pointer to our soft state 450 * @cmd: scsi mid layer command 451 * @channel: channel on the controller 452 * 453 * Calculate the logical drive number based on the information in scsi command 454 * and the channel number. 455 */ 456 static inline int 457 mega_get_ldrv_num(adapter_t *adapter, struct scsi_cmnd *cmd, int channel) 458 { 459 int tgt; 460 int ldrv_num; 461 462 tgt = cmd->device->id; 463 464 if ( tgt > adapter->this_id ) 465 tgt--; /* we do not get inquires for initiator id */ 466 467 ldrv_num = (channel * 15) + tgt; 468 469 470 /* 471 * If we have a logical drive with boot enabled, project it first 472 */ 473 if( adapter->boot_ldrv_enabled ) { 474 if( ldrv_num == 0 ) { 475 ldrv_num = adapter->boot_ldrv; 476 } 477 else { 478 if( ldrv_num <= adapter->boot_ldrv ) { 479 ldrv_num--; 480 } 481 } 482 } 483 484 /* 485 * If "delete logical drive" feature is enabled on this controller. 486 * Do only if at least one delete logical drive operation was done. 487 * 488 * Also, after logical drive deletion, instead of logical drive number, 489 * the value returned should be 0x80+logical drive id. 490 * 491 * These is valid only for IO commands. 492 */ 493 494 if (adapter->support_random_del && adapter->read_ldidmap ) 495 switch (cmd->cmnd[0]) { 496 case READ_6: 497 case WRITE_6: 498 case READ_10: 499 case WRITE_10: 500 ldrv_num += 0x80; 501 } 502 503 return ldrv_num; 504 } 505 506 /** 507 * mega_build_cmd() 508 * @adapter: pointer to our soft state 509 * @cmd: Prepare using this scsi command 510 * @busy: busy flag if no resources 511 * 512 * Prepares a command and scatter gather list for the controller. This routine 513 * also finds out if the commands is intended for a logical drive or a 514 * physical device and prepares the controller command accordingly. 515 * 516 * We also re-order the logical drives and physical devices based on their 517 * boot settings. 518 */ 519 static scb_t * 520 mega_build_cmd(adapter_t *adapter, struct scsi_cmnd *cmd, int *busy) 521 { 522 mega_passthru *pthru; 523 scb_t *scb; 524 mbox_t *mbox; 525 u32 seg; 526 char islogical; 527 int max_ldrv_num; 528 int channel = 0; 529 int target = 0; 530 int ldrv_num = 0; /* logical drive number */ 531 532 /* 533 * We know what channels our logical drives are on - mega_find_card() 534 */ 535 islogical = adapter->logdrv_chan[cmd->device->channel]; 536 537 /* 538 * The theory: If physical drive is chosen for boot, all the physical 539 * devices are exported before the logical drives, otherwise physical 540 * devices are pushed after logical drives, in which case - Kernel sees 541 * the physical devices on virtual channel which is obviously converted 542 * to actual channel on the HBA. 
543 */ 544 if( adapter->boot_pdrv_enabled ) { 545 if( islogical ) { 546 /* logical channel */ 547 channel = cmd->device->channel - 548 adapter->product_info.nchannels; 549 } 550 else { 551 /* this is physical channel */ 552 channel = cmd->device->channel; 553 target = cmd->device->id; 554 555 /* 556 * boot from a physical disk, that disk needs to be 557 * exposed first IF both the channels are SCSI, then 558 * booting from the second channel is not allowed. 559 */ 560 if( target == 0 ) { 561 target = adapter->boot_pdrv_tgt; 562 } 563 else if( target == adapter->boot_pdrv_tgt ) { 564 target = 0; 565 } 566 } 567 } 568 else { 569 if( islogical ) { 570 /* this is the logical channel */ 571 channel = cmd->device->channel; 572 } 573 else { 574 /* physical channel */ 575 channel = cmd->device->channel - NVIRT_CHAN; 576 target = cmd->device->id; 577 } 578 } 579 580 581 if(islogical) { 582 583 /* have just LUN 0 for each target on virtual channels */ 584 if (cmd->device->lun) { 585 cmd->result = (DID_BAD_TARGET << 16); 586 scsi_done(cmd); 587 return NULL; 588 } 589 590 ldrv_num = mega_get_ldrv_num(adapter, cmd, channel); 591 592 593 max_ldrv_num = (adapter->flag & BOARD_40LD) ? 594 MAX_LOGICAL_DRIVES_40LD : MAX_LOGICAL_DRIVES_8LD; 595 596 /* 597 * max_ldrv_num increases by 0x80 if some logical drive was 598 * deleted. 599 */ 600 if(adapter->read_ldidmap) 601 max_ldrv_num += 0x80; 602 603 if(ldrv_num > max_ldrv_num ) { 604 cmd->result = (DID_BAD_TARGET << 16); 605 scsi_done(cmd); 606 return NULL; 607 } 608 609 } 610 else { 611 if( cmd->device->lun > 7) { 612 /* 613 * Do not support lun >7 for physically accessed 614 * devices 615 */ 616 cmd->result = (DID_BAD_TARGET << 16); 617 scsi_done(cmd); 618 return NULL; 619 } 620 } 621 622 /* 623 * 624 * Logical drive commands 625 * 626 */ 627 if(islogical) { 628 switch (cmd->cmnd[0]) { 629 case TEST_UNIT_READY: 630 #if MEGA_HAVE_CLUSTERING 631 /* 632 * Do we support clustering and is the support enabled 633 * If no, return success always 634 */ 635 if( !adapter->has_cluster ) { 636 cmd->result = (DID_OK << 16); 637 scsi_done(cmd); 638 return NULL; 639 } 640 641 if(!(scb = mega_allocate_scb(adapter, cmd))) { 642 *busy = 1; 643 return NULL; 644 } 645 646 scb->raw_mbox[0] = MEGA_CLUSTER_CMD; 647 scb->raw_mbox[2] = MEGA_RESERVATION_STATUS; 648 scb->raw_mbox[3] = ldrv_num; 649 650 scb->dma_direction = DMA_NONE; 651 652 return scb; 653 #else 654 cmd->result = (DID_OK << 16); 655 scsi_done(cmd); 656 return NULL; 657 #endif 658 659 case MODE_SENSE: { 660 char *buf; 661 struct scatterlist *sg; 662 663 sg = scsi_sglist(cmd); 664 buf = kmap_atomic(sg_page(sg)) + sg->offset; 665 666 memset(buf, 0, cmd->cmnd[4]); 667 kunmap_atomic(buf - sg->offset); 668 669 cmd->result = (DID_OK << 16); 670 scsi_done(cmd); 671 return NULL; 672 } 673 674 case READ_CAPACITY: 675 case INQUIRY: 676 677 if(!(adapter->flag & (1L << cmd->device->channel))) { 678 679 dev_notice(&adapter->dev->dev, 680 "scsi%d: scanning scsi channel %d " 681 "for logical drives\n", 682 adapter->host->host_no, 683 cmd->device->channel); 684 685 adapter->flag |= (1L << cmd->device->channel); 686 } 687 688 /* Allocate a SCB and initialize passthru */ 689 if(!(scb = mega_allocate_scb(adapter, cmd))) { 690 *busy = 1; 691 return NULL; 692 } 693 pthru = scb->pthru; 694 695 mbox = (mbox_t *)scb->raw_mbox; 696 memset(mbox, 0, sizeof(scb->raw_mbox)); 697 memset(pthru, 0, sizeof(mega_passthru)); 698 699 pthru->timeout = 0; 700 pthru->ars = 1; 701 pthru->reqsenselen = 14; 702 pthru->islogical = 1; 703 pthru->logdrv = 
ldrv_num; 704 pthru->cdblen = cmd->cmd_len; 705 memcpy(pthru->cdb, cmd->cmnd, cmd->cmd_len); 706 707 if( adapter->has_64bit_addr ) { 708 mbox->m_out.cmd = MEGA_MBOXCMD_PASSTHRU64; 709 } 710 else { 711 mbox->m_out.cmd = MEGA_MBOXCMD_PASSTHRU; 712 } 713 714 scb->dma_direction = DMA_FROM_DEVICE; 715 716 pthru->numsgelements = mega_build_sglist(adapter, scb, 717 &pthru->dataxferaddr, &pthru->dataxferlen); 718 719 mbox->m_out.xferaddr = scb->pthru_dma_addr; 720 721 return scb; 722 723 case READ_6: 724 case WRITE_6: 725 case READ_10: 726 case WRITE_10: 727 case READ_12: 728 case WRITE_12: 729 730 /* Allocate a SCB and initialize mailbox */ 731 if(!(scb = mega_allocate_scb(adapter, cmd))) { 732 *busy = 1; 733 return NULL; 734 } 735 mbox = (mbox_t *)scb->raw_mbox; 736 737 memset(mbox, 0, sizeof(scb->raw_mbox)); 738 mbox->m_out.logdrv = ldrv_num; 739 740 /* 741 * A little hack: 2nd bit is zero for all scsi read 742 * commands and is set for all scsi write commands 743 */ 744 if( adapter->has_64bit_addr ) { 745 mbox->m_out.cmd = (*cmd->cmnd & 0x02) ? 746 MEGA_MBOXCMD_LWRITE64: 747 MEGA_MBOXCMD_LREAD64 ; 748 } 749 else { 750 mbox->m_out.cmd = (*cmd->cmnd & 0x02) ? 751 MEGA_MBOXCMD_LWRITE: 752 MEGA_MBOXCMD_LREAD ; 753 } 754 755 /* 756 * 6-byte READ(0x08) or WRITE(0x0A) cdb 757 */ 758 if( cmd->cmd_len == 6 ) { 759 mbox->m_out.numsectors = (u32) cmd->cmnd[4]; 760 mbox->m_out.lba = 761 ((u32)cmd->cmnd[1] << 16) | 762 ((u32)cmd->cmnd[2] << 8) | 763 (u32)cmd->cmnd[3]; 764 765 mbox->m_out.lba &= 0x1FFFFF; 766 767 #if MEGA_HAVE_STATS 768 /* 769 * Take modulo 0x80, since the logical drive 770 * number increases by 0x80 when a logical 771 * drive was deleted 772 */ 773 if (*cmd->cmnd == READ_6) { 774 adapter->nreads[ldrv_num%0x80]++; 775 adapter->nreadblocks[ldrv_num%0x80] += 776 mbox->m_out.numsectors; 777 } else { 778 adapter->nwrites[ldrv_num%0x80]++; 779 adapter->nwriteblocks[ldrv_num%0x80] += 780 mbox->m_out.numsectors; 781 } 782 #endif 783 } 784 785 /* 786 * 10-byte READ(0x28) or WRITE(0x2A) cdb 787 */ 788 if( cmd->cmd_len == 10 ) { 789 mbox->m_out.numsectors = 790 (u32)cmd->cmnd[8] | 791 ((u32)cmd->cmnd[7] << 8); 792 mbox->m_out.lba = 793 ((u32)cmd->cmnd[2] << 24) | 794 ((u32)cmd->cmnd[3] << 16) | 795 ((u32)cmd->cmnd[4] << 8) | 796 (u32)cmd->cmnd[5]; 797 798 #if MEGA_HAVE_STATS 799 if (*cmd->cmnd == READ_10) { 800 adapter->nreads[ldrv_num%0x80]++; 801 adapter->nreadblocks[ldrv_num%0x80] += 802 mbox->m_out.numsectors; 803 } else { 804 adapter->nwrites[ldrv_num%0x80]++; 805 adapter->nwriteblocks[ldrv_num%0x80] += 806 mbox->m_out.numsectors; 807 } 808 #endif 809 } 810 811 /* 812 * 12-byte READ(0xA8) or WRITE(0xAA) cdb 813 */ 814 if( cmd->cmd_len == 12 ) { 815 mbox->m_out.lba = 816 ((u32)cmd->cmnd[2] << 24) | 817 ((u32)cmd->cmnd[3] << 16) | 818 ((u32)cmd->cmnd[4] << 8) | 819 (u32)cmd->cmnd[5]; 820 821 mbox->m_out.numsectors = 822 ((u32)cmd->cmnd[6] << 24) | 823 ((u32)cmd->cmnd[7] << 16) | 824 ((u32)cmd->cmnd[8] << 8) | 825 (u32)cmd->cmnd[9]; 826 827 #if MEGA_HAVE_STATS 828 if (*cmd->cmnd == READ_12) { 829 adapter->nreads[ldrv_num%0x80]++; 830 adapter->nreadblocks[ldrv_num%0x80] += 831 mbox->m_out.numsectors; 832 } else { 833 adapter->nwrites[ldrv_num%0x80]++; 834 adapter->nwriteblocks[ldrv_num%0x80] += 835 mbox->m_out.numsectors; 836 } 837 #endif 838 } 839 840 /* 841 * If it is a read command 842 */ 843 if( (*cmd->cmnd & 0x0F) == 0x08 ) { 844 scb->dma_direction = DMA_FROM_DEVICE; 845 } 846 else { 847 scb->dma_direction = DMA_TO_DEVICE; 848 } 849 850 /* Calculate Scatter-Gather info */ 851 
mbox->m_out.numsgelements = mega_build_sglist(adapter, scb, 852 (u32 *)&mbox->m_out.xferaddr, &seg); 853 854 return scb; 855 856 #if MEGA_HAVE_CLUSTERING 857 case RESERVE: 858 case RELEASE: 859 860 /* 861 * Do we support clustering and is the support enabled 862 */ 863 if( ! adapter->has_cluster ) { 864 865 cmd->result = (DID_BAD_TARGET << 16); 866 scsi_done(cmd); 867 return NULL; 868 } 869 870 /* Allocate a SCB and initialize mailbox */ 871 if(!(scb = mega_allocate_scb(adapter, cmd))) { 872 *busy = 1; 873 return NULL; 874 } 875 876 scb->raw_mbox[0] = MEGA_CLUSTER_CMD; 877 scb->raw_mbox[2] = ( *cmd->cmnd == RESERVE ) ? 878 MEGA_RESERVE_LD : MEGA_RELEASE_LD; 879 880 scb->raw_mbox[3] = ldrv_num; 881 882 scb->dma_direction = DMA_NONE; 883 884 return scb; 885 #endif 886 887 default: 888 cmd->result = (DID_BAD_TARGET << 16); 889 scsi_done(cmd); 890 return NULL; 891 } 892 } 893 894 /* 895 * Passthru drive commands 896 */ 897 else { 898 /* Allocate a SCB and initialize passthru */ 899 if(!(scb = mega_allocate_scb(adapter, cmd))) { 900 *busy = 1; 901 return NULL; 902 } 903 904 mbox = (mbox_t *)scb->raw_mbox; 905 memset(mbox, 0, sizeof(scb->raw_mbox)); 906 907 if( adapter->support_ext_cdb ) { 908 909 mega_prepare_extpassthru(adapter, scb, cmd, 910 channel, target); 911 912 mbox->m_out.cmd = MEGA_MBOXCMD_EXTPTHRU; 913 914 mbox->m_out.xferaddr = scb->epthru_dma_addr; 915 916 } 917 else { 918 919 pthru = mega_prepare_passthru(adapter, scb, cmd, 920 channel, target); 921 922 /* Initialize mailbox */ 923 if( adapter->has_64bit_addr ) { 924 mbox->m_out.cmd = MEGA_MBOXCMD_PASSTHRU64; 925 } 926 else { 927 mbox->m_out.cmd = MEGA_MBOXCMD_PASSTHRU; 928 } 929 930 mbox->m_out.xferaddr = scb->pthru_dma_addr; 931 932 } 933 return scb; 934 } 935 return NULL; 936 } 937 938 939 /** 940 * mega_prepare_passthru() 941 * @adapter: pointer to our soft state 942 * @scb: our scsi control block 943 * @cmd: scsi command from the mid-layer 944 * @channel: actual channel on the controller 945 * @target: actual id on the controller. 946 * 947 * prepare a command for the scsi physical devices. 948 */ 949 static mega_passthru * 950 mega_prepare_passthru(adapter_t *adapter, scb_t *scb, struct scsi_cmnd *cmd, 951 int channel, int target) 952 { 953 mega_passthru *pthru; 954 955 pthru = scb->pthru; 956 memset(pthru, 0, sizeof (mega_passthru)); 957 958 /* 0=6sec/1=60sec/2=10min/3=3hrs */ 959 pthru->timeout = 2; 960 961 pthru->ars = 1; 962 pthru->reqsenselen = 14; 963 pthru->islogical = 0; 964 965 pthru->channel = (adapter->flag & BOARD_40LD) ? 0 : channel; 966 967 pthru->target = (adapter->flag & BOARD_40LD) ? 
968 (channel << 4) | target : target; 969 970 pthru->cdblen = cmd->cmd_len; 971 pthru->logdrv = cmd->device->lun; 972 973 memcpy(pthru->cdb, cmd->cmnd, cmd->cmd_len); 974 975 /* Not sure about the direction */ 976 scb->dma_direction = DMA_BIDIRECTIONAL; 977 978 /* Special Code for Handling READ_CAPA/ INQ using bounce buffers */ 979 switch (cmd->cmnd[0]) { 980 case INQUIRY: 981 case READ_CAPACITY: 982 if(!(adapter->flag & (1L << cmd->device->channel))) { 983 984 dev_notice(&adapter->dev->dev, 985 "scsi%d: scanning scsi channel %d [P%d] " 986 "for physical devices\n", 987 adapter->host->host_no, 988 cmd->device->channel, channel); 989 990 adapter->flag |= (1L << cmd->device->channel); 991 } 992 fallthrough; 993 default: 994 pthru->numsgelements = mega_build_sglist(adapter, scb, 995 &pthru->dataxferaddr, &pthru->dataxferlen); 996 break; 997 } 998 return pthru; 999 } 1000 1001 1002 /** 1003 * mega_prepare_extpassthru() 1004 * @adapter: pointer to our soft state 1005 * @scb: our scsi control block 1006 * @cmd: scsi command from the mid-layer 1007 * @channel: actual channel on the controller 1008 * @target: actual id on the controller. 1009 * 1010 * prepare a command for the scsi physical devices. This rountine prepares 1011 * commands for devices which can take extended CDBs (>10 bytes) 1012 */ 1013 static mega_ext_passthru * 1014 mega_prepare_extpassthru(adapter_t *adapter, scb_t *scb, 1015 struct scsi_cmnd *cmd, 1016 int channel, int target) 1017 { 1018 mega_ext_passthru *epthru; 1019 1020 epthru = scb->epthru; 1021 memset(epthru, 0, sizeof(mega_ext_passthru)); 1022 1023 /* 0=6sec/1=60sec/2=10min/3=3hrs */ 1024 epthru->timeout = 2; 1025 1026 epthru->ars = 1; 1027 epthru->reqsenselen = 14; 1028 epthru->islogical = 0; 1029 1030 epthru->channel = (adapter->flag & BOARD_40LD) ? 0 : channel; 1031 epthru->target = (adapter->flag & BOARD_40LD) ? 1032 (channel << 4) | target : target; 1033 1034 epthru->cdblen = cmd->cmd_len; 1035 epthru->logdrv = cmd->device->lun; 1036 1037 memcpy(epthru->cdb, cmd->cmnd, cmd->cmd_len); 1038 1039 /* Not sure about the direction */ 1040 scb->dma_direction = DMA_BIDIRECTIONAL; 1041 1042 switch(cmd->cmnd[0]) { 1043 case INQUIRY: 1044 case READ_CAPACITY: 1045 if(!(adapter->flag & (1L << cmd->device->channel))) { 1046 1047 dev_notice(&adapter->dev->dev, 1048 "scsi%d: scanning scsi channel %d [P%d] " 1049 "for physical devices\n", 1050 adapter->host->host_no, 1051 cmd->device->channel, channel); 1052 1053 adapter->flag |= (1L << cmd->device->channel); 1054 } 1055 fallthrough; 1056 default: 1057 epthru->numsgelements = mega_build_sglist(adapter, scb, 1058 &epthru->dataxferaddr, &epthru->dataxferlen); 1059 break; 1060 } 1061 1062 return epthru; 1063 } 1064 1065 static void 1066 __mega_runpendq(adapter_t *adapter) 1067 { 1068 scb_t *scb; 1069 struct list_head *pos, *next; 1070 1071 /* Issue any pending commands to the card */ 1072 list_for_each_safe(pos, next, &adapter->pending_list) { 1073 1074 scb = list_entry(pos, scb_t, list); 1075 1076 if( !(scb->state & SCB_ISSUED) ) { 1077 1078 if( issue_scb(adapter, scb) != 0 ) 1079 return; 1080 } 1081 } 1082 1083 return; 1084 } 1085 1086 1087 /** 1088 * issue_scb() 1089 * @adapter: pointer to our soft state 1090 * @scb: scsi control block 1091 * 1092 * Post a command to the card if the mailbox is available, otherwise return 1093 * busy. We also take the scb from the pending list if the mailbox is 1094 * available. 
1095 */ 1096 static int 1097 issue_scb(adapter_t *adapter, scb_t *scb) 1098 { 1099 volatile mbox64_t *mbox64 = adapter->mbox64; 1100 volatile mbox_t *mbox = adapter->mbox; 1101 unsigned int i = 0; 1102 1103 if(unlikely(mbox->m_in.busy)) { 1104 do { 1105 udelay(1); 1106 i++; 1107 } while( mbox->m_in.busy && (i < max_mbox_busy_wait) ); 1108 1109 if(mbox->m_in.busy) return -1; 1110 } 1111 1112 /* Copy mailbox data into host structure */ 1113 memcpy((char *)&mbox->m_out, (char *)scb->raw_mbox, 1114 sizeof(struct mbox_out)); 1115 1116 mbox->m_out.cmdid = scb->idx; /* Set cmdid */ 1117 mbox->m_in.busy = 1; /* Set busy */ 1118 1119 1120 /* 1121 * Increment the pending queue counter 1122 */ 1123 atomic_inc(&adapter->pend_cmds); 1124 1125 switch (mbox->m_out.cmd) { 1126 case MEGA_MBOXCMD_LREAD64: 1127 case MEGA_MBOXCMD_LWRITE64: 1128 case MEGA_MBOXCMD_PASSTHRU64: 1129 case MEGA_MBOXCMD_EXTPTHRU: 1130 mbox64->xfer_segment_lo = mbox->m_out.xferaddr; 1131 mbox64->xfer_segment_hi = 0; 1132 mbox->m_out.xferaddr = 0xFFFFFFFF; 1133 break; 1134 default: 1135 mbox64->xfer_segment_lo = 0; 1136 mbox64->xfer_segment_hi = 0; 1137 } 1138 1139 /* 1140 * post the command 1141 */ 1142 scb->state |= SCB_ISSUED; 1143 1144 if( likely(adapter->flag & BOARD_MEMMAP) ) { 1145 mbox->m_in.poll = 0; 1146 mbox->m_in.ack = 0; 1147 WRINDOOR(adapter, adapter->mbox_dma | 0x1); 1148 } 1149 else { 1150 irq_enable(adapter); 1151 issue_command(adapter); 1152 } 1153 1154 return 0; 1155 } 1156 1157 /* 1158 * Wait until the controller's mailbox is available 1159 */ 1160 static inline int 1161 mega_busywait_mbox (adapter_t *adapter) 1162 { 1163 if (adapter->mbox->m_in.busy) 1164 return __mega_busywait_mbox(adapter); 1165 return 0; 1166 } 1167 1168 /** 1169 * issue_scb_block() 1170 * @adapter: pointer to our soft state 1171 * @raw_mbox: the mailbox 1172 * 1173 * Issue a scb in synchronous and non-interrupt mode 1174 */ 1175 static int 1176 issue_scb_block(adapter_t *adapter, u_char *raw_mbox) 1177 { 1178 volatile mbox64_t *mbox64 = adapter->mbox64; 1179 volatile mbox_t *mbox = adapter->mbox; 1180 u8 byte; 1181 1182 /* Wait until mailbox is free */ 1183 if(mega_busywait_mbox (adapter)) 1184 goto bug_blocked_mailbox; 1185 1186 /* Copy mailbox data into host structure */ 1187 memcpy((char *) mbox, raw_mbox, sizeof(struct mbox_out)); 1188 mbox->m_out.cmdid = 0xFE; 1189 mbox->m_in.busy = 1; 1190 1191 switch (raw_mbox[0]) { 1192 case MEGA_MBOXCMD_LREAD64: 1193 case MEGA_MBOXCMD_LWRITE64: 1194 case MEGA_MBOXCMD_PASSTHRU64: 1195 case MEGA_MBOXCMD_EXTPTHRU: 1196 mbox64->xfer_segment_lo = mbox->m_out.xferaddr; 1197 mbox64->xfer_segment_hi = 0; 1198 mbox->m_out.xferaddr = 0xFFFFFFFF; 1199 break; 1200 default: 1201 mbox64->xfer_segment_lo = 0; 1202 mbox64->xfer_segment_hi = 0; 1203 } 1204 1205 if( likely(adapter->flag & BOARD_MEMMAP) ) { 1206 mbox->m_in.poll = 0; 1207 mbox->m_in.ack = 0; 1208 mbox->m_in.numstatus = 0xFF; 1209 mbox->m_in.status = 0xFF; 1210 WRINDOOR(adapter, adapter->mbox_dma | 0x1); 1211 1212 while((volatile u8)mbox->m_in.numstatus == 0xFF) 1213 cpu_relax(); 1214 1215 mbox->m_in.numstatus = 0xFF; 1216 1217 while( (volatile u8)mbox->m_in.poll != 0x77 ) 1218 cpu_relax(); 1219 1220 mbox->m_in.poll = 0; 1221 mbox->m_in.ack = 0x77; 1222 1223 WRINDOOR(adapter, adapter->mbox_dma | 0x2); 1224 1225 while(RDINDOOR(adapter) & 0x2) 1226 cpu_relax(); 1227 } 1228 else { 1229 irq_disable(adapter); 1230 issue_command(adapter); 1231 1232 while (!((byte = irq_state(adapter)) & INTR_VALID)) 1233 cpu_relax(); 1234 1235 set_irq_state(adapter, byte); 1236 
irq_enable(adapter); 1237 irq_ack(adapter); 1238 } 1239 1240 return mbox->m_in.status; 1241 1242 bug_blocked_mailbox: 1243 dev_warn(&adapter->dev->dev, "Blocked mailbox......!!\n"); 1244 udelay (1000); 1245 return -1; 1246 } 1247 1248 1249 /** 1250 * megaraid_isr_iomapped() 1251 * @irq: irq 1252 * @devp: pointer to our soft state 1253 * 1254 * Interrupt service routine for io-mapped controllers. 1255 * Find out if our device is interrupting. If yes, acknowledge the interrupt 1256 * and service the completed commands. 1257 */ 1258 static irqreturn_t 1259 megaraid_isr_iomapped(int irq, void *devp) 1260 { 1261 adapter_t *adapter = devp; 1262 unsigned long flags; 1263 u8 status; 1264 u8 nstatus; 1265 u8 completed[MAX_FIRMWARE_STATUS]; 1266 u8 byte; 1267 int handled = 0; 1268 1269 1270 /* 1271 * loop till F/W has more commands for us to complete. 1272 */ 1273 spin_lock_irqsave(&adapter->lock, flags); 1274 1275 do { 1276 /* Check if a valid interrupt is pending */ 1277 byte = irq_state(adapter); 1278 if( (byte & VALID_INTR_BYTE) == 0 ) { 1279 /* 1280 * No more pending commands 1281 */ 1282 goto out_unlock; 1283 } 1284 set_irq_state(adapter, byte); 1285 1286 while((nstatus = (volatile u8)adapter->mbox->m_in.numstatus) 1287 == 0xFF) 1288 cpu_relax(); 1289 adapter->mbox->m_in.numstatus = 0xFF; 1290 1291 status = adapter->mbox->m_in.status; 1292 1293 /* 1294 * decrement the pending queue counter 1295 */ 1296 atomic_sub(nstatus, &adapter->pend_cmds); 1297 1298 memcpy(completed, (void *)adapter->mbox->m_in.completed, 1299 nstatus); 1300 1301 /* Acknowledge interrupt */ 1302 irq_ack(adapter); 1303 1304 mega_cmd_done(adapter, completed, nstatus, status); 1305 1306 mega_rundoneq(adapter); 1307 1308 handled = 1; 1309 1310 /* Loop through any pending requests */ 1311 if(atomic_read(&adapter->quiescent) == 0) { 1312 mega_runpendq(adapter); 1313 } 1314 1315 } while(1); 1316 1317 out_unlock: 1318 1319 spin_unlock_irqrestore(&adapter->lock, flags); 1320 1321 return IRQ_RETVAL(handled); 1322 } 1323 1324 1325 /** 1326 * megaraid_isr_memmapped() 1327 * @irq: irq 1328 * @devp: pointer to our soft state 1329 * 1330 * Interrupt service routine for memory-mapped controllers. 1331 * Find out if our device is interrupting. If yes, acknowledge the interrupt 1332 * and service the completed commands. 1333 */ 1334 static irqreturn_t 1335 megaraid_isr_memmapped(int irq, void *devp) 1336 { 1337 adapter_t *adapter = devp; 1338 unsigned long flags; 1339 u8 status; 1340 u32 dword = 0; 1341 u8 nstatus; 1342 u8 completed[MAX_FIRMWARE_STATUS]; 1343 int handled = 0; 1344 1345 1346 /* 1347 * loop till F/W has more commands for us to complete. 
1348 */ 1349 spin_lock_irqsave(&adapter->lock, flags); 1350 1351 do { 1352 /* Check if a valid interrupt is pending */ 1353 dword = RDOUTDOOR(adapter); 1354 if(dword != 0x10001234) { 1355 /* 1356 * No more pending commands 1357 */ 1358 goto out_unlock; 1359 } 1360 WROUTDOOR(adapter, 0x10001234); 1361 1362 while((nstatus = (volatile u8)adapter->mbox->m_in.numstatus) 1363 == 0xFF) { 1364 cpu_relax(); 1365 } 1366 adapter->mbox->m_in.numstatus = 0xFF; 1367 1368 status = adapter->mbox->m_in.status; 1369 1370 /* 1371 * decrement the pending queue counter 1372 */ 1373 atomic_sub(nstatus, &adapter->pend_cmds); 1374 1375 memcpy(completed, (void *)adapter->mbox->m_in.completed, 1376 nstatus); 1377 1378 /* Acknowledge interrupt */ 1379 WRINDOOR(adapter, 0x2); 1380 1381 handled = 1; 1382 1383 while( RDINDOOR(adapter) & 0x02 ) 1384 cpu_relax(); 1385 1386 mega_cmd_done(adapter, completed, nstatus, status); 1387 1388 mega_rundoneq(adapter); 1389 1390 /* Loop through any pending requests */ 1391 if(atomic_read(&adapter->quiescent) == 0) { 1392 mega_runpendq(adapter); 1393 } 1394 1395 } while(1); 1396 1397 out_unlock: 1398 1399 spin_unlock_irqrestore(&adapter->lock, flags); 1400 1401 return IRQ_RETVAL(handled); 1402 } 1403 /** 1404 * mega_cmd_done() 1405 * @adapter: pointer to our soft state 1406 * @completed: array of ids of completed commands 1407 * @nstatus: number of completed commands 1408 * @status: status of the last command completed 1409 * 1410 * Complete the commands and call the scsi mid-layer callback hooks. 1411 */ 1412 static void 1413 mega_cmd_done(adapter_t *adapter, u8 completed[], int nstatus, int status) 1414 { 1415 mega_ext_passthru *epthru = NULL; 1416 struct scatterlist *sgl; 1417 struct scsi_cmnd *cmd = NULL; 1418 mega_passthru *pthru = NULL; 1419 mbox_t *mbox = NULL; 1420 u8 c; 1421 scb_t *scb; 1422 int islogical; 1423 int cmdid; 1424 int i; 1425 1426 /* 1427 * for all the commands completed, call the mid-layer callback routine 1428 * and free the scb. 1429 */ 1430 for( i = 0; i < nstatus; i++ ) { 1431 1432 cmdid = completed[i]; 1433 1434 /* 1435 * Only free SCBs for the commands coming down from the 1436 * mid-layer, not for which were issued internally 1437 * 1438 * For internal command, restore the status returned by the 1439 * firmware so that user can interpret it. 
1440 */ 1441 if (cmdid == CMDID_INT_CMDS) { 1442 scb = &adapter->int_scb; 1443 1444 list_del_init(&scb->list); 1445 scb->state = SCB_FREE; 1446 1447 adapter->int_status = status; 1448 complete(&adapter->int_waitq); 1449 } else { 1450 scb = &adapter->scb_list[cmdid]; 1451 1452 /* 1453 * Make sure f/w has completed a valid command 1454 */ 1455 if( !(scb->state & SCB_ISSUED) || scb->cmd == NULL ) { 1456 dev_crit(&adapter->dev->dev, "invalid command " 1457 "Id %d, scb->state:%x, scsi cmd:%p\n", 1458 cmdid, scb->state, scb->cmd); 1459 1460 continue; 1461 } 1462 1463 /* 1464 * Was a abort issued for this command 1465 */ 1466 if( scb->state & SCB_ABORT ) { 1467 1468 dev_warn(&adapter->dev->dev, 1469 "aborted cmd [%x] complete\n", 1470 scb->idx); 1471 1472 scb->cmd->result = (DID_ABORT << 16); 1473 1474 list_add_tail(SCSI_LIST(scb->cmd), 1475 &adapter->completed_list); 1476 1477 mega_free_scb(adapter, scb); 1478 1479 continue; 1480 } 1481 1482 /* 1483 * Was a reset issued for this command 1484 */ 1485 if( scb->state & SCB_RESET ) { 1486 1487 dev_warn(&adapter->dev->dev, 1488 "reset cmd [%x] complete\n", 1489 scb->idx); 1490 1491 scb->cmd->result = (DID_RESET << 16); 1492 1493 list_add_tail(SCSI_LIST(scb->cmd), 1494 &adapter->completed_list); 1495 1496 mega_free_scb (adapter, scb); 1497 1498 continue; 1499 } 1500 1501 cmd = scb->cmd; 1502 pthru = scb->pthru; 1503 epthru = scb->epthru; 1504 mbox = (mbox_t *)scb->raw_mbox; 1505 1506 #if MEGA_HAVE_STATS 1507 { 1508 1509 int logdrv = mbox->m_out.logdrv; 1510 1511 islogical = adapter->logdrv_chan[cmd->channel]; 1512 /* 1513 * Maintain an error counter for the logical drive. 1514 * Some application like SNMP agent need such 1515 * statistics 1516 */ 1517 if( status && islogical && (cmd->cmnd[0] == READ_6 || 1518 cmd->cmnd[0] == READ_10 || 1519 cmd->cmnd[0] == READ_12)) { 1520 /* 1521 * Logical drive number increases by 0x80 when 1522 * a logical drive is deleted 1523 */ 1524 adapter->rd_errors[logdrv%0x80]++; 1525 } 1526 1527 if( status && islogical && (cmd->cmnd[0] == WRITE_6 || 1528 cmd->cmnd[0] == WRITE_10 || 1529 cmd->cmnd[0] == WRITE_12)) { 1530 /* 1531 * Logical drive number increases by 0x80 when 1532 * a logical drive is deleted 1533 */ 1534 adapter->wr_errors[logdrv%0x80]++; 1535 } 1536 1537 } 1538 #endif 1539 } 1540 1541 /* 1542 * Do not return the presence of hard disk on the channel so, 1543 * inquiry sent, and returned data==hard disk or removable 1544 * hard disk and not logical, request should return failure! - 1545 * PJ 1546 */ 1547 islogical = adapter->logdrv_chan[cmd->device->channel]; 1548 if( cmd->cmnd[0] == INQUIRY && !islogical ) { 1549 1550 sgl = scsi_sglist(cmd); 1551 if( sg_page(sgl) ) { 1552 c = *(unsigned char *) sg_virt(&sgl[0]); 1553 } else { 1554 dev_warn(&adapter->dev->dev, "invalid sg\n"); 1555 c = 0; 1556 } 1557 1558 if(IS_RAID_CH(adapter, cmd->device->channel) && 1559 ((c & 0x1F ) == TYPE_DISK)) { 1560 status = 0xF0; 1561 } 1562 } 1563 1564 /* clear result; otherwise, success returns corrupt value */ 1565 cmd->result = 0; 1566 1567 /* Convert MegaRAID status to Linux error code */ 1568 switch (status) { 1569 case 0x00: /* SUCCESS , i.e. SCSI_STATUS_GOOD */ 1570 cmd->result |= (DID_OK << 16); 1571 break; 1572 1573 case 0x02: /* ERROR_ABORTED, i.e. 
1574 SCSI_STATUS_CHECK_CONDITION */ 1575 1576 /* set sense_buffer and result fields */ 1577 if( mbox->m_out.cmd == MEGA_MBOXCMD_PASSTHRU || 1578 mbox->m_out.cmd == MEGA_MBOXCMD_PASSTHRU64 ) { 1579 1580 memcpy(cmd->sense_buffer, pthru->reqsensearea, 1581 14); 1582 1583 cmd->result = SAM_STAT_CHECK_CONDITION; 1584 } 1585 else { 1586 if (mbox->m_out.cmd == MEGA_MBOXCMD_EXTPTHRU) { 1587 1588 memcpy(cmd->sense_buffer, 1589 epthru->reqsensearea, 14); 1590 1591 cmd->result = SAM_STAT_CHECK_CONDITION; 1592 } else 1593 scsi_build_sense(cmd, 0, 1594 ABORTED_COMMAND, 0, 0); 1595 } 1596 break; 1597 1598 case 0x08: /* ERR_DEST_DRIVE_FAILED, i.e. 1599 SCSI_STATUS_BUSY */ 1600 cmd->result |= (DID_BUS_BUSY << 16) | status; 1601 break; 1602 1603 default: 1604 #if MEGA_HAVE_CLUSTERING 1605 /* 1606 * If TEST_UNIT_READY fails, we know 1607 * MEGA_RESERVATION_STATUS failed 1608 */ 1609 if( cmd->cmnd[0] == TEST_UNIT_READY ) { 1610 cmd->result |= (DID_ERROR << 16) | 1611 SAM_STAT_RESERVATION_CONFLICT; 1612 } 1613 else 1614 /* 1615 * Error code returned is 1 if Reserve or Release 1616 * failed or the input parameter is invalid 1617 */ 1618 if( status == 1 && 1619 (cmd->cmnd[0] == RESERVE || 1620 cmd->cmnd[0] == RELEASE) ) { 1621 1622 cmd->result |= (DID_ERROR << 16) | 1623 SAM_STAT_RESERVATION_CONFLICT; 1624 } 1625 else 1626 #endif 1627 cmd->result |= (DID_BAD_TARGET << 16)|status; 1628 } 1629 1630 mega_free_scb(adapter, scb); 1631 1632 /* Add Scsi_Command to end of completed queue */ 1633 list_add_tail(SCSI_LIST(cmd), &adapter->completed_list); 1634 } 1635 } 1636 1637 1638 /* 1639 * mega_runpendq() 1640 * 1641 * Run through the list of completed requests and finish it 1642 */ 1643 static void 1644 mega_rundoneq (adapter_t *adapter) 1645 { 1646 struct scsi_cmnd *cmd; 1647 struct list_head *pos; 1648 1649 list_for_each(pos, &adapter->completed_list) { 1650 1651 struct scsi_pointer* spos = (struct scsi_pointer *)pos; 1652 1653 cmd = list_entry(spos, struct scsi_cmnd, SCp); 1654 scsi_done(cmd); 1655 } 1656 1657 INIT_LIST_HEAD(&adapter->completed_list); 1658 } 1659 1660 1661 /* 1662 * Free a SCB structure 1663 * Note: We assume the scsi commands associated with this scb is not free yet. 1664 */ 1665 static void 1666 mega_free_scb(adapter_t *adapter, scb_t *scb) 1667 { 1668 switch( scb->dma_type ) { 1669 1670 case MEGA_DMA_TYPE_NONE: 1671 break; 1672 1673 case MEGA_SGLIST: 1674 scsi_dma_unmap(scb->cmd); 1675 break; 1676 default: 1677 break; 1678 } 1679 1680 /* 1681 * Remove from the pending list 1682 */ 1683 list_del_init(&scb->list); 1684 1685 /* Link the scb back into free list */ 1686 scb->state = SCB_FREE; 1687 scb->cmd = NULL; 1688 1689 list_add(&scb->list, &adapter->free_list); 1690 } 1691 1692 1693 static int 1694 __mega_busywait_mbox (adapter_t *adapter) 1695 { 1696 volatile mbox_t *mbox = adapter->mbox; 1697 long counter; 1698 1699 for (counter = 0; counter < 10000; counter++) { 1700 if (!mbox->m_in.busy) 1701 return 0; 1702 udelay(100); 1703 cond_resched(); 1704 } 1705 return -1; /* give up after 1 second */ 1706 } 1707 1708 /* 1709 * Copies data to SGLIST 1710 * Note: For 64 bit cards, we need a minimum of one SG element for read/write 1711 */ 1712 static int 1713 mega_build_sglist(adapter_t *adapter, scb_t *scb, u32 *buf, u32 *len) 1714 { 1715 struct scatterlist *sg; 1716 struct scsi_cmnd *cmd; 1717 int sgcnt; 1718 int idx; 1719 1720 cmd = scb->cmd; 1721 1722 /* 1723 * Copy Scatter-Gather list info into controller structure. 
1724 * 1725 * The number of sg elements returned must not exceed our limit 1726 */ 1727 sgcnt = scsi_dma_map(cmd); 1728 1729 scb->dma_type = MEGA_SGLIST; 1730 1731 BUG_ON(sgcnt > adapter->sglen || sgcnt < 0); 1732 1733 *len = 0; 1734 1735 if (scsi_sg_count(cmd) == 1 && !adapter->has_64bit_addr) { 1736 sg = scsi_sglist(cmd); 1737 scb->dma_h_bulkdata = sg_dma_address(sg); 1738 *buf = (u32)scb->dma_h_bulkdata; 1739 *len = sg_dma_len(sg); 1740 return 0; 1741 } 1742 1743 scsi_for_each_sg(cmd, sg, sgcnt, idx) { 1744 if (adapter->has_64bit_addr) { 1745 scb->sgl64[idx].address = sg_dma_address(sg); 1746 *len += scb->sgl64[idx].length = sg_dma_len(sg); 1747 } else { 1748 scb->sgl[idx].address = sg_dma_address(sg); 1749 *len += scb->sgl[idx].length = sg_dma_len(sg); 1750 } 1751 } 1752 1753 /* Reset pointer and length fields */ 1754 *buf = scb->sgl_dma_addr; 1755 1756 /* Return count of SG requests */ 1757 return sgcnt; 1758 } 1759 1760 1761 /* 1762 * mega_8_to_40ld() 1763 * 1764 * takes all info in AdapterInquiry structure and puts it into ProductInfo and 1765 * Enquiry3 structures for later use 1766 */ 1767 static void 1768 mega_8_to_40ld(mraid_inquiry *inquiry, mega_inquiry3 *enquiry3, 1769 mega_product_info *product_info) 1770 { 1771 int i; 1772 1773 product_info->max_commands = inquiry->adapter_info.max_commands; 1774 enquiry3->rebuild_rate = inquiry->adapter_info.rebuild_rate; 1775 product_info->nchannels = inquiry->adapter_info.nchannels; 1776 1777 for (i = 0; i < 4; i++) { 1778 product_info->fw_version[i] = 1779 inquiry->adapter_info.fw_version[i]; 1780 1781 product_info->bios_version[i] = 1782 inquiry->adapter_info.bios_version[i]; 1783 } 1784 enquiry3->cache_flush_interval = 1785 inquiry->adapter_info.cache_flush_interval; 1786 1787 product_info->dram_size = inquiry->adapter_info.dram_size; 1788 1789 enquiry3->num_ldrv = inquiry->logdrv_info.num_ldrv; 1790 1791 for (i = 0; i < MAX_LOGICAL_DRIVES_8LD; i++) { 1792 enquiry3->ldrv_size[i] = inquiry->logdrv_info.ldrv_size[i]; 1793 enquiry3->ldrv_prop[i] = inquiry->logdrv_info.ldrv_prop[i]; 1794 enquiry3->ldrv_state[i] = inquiry->logdrv_info.ldrv_state[i]; 1795 } 1796 1797 for (i = 0; i < (MAX_PHYSICAL_DRIVES); i++) 1798 enquiry3->pdrv_state[i] = inquiry->pdrv_info.pdrv_state[i]; 1799 } 1800 1801 static inline void 1802 mega_free_sgl(adapter_t *adapter) 1803 { 1804 scb_t *scb; 1805 int i; 1806 1807 for(i = 0; i < adapter->max_cmds; i++) { 1808 1809 scb = &adapter->scb_list[i]; 1810 1811 if( scb->sgl64 ) { 1812 dma_free_coherent(&adapter->dev->dev, 1813 sizeof(mega_sgl64) * adapter->sglen, 1814 scb->sgl64, scb->sgl_dma_addr); 1815 1816 scb->sgl64 = NULL; 1817 } 1818 1819 if( scb->pthru ) { 1820 dma_free_coherent(&adapter->dev->dev, 1821 sizeof(mega_passthru), scb->pthru, 1822 scb->pthru_dma_addr); 1823 1824 scb->pthru = NULL; 1825 } 1826 1827 if( scb->epthru ) { 1828 dma_free_coherent(&adapter->dev->dev, 1829 sizeof(mega_ext_passthru), 1830 scb->epthru, scb->epthru_dma_addr); 1831 1832 scb->epthru = NULL; 1833 } 1834 1835 } 1836 } 1837 1838 1839 /* 1840 * Get information about the card/driver 1841 */ 1842 const char * 1843 megaraid_info(struct Scsi_Host *host) 1844 { 1845 static char buffer[512]; 1846 adapter_t *adapter; 1847 1848 adapter = (adapter_t *)host->hostdata; 1849 1850 sprintf (buffer, 1851 "LSI Logic MegaRAID %s %d commands %d targs %d chans %d luns", 1852 adapter->fw_version, adapter->product_info.max_commands, 1853 adapter->host->max_id, adapter->host->max_channel, 1854 (u32)adapter->host->max_lun); 1855 return buffer; 1856 } 1857 
1858 /* 1859 * Abort a previous SCSI request. Only commands on the pending list can be 1860 * aborted. All the commands issued to the F/W must complete. 1861 */ 1862 static int 1863 megaraid_abort(struct scsi_cmnd *cmd) 1864 { 1865 adapter_t *adapter; 1866 int rval; 1867 1868 adapter = (adapter_t *)cmd->device->host->hostdata; 1869 1870 rval = megaraid_abort_and_reset(adapter, cmd, SCB_ABORT); 1871 1872 /* 1873 * This is required here to complete any completed requests 1874 * to be communicated over to the mid layer. 1875 */ 1876 mega_rundoneq(adapter); 1877 1878 return rval; 1879 } 1880 1881 1882 static int 1883 megaraid_reset(struct scsi_cmnd *cmd) 1884 { 1885 adapter_t *adapter; 1886 megacmd_t mc; 1887 int rval; 1888 1889 adapter = (adapter_t *)cmd->device->host->hostdata; 1890 1891 #if MEGA_HAVE_CLUSTERING 1892 mc.cmd = MEGA_CLUSTER_CMD; 1893 mc.opcode = MEGA_RESET_RESERVATIONS; 1894 1895 if( mega_internal_command(adapter, &mc, NULL) != 0 ) { 1896 dev_warn(&adapter->dev->dev, "reservation reset failed\n"); 1897 } 1898 else { 1899 dev_info(&adapter->dev->dev, "reservation reset\n"); 1900 } 1901 #endif 1902 1903 spin_lock_irq(&adapter->lock); 1904 1905 rval = megaraid_abort_and_reset(adapter, cmd, SCB_RESET); 1906 1907 /* 1908 * This is required here to complete any completed requests 1909 * to be communicated over to the mid layer. 1910 */ 1911 mega_rundoneq(adapter); 1912 spin_unlock_irq(&adapter->lock); 1913 1914 return rval; 1915 } 1916 1917 /** 1918 * megaraid_abort_and_reset() 1919 * @adapter: megaraid soft state 1920 * @cmd: scsi command to be aborted or reset 1921 * @aor: abort or reset flag 1922 * 1923 * Try to locate the scsi command in the pending queue. If found and is not 1924 * issued to the controller, abort/reset it. Otherwise return failure 1925 */ 1926 static int 1927 megaraid_abort_and_reset(adapter_t *adapter, struct scsi_cmnd *cmd, int aor) 1928 { 1929 struct list_head *pos, *next; 1930 scb_t *scb; 1931 1932 dev_warn(&adapter->dev->dev, "%s cmd=%x <c=%d t=%d l=%d>\n", 1933 (aor == SCB_ABORT)? "ABORTING":"RESET", 1934 cmd->cmnd[0], cmd->device->channel, 1935 cmd->device->id, (u32)cmd->device->lun); 1936 1937 if(list_empty(&adapter->pending_list)) 1938 return FAILED; 1939 1940 list_for_each_safe(pos, next, &adapter->pending_list) { 1941 1942 scb = list_entry(pos, scb_t, list); 1943 1944 if (scb->cmd == cmd) { /* Found command */ 1945 1946 scb->state |= aor; 1947 1948 /* 1949 * Check if this command has firmware ownership. If 1950 * yes, we cannot reset this command. Whenever f/w 1951 * completes this command, we will return appropriate 1952 * status from ISR. 1953 */ 1954 if( scb->state & SCB_ISSUED ) { 1955 1956 dev_warn(&adapter->dev->dev, 1957 "%s[%x], fw owner\n", 1958 (aor==SCB_ABORT) ? "ABORTING":"RESET", 1959 scb->idx); 1960 1961 return FAILED; 1962 } 1963 else { 1964 1965 /* 1966 * Not yet issued! Remove from the pending 1967 * list 1968 */ 1969 dev_warn(&adapter->dev->dev, 1970 "%s-[%x], driver owner\n", 1971 (aor==SCB_ABORT) ? 
"ABORTING":"RESET", 1972 scb->idx); 1973 1974 mega_free_scb(adapter, scb); 1975 1976 if( aor == SCB_ABORT ) { 1977 cmd->result = (DID_ABORT << 16); 1978 } 1979 else { 1980 cmd->result = (DID_RESET << 16); 1981 } 1982 1983 list_add_tail(SCSI_LIST(cmd), 1984 &adapter->completed_list); 1985 1986 return SUCCESS; 1987 } 1988 } 1989 } 1990 1991 return FAILED; 1992 } 1993 1994 static inline int 1995 make_local_pdev(adapter_t *adapter, struct pci_dev **pdev) 1996 { 1997 *pdev = pci_alloc_dev(NULL); 1998 1999 if( *pdev == NULL ) return -1; 2000 2001 memcpy(*pdev, adapter->dev, sizeof(struct pci_dev)); 2002 2003 if (dma_set_mask(&(*pdev)->dev, DMA_BIT_MASK(32)) != 0) { 2004 kfree(*pdev); 2005 return -1; 2006 } 2007 2008 return 0; 2009 } 2010 2011 static inline void 2012 free_local_pdev(struct pci_dev *pdev) 2013 { 2014 kfree(pdev); 2015 } 2016 2017 /** 2018 * mega_allocate_inquiry() 2019 * @dma_handle: handle returned for dma address 2020 * @pdev: handle to pci device 2021 * 2022 * allocates memory for inquiry structure 2023 */ 2024 static inline void * 2025 mega_allocate_inquiry(dma_addr_t *dma_handle, struct pci_dev *pdev) 2026 { 2027 return dma_alloc_coherent(&pdev->dev, sizeof(mega_inquiry3), 2028 dma_handle, GFP_KERNEL); 2029 } 2030 2031 2032 static inline void 2033 mega_free_inquiry(void *inquiry, dma_addr_t dma_handle, struct pci_dev *pdev) 2034 { 2035 dma_free_coherent(&pdev->dev, sizeof(mega_inquiry3), inquiry, 2036 dma_handle); 2037 } 2038 2039 2040 #ifdef CONFIG_PROC_FS 2041 /* Following code handles /proc fs */ 2042 2043 /** 2044 * proc_show_config() 2045 * @m: Synthetic file construction data 2046 * @v: File iterator 2047 * 2048 * Display configuration information about the controller. 2049 */ 2050 static int 2051 proc_show_config(struct seq_file *m, void *v) 2052 { 2053 2054 adapter_t *adapter = m->private; 2055 2056 seq_puts(m, MEGARAID_VERSION); 2057 if(adapter->product_info.product_name[0]) 2058 seq_printf(m, "%s\n", adapter->product_info.product_name); 2059 2060 seq_puts(m, "Controller Type: "); 2061 2062 if( adapter->flag & BOARD_MEMMAP ) 2063 seq_puts(m, "438/466/467/471/493/518/520/531/532\n"); 2064 else 2065 seq_puts(m, "418/428/434\n"); 2066 2067 if(adapter->flag & BOARD_40LD) 2068 seq_puts(m, "Controller Supports 40 Logical Drives\n"); 2069 2070 if(adapter->flag & BOARD_64BIT) 2071 seq_puts(m, "Controller capable of 64-bit memory addressing\n"); 2072 if( adapter->has_64bit_addr ) 2073 seq_puts(m, "Controller using 64-bit memory addressing\n"); 2074 else 2075 seq_puts(m, "Controller is not using 64-bit memory addressing\n"); 2076 2077 seq_printf(m, "Base = %08lx, Irq = %d, ", 2078 adapter->base, adapter->host->irq); 2079 2080 seq_printf(m, "Logical Drives = %d, Channels = %d\n", 2081 adapter->numldrv, adapter->product_info.nchannels); 2082 2083 seq_printf(m, "Version =%s:%s, DRAM = %dMb\n", 2084 adapter->fw_version, adapter->bios_version, 2085 adapter->product_info.dram_size); 2086 2087 seq_printf(m, "Controller Queue Depth = %d, Driver Queue Depth = %d\n", 2088 adapter->product_info.max_commands, adapter->max_cmds); 2089 2090 seq_printf(m, "support_ext_cdb = %d\n", adapter->support_ext_cdb); 2091 seq_printf(m, "support_random_del = %d\n", adapter->support_random_del); 2092 seq_printf(m, "boot_ldrv_enabled = %d\n", adapter->boot_ldrv_enabled); 2093 seq_printf(m, "boot_ldrv = %d\n", adapter->boot_ldrv); 2094 seq_printf(m, "boot_pdrv_enabled = %d\n", adapter->boot_pdrv_enabled); 2095 seq_printf(m, "boot_pdrv_ch = %d\n", adapter->boot_pdrv_ch); 2096 seq_printf(m, "boot_pdrv_tgt = 
%d\n", adapter->boot_pdrv_tgt); 2097 seq_printf(m, "quiescent = %d\n", 2098 atomic_read(&adapter->quiescent)); 2099 seq_printf(m, "has_cluster = %d\n", adapter->has_cluster); 2100 2101 seq_puts(m, "\nModule Parameters:\n"); 2102 seq_printf(m, "max_cmd_per_lun = %d\n", max_cmd_per_lun); 2103 seq_printf(m, "max_sectors_per_io = %d\n", max_sectors_per_io); 2104 return 0; 2105 } 2106 2107 /** 2108 * proc_show_stat() 2109 * @m: Synthetic file construction data 2110 * @v: File iterator 2111 * 2112 * Display statistical information about the I/O activity. 2113 */ 2114 static int 2115 proc_show_stat(struct seq_file *m, void *v) 2116 { 2117 adapter_t *adapter = m->private; 2118 #if MEGA_HAVE_STATS 2119 int i; 2120 #endif 2121 2122 seq_puts(m, "Statistical Information for this controller\n"); 2123 seq_printf(m, "pend_cmds = %d\n", atomic_read(&adapter->pend_cmds)); 2124 #if MEGA_HAVE_STATS 2125 for(i = 0; i < adapter->numldrv; i++) { 2126 seq_printf(m, "Logical Drive %d:\n", i); 2127 seq_printf(m, "\tReads Issued = %lu, Writes Issued = %lu\n", 2128 adapter->nreads[i], adapter->nwrites[i]); 2129 seq_printf(m, "\tSectors Read = %lu, Sectors Written = %lu\n", 2130 adapter->nreadblocks[i], adapter->nwriteblocks[i]); 2131 seq_printf(m, "\tRead errors = %lu, Write errors = %lu\n\n", 2132 adapter->rd_errors[i], adapter->wr_errors[i]); 2133 } 2134 #else 2135 seq_puts(m, "IO and error counters not compiled in driver.\n"); 2136 #endif 2137 return 0; 2138 } 2139 2140 2141 /** 2142 * proc_show_mbox() 2143 * @m: Synthetic file construction data 2144 * @v: File iterator 2145 * 2146 * Display mailbox information for the last command issued. This information 2147 * is good for debugging. 2148 */ 2149 static int 2150 proc_show_mbox(struct seq_file *m, void *v) 2151 { 2152 adapter_t *adapter = m->private; 2153 volatile mbox_t *mbox = adapter->mbox; 2154 2155 seq_puts(m, "Contents of Mail Box Structure\n"); 2156 seq_printf(m, " Fw Command = 0x%02x\n", mbox->m_out.cmd); 2157 seq_printf(m, " Cmd Sequence = 0x%02x\n", mbox->m_out.cmdid); 2158 seq_printf(m, " No of Sectors= %04d\n", mbox->m_out.numsectors); 2159 seq_printf(m, " LBA = 0x%02x\n", mbox->m_out.lba); 2160 seq_printf(m, " DTA = 0x%08x\n", mbox->m_out.xferaddr); 2161 seq_printf(m, " Logical Drive= 0x%02x\n", mbox->m_out.logdrv); 2162 seq_printf(m, " No of SG Elmt= 0x%02x\n", mbox->m_out.numsgelements); 2163 seq_printf(m, " Busy = %01x\n", mbox->m_in.busy); 2164 seq_printf(m, " Status = 0x%02x\n", mbox->m_in.status); 2165 return 0; 2166 } 2167 2168 2169 /** 2170 * proc_show_rebuild_rate() 2171 * @m: Synthetic file construction data 2172 * @v: File iterator 2173 * 2174 * Display current rebuild rate 2175 */ 2176 static int 2177 proc_show_rebuild_rate(struct seq_file *m, void *v) 2178 { 2179 adapter_t *adapter = m->private; 2180 dma_addr_t dma_handle; 2181 caddr_t inquiry; 2182 struct pci_dev *pdev; 2183 2184 if( make_local_pdev(adapter, &pdev) != 0 ) 2185 return 0; 2186 2187 if( (inquiry = mega_allocate_inquiry(&dma_handle, pdev)) == NULL ) 2188 goto free_pdev; 2189 2190 if( mega_adapinq(adapter, dma_handle) != 0 ) { 2191 seq_puts(m, "Adapter inquiry failed.\n"); 2192 dev_warn(&adapter->dev->dev, "inquiry failed\n"); 2193 goto free_inquiry; 2194 } 2195 2196 if( adapter->flag & BOARD_40LD ) 2197 seq_printf(m, "Rebuild Rate: [%d%%]\n", 2198 ((mega_inquiry3 *)inquiry)->rebuild_rate); 2199 else 2200 seq_printf(m, "Rebuild Rate: [%d%%]\n", 2201 ((mraid_ext_inquiry *) 2202 inquiry)->raid_inq.adapter_info.rebuild_rate); 2203 2204 free_inquiry: 2205 
mega_free_inquiry(inquiry, dma_handle, pdev); 2206 free_pdev: 2207 free_local_pdev(pdev); 2208 return 0; 2209 } 2210 2211 2212 /** 2213 * proc_show_battery() 2214 * @m: Synthetic file construction data 2215 * @v: File iterator 2216 * 2217 * Display information about the battery module on the controller. 2218 */ 2219 static int 2220 proc_show_battery(struct seq_file *m, void *v) 2221 { 2222 adapter_t *adapter = m->private; 2223 dma_addr_t dma_handle; 2224 caddr_t inquiry; 2225 struct pci_dev *pdev; 2226 u8 battery_status; 2227 2228 if( make_local_pdev(adapter, &pdev) != 0 ) 2229 return 0; 2230 2231 if( (inquiry = mega_allocate_inquiry(&dma_handle, pdev)) == NULL ) 2232 goto free_pdev; 2233 2234 if( mega_adapinq(adapter, dma_handle) != 0 ) { 2235 seq_puts(m, "Adapter inquiry failed.\n"); 2236 dev_warn(&adapter->dev->dev, "inquiry failed\n"); 2237 goto free_inquiry; 2238 } 2239 2240 if( adapter->flag & BOARD_40LD ) { 2241 battery_status = ((mega_inquiry3 *)inquiry)->battery_status; 2242 } 2243 else { 2244 battery_status = ((mraid_ext_inquiry *)inquiry)-> 2245 raid_inq.adapter_info.battery_status; 2246 } 2247 2248 /* 2249 * Decode the battery status 2250 */ 2251 seq_printf(m, "Battery Status:[%d]", battery_status); 2252 2253 if(battery_status == MEGA_BATT_CHARGE_DONE) 2254 seq_puts(m, " Charge Done"); 2255 2256 if(battery_status & MEGA_BATT_MODULE_MISSING) 2257 seq_puts(m, " Module Missing"); 2258 2259 if(battery_status & MEGA_BATT_LOW_VOLTAGE) 2260 seq_puts(m, " Low Voltage"); 2261 2262 if(battery_status & MEGA_BATT_TEMP_HIGH) 2263 seq_puts(m, " Temperature High"); 2264 2265 if(battery_status & MEGA_BATT_PACK_MISSING) 2266 seq_puts(m, " Pack Missing"); 2267 2268 if(battery_status & MEGA_BATT_CHARGE_INPROG) 2269 seq_puts(m, " Charge In-progress"); 2270 2271 if(battery_status & MEGA_BATT_CHARGE_FAIL) 2272 seq_puts(m, " Charge Fail"); 2273 2274 if(battery_status & MEGA_BATT_CYCLES_EXCEEDED) 2275 seq_puts(m, " Cycles Exceeded"); 2276 2277 seq_putc(m, '\n'); 2278 2279 free_inquiry: 2280 mega_free_inquiry(inquiry, dma_handle, pdev); 2281 free_pdev: 2282 free_local_pdev(pdev); 2283 return 0; 2284 } 2285 2286 2287 /* 2288 * Display scsi inquiry 2289 */ 2290 static void 2291 mega_print_inquiry(struct seq_file *m, char *scsi_inq) 2292 { 2293 int i; 2294 2295 seq_puts(m, " Vendor: "); 2296 seq_write(m, scsi_inq + 8, 8); 2297 seq_puts(m, " Model: "); 2298 seq_write(m, scsi_inq + 16, 16); 2299 seq_puts(m, " Rev: "); 2300 seq_write(m, scsi_inq + 32, 4); 2301 seq_putc(m, '\n'); 2302 2303 i = scsi_inq[0] & 0x1f; 2304 seq_printf(m, " Type: %s ", scsi_device_type(i)); 2305 2306 seq_printf(m, " ANSI SCSI revision: %02x", 2307 scsi_inq[2] & 0x07); 2308 2309 if( (scsi_inq[2] & 0x07) == 1 && (scsi_inq[3] & 0x0f) == 1 ) 2310 seq_puts(m, " CCS\n"); 2311 else 2312 seq_putc(m, '\n'); 2313 } 2314 2315 /** 2316 * proc_show_pdrv() 2317 * @m: Synthetic file construction data 2318 * @adapter: pointer to our soft state 2319 * @channel: channel 2320 * 2321 * Display information about the physical drives. 
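 *
 * The per-device state comes from the adapter enquiry's pdrv_state array,
 * indexed as (channel * 16 + target). A SCSI INQUIRY is then issued to each
 * target and the vendor/model/revision is printed for disk devices only;
 * other device types are left to /proc/scsi/scsi.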
2322 */ 2323 static int 2324 proc_show_pdrv(struct seq_file *m, adapter_t *adapter, int channel) 2325 { 2326 dma_addr_t dma_handle; 2327 char *scsi_inq; 2328 dma_addr_t scsi_inq_dma_handle; 2329 caddr_t inquiry; 2330 struct pci_dev *pdev; 2331 u8 *pdrv_state; 2332 u8 state; 2333 int tgt; 2334 int max_channels; 2335 int i; 2336 2337 if( make_local_pdev(adapter, &pdev) != 0 ) 2338 return 0; 2339 2340 if( (inquiry = mega_allocate_inquiry(&dma_handle, pdev)) == NULL ) 2341 goto free_pdev; 2342 2343 if( mega_adapinq(adapter, dma_handle) != 0 ) { 2344 seq_puts(m, "Adapter inquiry failed.\n"); 2345 dev_warn(&adapter->dev->dev, "inquiry failed\n"); 2346 goto free_inquiry; 2347 } 2348 2349 2350 scsi_inq = dma_alloc_coherent(&pdev->dev, 256, &scsi_inq_dma_handle, 2351 GFP_KERNEL); 2352 if( scsi_inq == NULL ) { 2353 seq_puts(m, "memory not available for scsi inq.\n"); 2354 goto free_inquiry; 2355 } 2356 2357 if( adapter->flag & BOARD_40LD ) { 2358 pdrv_state = ((mega_inquiry3 *)inquiry)->pdrv_state; 2359 } 2360 else { 2361 pdrv_state = ((mraid_ext_inquiry *)inquiry)-> 2362 raid_inq.pdrv_info.pdrv_state; 2363 } 2364 2365 max_channels = adapter->product_info.nchannels; 2366 2367 if( channel >= max_channels ) { 2368 goto free_pci; 2369 } 2370 2371 for( tgt = 0; tgt <= MAX_TARGET; tgt++ ) { 2372 2373 i = channel*16 + tgt; 2374 2375 state = *(pdrv_state + i); 2376 switch( state & 0x0F ) { 2377 case PDRV_ONLINE: 2378 seq_printf(m, "Channel:%2d Id:%2d State: Online", 2379 channel, tgt); 2380 break; 2381 2382 case PDRV_FAILED: 2383 seq_printf(m, "Channel:%2d Id:%2d State: Failed", 2384 channel, tgt); 2385 break; 2386 2387 case PDRV_RBLD: 2388 seq_printf(m, "Channel:%2d Id:%2d State: Rebuild", 2389 channel, tgt); 2390 break; 2391 2392 case PDRV_HOTSPARE: 2393 seq_printf(m, "Channel:%2d Id:%2d State: Hot spare", 2394 channel, tgt); 2395 break; 2396 2397 default: 2398 seq_printf(m, "Channel:%2d Id:%2d State: Un-configured", 2399 channel, tgt); 2400 break; 2401 } 2402 2403 /* 2404 * This interface displays inquiries for disk drives 2405 * only. Inquries for logical drives and non-disk 2406 * devices are available through /proc/scsi/scsi 2407 */ 2408 memset(scsi_inq, 0, 256); 2409 if( mega_internal_dev_inquiry(adapter, channel, tgt, 2410 scsi_inq_dma_handle) || 2411 (scsi_inq[0] & 0x1F) != TYPE_DISK ) { 2412 continue; 2413 } 2414 2415 /* 2416 * Check for overflow. We print less than 240 2417 * characters for inquiry 2418 */ 2419 seq_puts(m, ".\n"); 2420 mega_print_inquiry(m, scsi_inq); 2421 } 2422 2423 free_pci: 2424 dma_free_coherent(&pdev->dev, 256, scsi_inq, scsi_inq_dma_handle); 2425 free_inquiry: 2426 mega_free_inquiry(inquiry, dma_handle, pdev); 2427 free_pdev: 2428 free_local_pdev(pdev); 2429 return 0; 2430 } 2431 2432 /** 2433 * proc_show_pdrv_ch0() 2434 * @m: Synthetic file construction data 2435 * @v: File iterator 2436 * 2437 * Display information about the physical drives on physical channel 0. 2438 */ 2439 static int 2440 proc_show_pdrv_ch0(struct seq_file *m, void *v) 2441 { 2442 return proc_show_pdrv(m, m->private, 0); 2443 } 2444 2445 2446 /** 2447 * proc_show_pdrv_ch1() 2448 * @m: Synthetic file construction data 2449 * @v: File iterator 2450 * 2451 * Display information about the physical drives on physical channel 1. 
2452 */ 2453 static int 2454 proc_show_pdrv_ch1(struct seq_file *m, void *v) 2455 { 2456 return proc_show_pdrv(m, m->private, 1); 2457 } 2458 2459 2460 /** 2461 * proc_show_pdrv_ch2() 2462 * @m: Synthetic file construction data 2463 * @v: File iterator 2464 * 2465 * Display information about the physical drives on physical channel 2. 2466 */ 2467 static int 2468 proc_show_pdrv_ch2(struct seq_file *m, void *v) 2469 { 2470 return proc_show_pdrv(m, m->private, 2); 2471 } 2472 2473 2474 /** 2475 * proc_show_pdrv_ch3() 2476 * @m: Synthetic file construction data 2477 * @v: File iterator 2478 * 2479 * Display information about the physical drives on physical channel 3. 2480 */ 2481 static int 2482 proc_show_pdrv_ch3(struct seq_file *m, void *v) 2483 { 2484 return proc_show_pdrv(m, m->private, 3); 2485 } 2486 2487 2488 /** 2489 * proc_show_rdrv() 2490 * @m: Synthetic file construction data 2491 * @adapter: pointer to our soft state 2492 * @start: starting logical drive to display 2493 * @end: ending logical drive to display 2494 * 2495 * We do not print the inquiry information since its already available through 2496 * /proc/scsi/scsi interface 2497 */ 2498 static int 2499 proc_show_rdrv(struct seq_file *m, adapter_t *adapter, int start, int end ) 2500 { 2501 dma_addr_t dma_handle; 2502 logdrv_param *lparam; 2503 megacmd_t mc; 2504 char *disk_array; 2505 dma_addr_t disk_array_dma_handle; 2506 caddr_t inquiry; 2507 struct pci_dev *pdev; 2508 u8 *rdrv_state; 2509 int num_ldrv; 2510 u32 array_sz; 2511 int i; 2512 2513 if( make_local_pdev(adapter, &pdev) != 0 ) 2514 return 0; 2515 2516 if( (inquiry = mega_allocate_inquiry(&dma_handle, pdev)) == NULL ) 2517 goto free_pdev; 2518 2519 if( mega_adapinq(adapter, dma_handle) != 0 ) { 2520 seq_puts(m, "Adapter inquiry failed.\n"); 2521 dev_warn(&adapter->dev->dev, "inquiry failed\n"); 2522 goto free_inquiry; 2523 } 2524 2525 memset(&mc, 0, sizeof(megacmd_t)); 2526 2527 if( adapter->flag & BOARD_40LD ) { 2528 array_sz = sizeof(disk_array_40ld); 2529 2530 rdrv_state = ((mega_inquiry3 *)inquiry)->ldrv_state; 2531 2532 num_ldrv = ((mega_inquiry3 *)inquiry)->num_ldrv; 2533 } 2534 else { 2535 array_sz = sizeof(disk_array_8ld); 2536 2537 rdrv_state = ((mraid_ext_inquiry *)inquiry)-> 2538 raid_inq.logdrv_info.ldrv_state; 2539 2540 num_ldrv = ((mraid_ext_inquiry *)inquiry)-> 2541 raid_inq.logdrv_info.num_ldrv; 2542 } 2543 2544 disk_array = dma_alloc_coherent(&pdev->dev, array_sz, 2545 &disk_array_dma_handle, GFP_KERNEL); 2546 2547 if( disk_array == NULL ) { 2548 seq_puts(m, "memory not available.\n"); 2549 goto free_inquiry; 2550 } 2551 2552 mc.xferaddr = (u32)disk_array_dma_handle; 2553 2554 if( adapter->flag & BOARD_40LD ) { 2555 mc.cmd = FC_NEW_CONFIG; 2556 mc.opcode = OP_DCMD_READ_CONFIG; 2557 2558 if( mega_internal_command(adapter, &mc, NULL) ) { 2559 seq_puts(m, "40LD read config failed.\n"); 2560 goto free_pci; 2561 } 2562 2563 } 2564 else { 2565 mc.cmd = NEW_READ_CONFIG_8LD; 2566 2567 if( mega_internal_command(adapter, &mc, NULL) ) { 2568 mc.cmd = READ_CONFIG_8LD; 2569 if( mega_internal_command(adapter, &mc, NULL) ) { 2570 seq_puts(m, "8LD read config failed.\n"); 2571 goto free_pci; 2572 } 2573 } 2574 } 2575 2576 for( i = start; i < ( (end+1 < num_ldrv) ? end+1 : num_ldrv ); i++ ) { 2577 2578 if( adapter->flag & BOARD_40LD ) { 2579 lparam = 2580 &((disk_array_40ld *)disk_array)->ldrv[i].lparam; 2581 } 2582 else { 2583 lparam = 2584 &((disk_array_8ld *)disk_array)->ldrv[i].lparam; 2585 } 2586 2587 /* 2588 * Check for overflow. 
We print less than 240 characters for 2589 * information about each logical drive. 2590 */ 2591 seq_printf(m, "Logical drive:%2d:, ", i); 2592 2593 switch( rdrv_state[i] & 0x0F ) { 2594 case RDRV_OFFLINE: 2595 seq_puts(m, "state: offline"); 2596 break; 2597 case RDRV_DEGRADED: 2598 seq_puts(m, "state: degraded"); 2599 break; 2600 case RDRV_OPTIMAL: 2601 seq_puts(m, "state: optimal"); 2602 break; 2603 case RDRV_DELETED: 2604 seq_puts(m, "state: deleted"); 2605 break; 2606 default: 2607 seq_puts(m, "state: unknown"); 2608 break; 2609 } 2610 2611 /* 2612 * Check if check consistency or initialization is going on 2613 * for this logical drive. 2614 */ 2615 if( (rdrv_state[i] & 0xF0) == 0x20 ) 2616 seq_puts(m, ", check-consistency in progress"); 2617 else if( (rdrv_state[i] & 0xF0) == 0x10 ) 2618 seq_puts(m, ", initialization in progress"); 2619 2620 seq_putc(m, '\n'); 2621 2622 seq_printf(m, "Span depth:%3d, ", lparam->span_depth); 2623 seq_printf(m, "RAID level:%3d, ", lparam->level); 2624 seq_printf(m, "Stripe size:%3d, ", 2625 lparam->stripe_sz ? lparam->stripe_sz/2: 128); 2626 seq_printf(m, "Row size:%3d\n", lparam->row_size); 2627 2628 seq_puts(m, "Read Policy: "); 2629 switch(lparam->read_ahead) { 2630 case NO_READ_AHEAD: 2631 seq_puts(m, "No read ahead, "); 2632 break; 2633 case READ_AHEAD: 2634 seq_puts(m, "Read ahead, "); 2635 break; 2636 case ADAP_READ_AHEAD: 2637 seq_puts(m, "Adaptive, "); 2638 break; 2639 2640 } 2641 2642 seq_puts(m, "Write Policy: "); 2643 switch(lparam->write_mode) { 2644 case WRMODE_WRITE_THRU: 2645 seq_puts(m, "Write thru, "); 2646 break; 2647 case WRMODE_WRITE_BACK: 2648 seq_puts(m, "Write back, "); 2649 break; 2650 } 2651 2652 seq_puts(m, "Cache Policy: "); 2653 switch(lparam->direct_io) { 2654 case CACHED_IO: 2655 seq_puts(m, "Cached IO\n\n"); 2656 break; 2657 case DIRECT_IO: 2658 seq_puts(m, "Direct IO\n\n"); 2659 break; 2660 } 2661 } 2662 2663 free_pci: 2664 dma_free_coherent(&pdev->dev, array_sz, disk_array, 2665 disk_array_dma_handle); 2666 free_inquiry: 2667 mega_free_inquiry(inquiry, dma_handle, pdev); 2668 free_pdev: 2669 free_local_pdev(pdev); 2670 return 0; 2671 } 2672 2673 /** 2674 * proc_show_rdrv_10() 2675 * @m: Synthetic file construction data 2676 * @v: File iterator 2677 * 2678 * Display real time information about the logical drives 0 through 9. 2679 */ 2680 static int 2681 proc_show_rdrv_10(struct seq_file *m, void *v) 2682 { 2683 return proc_show_rdrv(m, m->private, 0, 9); 2684 } 2685 2686 2687 /** 2688 * proc_show_rdrv_20() 2689 * @m: Synthetic file construction data 2690 * @v: File iterator 2691 * 2692 * Display real time information about the logical drives 0 through 9. 2693 */ 2694 static int 2695 proc_show_rdrv_20(struct seq_file *m, void *v) 2696 { 2697 return proc_show_rdrv(m, m->private, 10, 19); 2698 } 2699 2700 2701 /** 2702 * proc_show_rdrv_30() 2703 * @m: Synthetic file construction data 2704 * @v: File iterator 2705 * 2706 * Display real time information about the logical drives 0 through 9. 2707 */ 2708 static int 2709 proc_show_rdrv_30(struct seq_file *m, void *v) 2710 { 2711 return proc_show_rdrv(m, m->private, 20, 29); 2712 } 2713 2714 2715 /** 2716 * proc_show_rdrv_40() 2717 * @m: Synthetic file construction data 2718 * @v: File iterator 2719 * 2720 * Display real time information about the logical drives 0 through 9. 
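 * For proc_show_rdrv_40() the range passed to proc_show_rdrv() below is
 * logical drives 30 through 39.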
2721 */ 2722 static int 2723 proc_show_rdrv_40(struct seq_file *m, void *v) 2724 { 2725 return proc_show_rdrv(m, m->private, 30, 39); 2726 } 2727 2728 /** 2729 * mega_create_proc_entry() 2730 * @index: index in soft state array 2731 * @parent: parent node for this /proc entry 2732 * 2733 * Creates /proc entries for our controllers. 2734 */ 2735 static void 2736 mega_create_proc_entry(int index, struct proc_dir_entry *parent) 2737 { 2738 adapter_t *adapter = hba_soft_state[index]; 2739 struct proc_dir_entry *dir; 2740 u8 string[16]; 2741 2742 sprintf(string, "hba%d", adapter->host->host_no); 2743 dir = proc_mkdir_data(string, 0, parent, adapter); 2744 if (!dir) { 2745 dev_warn(&adapter->dev->dev, "proc_mkdir failed\n"); 2746 return; 2747 } 2748 2749 proc_create_single_data("config", S_IRUSR, dir, 2750 proc_show_config, adapter); 2751 proc_create_single_data("stat", S_IRUSR, dir, 2752 proc_show_stat, adapter); 2753 proc_create_single_data("mailbox", S_IRUSR, dir, 2754 proc_show_mbox, adapter); 2755 #if MEGA_HAVE_ENH_PROC 2756 proc_create_single_data("rebuild-rate", S_IRUSR, dir, 2757 proc_show_rebuild_rate, adapter); 2758 proc_create_single_data("battery-status", S_IRUSR, dir, 2759 proc_show_battery, adapter); 2760 proc_create_single_data("diskdrives-ch0", S_IRUSR, dir, 2761 proc_show_pdrv_ch0, adapter); 2762 proc_create_single_data("diskdrives-ch1", S_IRUSR, dir, 2763 proc_show_pdrv_ch1, adapter); 2764 proc_create_single_data("diskdrives-ch2", S_IRUSR, dir, 2765 proc_show_pdrv_ch2, adapter); 2766 proc_create_single_data("diskdrives-ch3", S_IRUSR, dir, 2767 proc_show_pdrv_ch3, adapter); 2768 proc_create_single_data("raiddrives-0-9", S_IRUSR, dir, 2769 proc_show_rdrv_10, adapter); 2770 proc_create_single_data("raiddrives-10-19", S_IRUSR, dir, 2771 proc_show_rdrv_20, adapter); 2772 proc_create_single_data("raiddrives-20-29", S_IRUSR, dir, 2773 proc_show_rdrv_30, adapter); 2774 proc_create_single_data("raiddrives-30-39", S_IRUSR, dir, 2775 proc_show_rdrv_40, adapter); 2776 #endif 2777 } 2778 2779 #else 2780 static inline void mega_create_proc_entry(int index, struct proc_dir_entry *parent) 2781 { 2782 } 2783 #endif 2784 2785 2786 /* 2787 * megaraid_biosparam() 2788 * 2789 * Return the disk geometry for a particular disk 2790 */ 2791 static int 2792 megaraid_biosparam(struct scsi_device *sdev, struct block_device *bdev, 2793 sector_t capacity, int geom[]) 2794 { 2795 adapter_t *adapter; 2796 int heads; 2797 int sectors; 2798 int cylinders; 2799 2800 /* Get pointer to host config structure */ 2801 adapter = (adapter_t *)sdev->host->hostdata; 2802 2803 if (IS_RAID_CH(adapter, sdev->channel)) { 2804 /* Default heads (64) & sectors (32) */ 2805 heads = 64; 2806 sectors = 32; 2807 cylinders = (ulong)capacity / (heads * sectors); 2808 2809 /* 2810 * Handle extended translation size for logical drives 2811 * > 1Gb 2812 */ 2813 if ((ulong)capacity >= 0x200000) { 2814 heads = 255; 2815 sectors = 63; 2816 cylinders = (ulong)capacity / (heads * sectors); 2817 } 2818 2819 /* return result */ 2820 geom[0] = heads; 2821 geom[1] = sectors; 2822 geom[2] = cylinders; 2823 } 2824 else { 2825 if (scsi_partsize(bdev, capacity, geom)) 2826 return 0; 2827 2828 dev_info(&adapter->dev->dev, 2829 "invalid partition on this disk on channel %d\n", 2830 sdev->channel); 2831 2832 /* Default heads (64) & sectors (32) */ 2833 heads = 64; 2834 sectors = 32; 2835 cylinders = (ulong)capacity / (heads * sectors); 2836 2837 /* Handle extended translation size for logical drives > 1Gb */ 2838 if ((ulong)capacity >= 0x200000) { 2839 
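			/*
			 * 0x200000 sectors x 512 bytes = 1 GiB, so above 1 GiB
			 * switch to the extended 255 heads / 63 sectors per
			 * track translation.
			 */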
heads = 255; 2840 sectors = 63; 2841 cylinders = (ulong)capacity / (heads * sectors); 2842 } 2843 2844 /* return result */ 2845 geom[0] = heads; 2846 geom[1] = sectors; 2847 geom[2] = cylinders; 2848 } 2849 2850 return 0; 2851 } 2852 2853 /** 2854 * mega_init_scb() 2855 * @adapter: pointer to our soft state 2856 * 2857 * Allocate memory for the various pointers in the scb structures: 2858 * scatter-gather list pointer, passthru and extended passthru structure 2859 * pointers. 2860 */ 2861 static int 2862 mega_init_scb(adapter_t *adapter) 2863 { 2864 scb_t *scb; 2865 int i; 2866 2867 for( i = 0; i < adapter->max_cmds; i++ ) { 2868 2869 scb = &adapter->scb_list[i]; 2870 2871 scb->sgl64 = NULL; 2872 scb->sgl = NULL; 2873 scb->pthru = NULL; 2874 scb->epthru = NULL; 2875 } 2876 2877 for( i = 0; i < adapter->max_cmds; i++ ) { 2878 2879 scb = &adapter->scb_list[i]; 2880 2881 scb->idx = i; 2882 2883 scb->sgl64 = dma_alloc_coherent(&adapter->dev->dev, 2884 sizeof(mega_sgl64) * adapter->sglen, 2885 &scb->sgl_dma_addr, GFP_KERNEL); 2886 2887 scb->sgl = (mega_sglist *)scb->sgl64; 2888 2889 if( !scb->sgl ) { 2890 dev_warn(&adapter->dev->dev, "RAID: Can't allocate sglist\n"); 2891 mega_free_sgl(adapter); 2892 return -1; 2893 } 2894 2895 scb->pthru = dma_alloc_coherent(&adapter->dev->dev, 2896 sizeof(mega_passthru), 2897 &scb->pthru_dma_addr, GFP_KERNEL); 2898 2899 if( !scb->pthru ) { 2900 dev_warn(&adapter->dev->dev, "RAID: Can't allocate passthru\n"); 2901 mega_free_sgl(adapter); 2902 return -1; 2903 } 2904 2905 scb->epthru = dma_alloc_coherent(&adapter->dev->dev, 2906 sizeof(mega_ext_passthru), 2907 &scb->epthru_dma_addr, GFP_KERNEL); 2908 2909 if( !scb->epthru ) { 2910 dev_warn(&adapter->dev->dev, 2911 "Can't allocate extended passthru\n"); 2912 mega_free_sgl(adapter); 2913 return -1; 2914 } 2915 2916 2917 scb->dma_type = MEGA_DMA_TYPE_NONE; 2918 2919 /* 2920 * Link to free list 2921 * lock not required since we are loading the driver, so no 2922 * commands possible right now. 2923 */ 2924 scb->state = SCB_FREE; 2925 scb->cmd = NULL; 2926 list_add(&scb->list, &adapter->free_list); 2927 } 2928 2929 return 0; 2930 } 2931 2932 2933 /** 2934 * megadev_open() 2935 * @inode: unused 2936 * @filep: unused 2937 * 2938 * Routines for the character/ioctl interface to the driver. Find out if this 2939 * is a valid open. 2940 */ 2941 static int 2942 megadev_open (struct inode *inode, struct file *filep) 2943 { 2944 /* 2945 * Only allow superuser to access private ioctl interface 2946 */ 2947 if( !capable(CAP_SYS_ADMIN) ) return -EACCES; 2948 2949 return 0; 2950 } 2951 2952 2953 /** 2954 * megadev_ioctl() 2955 * @filep: Our device file 2956 * @cmd: ioctl command 2957 * @arg: user buffer 2958 * 2959 * ioctl entry point for our private ioctl interface. We move the data in from 2960 * the user space, prepare the command (if necessary, convert the old MIMD 2961 * ioctl to new ioctl command), and issue a synchronous command to the 2962 * controller. 
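 *
 * A minimal user-space sketch (the device node name is an assumption, the
 * driver's ioctl definitions are assumed visible to the application, and
 * error handling is omitted):
 *
 *	int fd = open("/dev/megadev_legacy", O_RDWR);	/* node name assumed */
 *	u32 nadap = 0;
 *	struct uioctl_t uioc;
 *
 *	memset(&uioc, 0, sizeof(uioc));
 *	uioc.ui.fcs.opcode = 0x82;
 *	uioc.ui.fcs.subopcode = MEGAIOC_QNADAP;	/* query adapter count */
 *	uioc.data = (char *)&nadap;
 *	ioctl(fd, USCSICMD, &uioc);		/* nadap now holds hba_count */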
 */
static int
megadev_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
{
	adapter_t	*adapter;
	nitioctl_t	uioc;
	int		adapno;
	int		rval;
	mega_passthru	__user *upthru;	/* user address for passthru */
	mega_passthru	*pthru;		/* copy user passthru here */
	dma_addr_t	pthru_dma_hndl;
	void		*data = NULL;	/* data to be transferred */
	dma_addr_t	data_dma_hndl;	/* dma handle for data xfer area */
	megacmd_t	mc;
#if MEGA_HAVE_STATS
	megastat_t	__user *ustats = NULL;
	int		num_ldrv = 0;
#endif
	u32		uxferaddr = 0;
	struct pci_dev	*pdev;

	/*
	 * Make sure only USCSICMD commands are issued through this interface;
	 * a MIMD application could still fire a different command.
	 */
	if( (_IOC_TYPE(cmd) != MEGAIOC_MAGIC) && (cmd != USCSICMD) ) {
		return -EINVAL;
	}

	/*
	 * Check and convert a possible MIMD command to a NIT command.
	 * mega_m_to_n() copies the data from the user space, so we do not
	 * have to do it here.
	 * NOTE: We will need some user address to copy out the data, therefore
	 * the interface layer will also provide us with the required user
	 * addresses.
	 */
	memset(&uioc, 0, sizeof(nitioctl_t));
	if( (rval = mega_m_to_n( (void __user *)arg, &uioc)) != 0 )
		return rval;


	switch( uioc.opcode ) {

	case GET_DRIVER_VER:
		if( put_user(driver_ver, (u32 __user *)uioc.uioc_uaddr) )
			return (-EFAULT);

		break;

	case GET_N_ADAP:
		if( put_user(hba_count, (u32 __user *)uioc.uioc_uaddr) )
			return (-EFAULT);

		/*
		 * Shucks. The MIMD interface returns a positive value for the
		 * number of adapters. TODO: Change it to return 0 when there is no
		 * application using the mimd interface.
3021 */ 3022 return hba_count; 3023 3024 case GET_ADAP_INFO: 3025 3026 /* 3027 * Which adapter 3028 */ 3029 if( (adapno = GETADAP(uioc.adapno)) >= hba_count ) 3030 return (-ENODEV); 3031 3032 if( copy_to_user(uioc.uioc_uaddr, mcontroller+adapno, 3033 sizeof(struct mcontroller)) ) 3034 return (-EFAULT); 3035 break; 3036 3037 #if MEGA_HAVE_STATS 3038 3039 case GET_STATS: 3040 /* 3041 * Which adapter 3042 */ 3043 if( (adapno = GETADAP(uioc.adapno)) >= hba_count ) 3044 return (-ENODEV); 3045 3046 adapter = hba_soft_state[adapno]; 3047 3048 ustats = uioc.uioc_uaddr; 3049 3050 if( copy_from_user(&num_ldrv, &ustats->num_ldrv, sizeof(int)) ) 3051 return (-EFAULT); 3052 3053 /* 3054 * Check for the validity of the logical drive number 3055 */ 3056 if( num_ldrv >= MAX_LOGICAL_DRIVES_40LD ) return -EINVAL; 3057 3058 if( copy_to_user(ustats->nreads, adapter->nreads, 3059 num_ldrv*sizeof(u32)) ) 3060 return -EFAULT; 3061 3062 if( copy_to_user(ustats->nreadblocks, adapter->nreadblocks, 3063 num_ldrv*sizeof(u32)) ) 3064 return -EFAULT; 3065 3066 if( copy_to_user(ustats->nwrites, adapter->nwrites, 3067 num_ldrv*sizeof(u32)) ) 3068 return -EFAULT; 3069 3070 if( copy_to_user(ustats->nwriteblocks, adapter->nwriteblocks, 3071 num_ldrv*sizeof(u32)) ) 3072 return -EFAULT; 3073 3074 if( copy_to_user(ustats->rd_errors, adapter->rd_errors, 3075 num_ldrv*sizeof(u32)) ) 3076 return -EFAULT; 3077 3078 if( copy_to_user(ustats->wr_errors, adapter->wr_errors, 3079 num_ldrv*sizeof(u32)) ) 3080 return -EFAULT; 3081 3082 return 0; 3083 3084 #endif 3085 case MBOX_CMD: 3086 3087 /* 3088 * Which adapter 3089 */ 3090 if( (adapno = GETADAP(uioc.adapno)) >= hba_count ) 3091 return (-ENODEV); 3092 3093 adapter = hba_soft_state[adapno]; 3094 3095 /* 3096 * Deletion of logical drive is a special case. The adapter 3097 * should be quiescent before this command is issued. 3098 */ 3099 if( uioc.uioc_rmbox[0] == FC_DEL_LOGDRV && 3100 uioc.uioc_rmbox[2] == OP_DEL_LOGDRV ) { 3101 3102 /* 3103 * Do we support this feature 3104 */ 3105 if( !adapter->support_random_del ) { 3106 dev_warn(&adapter->dev->dev, "logdrv " 3107 "delete on non-supporting F/W\n"); 3108 3109 return (-EINVAL); 3110 } 3111 3112 rval = mega_del_logdrv( adapter, uioc.uioc_rmbox[3] ); 3113 3114 if( rval == 0 ) { 3115 memset(&mc, 0, sizeof(megacmd_t)); 3116 3117 mc.status = rval; 3118 3119 rval = mega_n_to_m((void __user *)arg, &mc); 3120 } 3121 3122 return rval; 3123 } 3124 /* 3125 * This interface only support the regular passthru commands. 3126 * Reject extended passthru and 64-bit passthru 3127 */ 3128 if( uioc.uioc_rmbox[0] == MEGA_MBOXCMD_PASSTHRU64 || 3129 uioc.uioc_rmbox[0] == MEGA_MBOXCMD_EXTPTHRU ) { 3130 3131 dev_warn(&adapter->dev->dev, "rejected passthru\n"); 3132 3133 return (-EINVAL); 3134 } 3135 3136 /* 3137 * For all internal commands, the buffer must be allocated in 3138 * <4GB address range 3139 */ 3140 if( make_local_pdev(adapter, &pdev) != 0 ) 3141 return -EIO; 3142 3143 /* Is it a passthru command or a DCMD */ 3144 if( uioc.uioc_rmbox[0] == MEGA_MBOXCMD_PASSTHRU ) { 3145 /* Passthru commands */ 3146 3147 pthru = dma_alloc_coherent(&pdev->dev, 3148 sizeof(mega_passthru), 3149 &pthru_dma_hndl, GFP_KERNEL); 3150 3151 if( pthru == NULL ) { 3152 free_local_pdev(pdev); 3153 return (-ENOMEM); 3154 } 3155 3156 /* 3157 * The user passthru structure 3158 */ 3159 upthru = (mega_passthru __user *)(unsigned long)MBOX(uioc)->xferaddr; 3160 3161 /* 3162 * Copy in the user passthru here. 
3163 */ 3164 if( copy_from_user(pthru, upthru, 3165 sizeof(mega_passthru)) ) { 3166 3167 dma_free_coherent(&pdev->dev, 3168 sizeof(mega_passthru), 3169 pthru, pthru_dma_hndl); 3170 3171 free_local_pdev(pdev); 3172 3173 return (-EFAULT); 3174 } 3175 3176 /* 3177 * Is there a data transfer 3178 */ 3179 if( pthru->dataxferlen ) { 3180 data = dma_alloc_coherent(&pdev->dev, 3181 pthru->dataxferlen, 3182 &data_dma_hndl, 3183 GFP_KERNEL); 3184 3185 if( data == NULL ) { 3186 dma_free_coherent(&pdev->dev, 3187 sizeof(mega_passthru), 3188 pthru, 3189 pthru_dma_hndl); 3190 3191 free_local_pdev(pdev); 3192 3193 return (-ENOMEM); 3194 } 3195 3196 /* 3197 * Save the user address and point the kernel 3198 * address at just allocated memory 3199 */ 3200 uxferaddr = pthru->dataxferaddr; 3201 pthru->dataxferaddr = data_dma_hndl; 3202 } 3203 3204 3205 /* 3206 * Is data coming down-stream 3207 */ 3208 if( pthru->dataxferlen && (uioc.flags & UIOC_WR) ) { 3209 /* 3210 * Get the user data 3211 */ 3212 if( copy_from_user(data, (char __user *)(unsigned long) uxferaddr, 3213 pthru->dataxferlen) ) { 3214 rval = (-EFAULT); 3215 goto freemem_and_return; 3216 } 3217 } 3218 3219 memset(&mc, 0, sizeof(megacmd_t)); 3220 3221 mc.cmd = MEGA_MBOXCMD_PASSTHRU; 3222 mc.xferaddr = (u32)pthru_dma_hndl; 3223 3224 /* 3225 * Issue the command 3226 */ 3227 mega_internal_command(adapter, &mc, pthru); 3228 3229 rval = mega_n_to_m((void __user *)arg, &mc); 3230 3231 if( rval ) goto freemem_and_return; 3232 3233 3234 /* 3235 * Is data going up-stream 3236 */ 3237 if( pthru->dataxferlen && (uioc.flags & UIOC_RD) ) { 3238 if( copy_to_user((char __user *)(unsigned long) uxferaddr, data, 3239 pthru->dataxferlen) ) { 3240 rval = (-EFAULT); 3241 } 3242 } 3243 3244 /* 3245 * Send the request sense data also, irrespective of 3246 * whether the user has asked for it or not. 
3247 */ 3248 if (copy_to_user(upthru->reqsensearea, 3249 pthru->reqsensearea, 14)) 3250 rval = -EFAULT; 3251 3252 freemem_and_return: 3253 if( pthru->dataxferlen ) { 3254 dma_free_coherent(&pdev->dev, 3255 pthru->dataxferlen, data, 3256 data_dma_hndl); 3257 } 3258 3259 dma_free_coherent(&pdev->dev, sizeof(mega_passthru), 3260 pthru, pthru_dma_hndl); 3261 3262 free_local_pdev(pdev); 3263 3264 return rval; 3265 } 3266 else { 3267 /* DCMD commands */ 3268 3269 /* 3270 * Is there a data transfer 3271 */ 3272 if( uioc.xferlen ) { 3273 data = dma_alloc_coherent(&pdev->dev, 3274 uioc.xferlen, 3275 &data_dma_hndl, 3276 GFP_KERNEL); 3277 3278 if( data == NULL ) { 3279 free_local_pdev(pdev); 3280 return (-ENOMEM); 3281 } 3282 3283 uxferaddr = MBOX(uioc)->xferaddr; 3284 } 3285 3286 /* 3287 * Is data coming down-stream 3288 */ 3289 if( uioc.xferlen && (uioc.flags & UIOC_WR) ) { 3290 /* 3291 * Get the user data 3292 */ 3293 if( copy_from_user(data, (char __user *)(unsigned long) uxferaddr, 3294 uioc.xferlen) ) { 3295 3296 dma_free_coherent(&pdev->dev, 3297 uioc.xferlen, data, 3298 data_dma_hndl); 3299 3300 free_local_pdev(pdev); 3301 3302 return (-EFAULT); 3303 } 3304 } 3305 3306 memcpy(&mc, MBOX(uioc), sizeof(megacmd_t)); 3307 3308 mc.xferaddr = (u32)data_dma_hndl; 3309 3310 /* 3311 * Issue the command 3312 */ 3313 mega_internal_command(adapter, &mc, NULL); 3314 3315 rval = mega_n_to_m((void __user *)arg, &mc); 3316 3317 if( rval ) { 3318 if( uioc.xferlen ) { 3319 dma_free_coherent(&pdev->dev, 3320 uioc.xferlen, data, 3321 data_dma_hndl); 3322 } 3323 3324 free_local_pdev(pdev); 3325 3326 return rval; 3327 } 3328 3329 /* 3330 * Is data going up-stream 3331 */ 3332 if( uioc.xferlen && (uioc.flags & UIOC_RD) ) { 3333 if( copy_to_user((char __user *)(unsigned long) uxferaddr, data, 3334 uioc.xferlen) ) { 3335 3336 rval = (-EFAULT); 3337 } 3338 } 3339 3340 if( uioc.xferlen ) { 3341 dma_free_coherent(&pdev->dev, uioc.xferlen, 3342 data, data_dma_hndl); 3343 } 3344 3345 free_local_pdev(pdev); 3346 3347 return rval; 3348 } 3349 3350 default: 3351 return (-EINVAL); 3352 } 3353 3354 return 0; 3355 } 3356 3357 static long 3358 megadev_unlocked_ioctl(struct file *filep, unsigned int cmd, unsigned long arg) 3359 { 3360 int ret; 3361 3362 mutex_lock(&megadev_mutex); 3363 ret = megadev_ioctl(filep, cmd, arg); 3364 mutex_unlock(&megadev_mutex); 3365 3366 return ret; 3367 } 3368 3369 /** 3370 * mega_m_to_n() 3371 * @arg: user address 3372 * @uioc: new ioctl structure 3373 * 3374 * A thin layer to convert older mimd interface ioctl structure to NIT ioctl 3375 * structure 3376 * 3377 * Converts the older mimd ioctl structure to newer NIT structure 3378 */ 3379 static int 3380 mega_m_to_n(void __user *arg, nitioctl_t *uioc) 3381 { 3382 struct uioctl_t uioc_mimd; 3383 char signature[8] = {0}; 3384 u8 opcode; 3385 u8 subopcode; 3386 3387 3388 /* 3389 * check is the application conforms to NIT. We do not have to do much 3390 * in that case. 3391 * We exploit the fact that the signature is stored in the very 3392 * beginning of the structure. 3393 */ 3394 3395 if( copy_from_user(signature, arg, 7) ) 3396 return (-EFAULT); 3397 3398 if( memcmp(signature, "MEGANIT", 7) == 0 ) { 3399 3400 /* 3401 * NOTE NOTE: The nit ioctl is still under flux because of 3402 * change of mailbox definition, in HPE. No applications yet 3403 * use this interface and let's not have applications use this 3404 * interface till the new specifitions are in place. 
3405 */ 3406 return -EINVAL; 3407 #if 0 3408 if( copy_from_user(uioc, arg, sizeof(nitioctl_t)) ) 3409 return (-EFAULT); 3410 return 0; 3411 #endif 3412 } 3413 3414 /* 3415 * Else assume we have mimd uioctl_t as arg. Convert to nitioctl_t 3416 * 3417 * Get the user ioctl structure 3418 */ 3419 if( copy_from_user(&uioc_mimd, arg, sizeof(struct uioctl_t)) ) 3420 return (-EFAULT); 3421 3422 3423 /* 3424 * Get the opcode and subopcode for the commands 3425 */ 3426 opcode = uioc_mimd.ui.fcs.opcode; 3427 subopcode = uioc_mimd.ui.fcs.subopcode; 3428 3429 switch (opcode) { 3430 case 0x82: 3431 3432 switch (subopcode) { 3433 3434 case MEGAIOC_QDRVRVER: /* Query driver version */ 3435 uioc->opcode = GET_DRIVER_VER; 3436 uioc->uioc_uaddr = uioc_mimd.data; 3437 break; 3438 3439 case MEGAIOC_QNADAP: /* Get # of adapters */ 3440 uioc->opcode = GET_N_ADAP; 3441 uioc->uioc_uaddr = uioc_mimd.data; 3442 break; 3443 3444 case MEGAIOC_QADAPINFO: /* Get adapter information */ 3445 uioc->opcode = GET_ADAP_INFO; 3446 uioc->adapno = uioc_mimd.ui.fcs.adapno; 3447 uioc->uioc_uaddr = uioc_mimd.data; 3448 break; 3449 3450 default: 3451 return(-EINVAL); 3452 } 3453 3454 break; 3455 3456 3457 case 0x81: 3458 3459 uioc->opcode = MBOX_CMD; 3460 uioc->adapno = uioc_mimd.ui.fcs.adapno; 3461 3462 memcpy(uioc->uioc_rmbox, uioc_mimd.mbox, 18); 3463 3464 uioc->xferlen = uioc_mimd.ui.fcs.length; 3465 3466 if( uioc_mimd.outlen ) uioc->flags = UIOC_RD; 3467 if( uioc_mimd.inlen ) uioc->flags |= UIOC_WR; 3468 3469 break; 3470 3471 case 0x80: 3472 3473 uioc->opcode = MBOX_CMD; 3474 uioc->adapno = uioc_mimd.ui.fcs.adapno; 3475 3476 memcpy(uioc->uioc_rmbox, uioc_mimd.mbox, 18); 3477 3478 /* 3479 * Choose the xferlen bigger of input and output data 3480 */ 3481 uioc->xferlen = uioc_mimd.outlen > uioc_mimd.inlen ? 3482 uioc_mimd.outlen : uioc_mimd.inlen; 3483 3484 if( uioc_mimd.outlen ) uioc->flags = UIOC_RD; 3485 if( uioc_mimd.inlen ) uioc->flags |= UIOC_WR; 3486 3487 break; 3488 3489 default: 3490 return (-EINVAL); 3491 3492 } 3493 3494 return 0; 3495 } 3496 3497 /* 3498 * mega_n_to_m() 3499 * @arg: user address 3500 * @mc: mailbox command 3501 * 3502 * Updates the status information to the application, depending on application 3503 * conforms to older mimd ioctl interface or newer NIT ioctl interface 3504 */ 3505 static int 3506 mega_n_to_m(void __user *arg, megacmd_t *mc) 3507 { 3508 nitioctl_t __user *uiocp; 3509 megacmd_t __user *umc; 3510 mega_passthru __user *upthru; 3511 struct uioctl_t __user *uioc_mimd; 3512 char signature[8] = {0}; 3513 3514 /* 3515 * check is the application conforms to NIT. 
3516 */ 3517 if( copy_from_user(signature, arg, 7) ) 3518 return -EFAULT; 3519 3520 if( memcmp(signature, "MEGANIT", 7) == 0 ) { 3521 3522 uiocp = arg; 3523 3524 if( put_user(mc->status, (u8 __user *)&MBOX_P(uiocp)->status) ) 3525 return (-EFAULT); 3526 3527 if( mc->cmd == MEGA_MBOXCMD_PASSTHRU ) { 3528 3529 umc = MBOX_P(uiocp); 3530 3531 if (get_user(upthru, (mega_passthru __user * __user *)&umc->xferaddr)) 3532 return -EFAULT; 3533 3534 if( put_user(mc->status, (u8 __user *)&upthru->scsistatus)) 3535 return (-EFAULT); 3536 } 3537 } 3538 else { 3539 uioc_mimd = arg; 3540 3541 if( put_user(mc->status, (u8 __user *)&uioc_mimd->mbox[17]) ) 3542 return (-EFAULT); 3543 3544 if( mc->cmd == MEGA_MBOXCMD_PASSTHRU ) { 3545 3546 umc = (megacmd_t __user *)uioc_mimd->mbox; 3547 3548 if (get_user(upthru, (mega_passthru __user * __user *)&umc->xferaddr)) 3549 return (-EFAULT); 3550 3551 if( put_user(mc->status, (u8 __user *)&upthru->scsistatus) ) 3552 return (-EFAULT); 3553 } 3554 } 3555 3556 return 0; 3557 } 3558 3559 3560 /* 3561 * MEGARAID 'FW' commands. 3562 */ 3563 3564 /** 3565 * mega_is_bios_enabled() 3566 * @adapter: pointer to our soft state 3567 * 3568 * issue command to find out if the BIOS is enabled for this controller 3569 */ 3570 static int 3571 mega_is_bios_enabled(adapter_t *adapter) 3572 { 3573 unsigned char raw_mbox[sizeof(struct mbox_out)]; 3574 mbox_t *mbox; 3575 3576 mbox = (mbox_t *)raw_mbox; 3577 3578 memset(&mbox->m_out, 0, sizeof(raw_mbox)); 3579 3580 memset((void *)adapter->mega_buffer, 0, MEGA_BUFFER_SIZE); 3581 3582 mbox->m_out.xferaddr = (u32)adapter->buf_dma_handle; 3583 3584 raw_mbox[0] = IS_BIOS_ENABLED; 3585 raw_mbox[2] = GET_BIOS; 3586 3587 issue_scb_block(adapter, raw_mbox); 3588 3589 return *(char *)adapter->mega_buffer; 3590 } 3591 3592 3593 /** 3594 * mega_enum_raid_scsi() 3595 * @adapter: pointer to our soft state 3596 * 3597 * Find out what channels are RAID/SCSI. This information is used to 3598 * differentiate the virtual channels and physical channels and to support 3599 * ROMB feature and non-disk devices. 3600 */ 3601 static void 3602 mega_enum_raid_scsi(adapter_t *adapter) 3603 { 3604 unsigned char raw_mbox[sizeof(struct mbox_out)]; 3605 mbox_t *mbox; 3606 int i; 3607 3608 mbox = (mbox_t *)raw_mbox; 3609 3610 memset(&mbox->m_out, 0, sizeof(raw_mbox)); 3611 3612 /* 3613 * issue command to find out what channels are raid/scsi 3614 */ 3615 raw_mbox[0] = CHNL_CLASS; 3616 raw_mbox[2] = GET_CHNL_CLASS; 3617 3618 memset((void *)adapter->mega_buffer, 0, MEGA_BUFFER_SIZE); 3619 3620 mbox->m_out.xferaddr = (u32)adapter->buf_dma_handle; 3621 3622 /* 3623 * Non-ROMB firmware fail this command, so all channels 3624 * must be shown RAID 3625 */ 3626 adapter->mega_ch_class = 0xFF; 3627 3628 if(!issue_scb_block(adapter, raw_mbox)) { 3629 adapter->mega_ch_class = *((char *)adapter->mega_buffer); 3630 3631 } 3632 3633 for( i = 0; i < adapter->product_info.nchannels; i++ ) { 3634 if( (adapter->mega_ch_class >> i) & 0x01 ) { 3635 dev_info(&adapter->dev->dev, "channel[%d] is raid\n", 3636 i); 3637 } 3638 else { 3639 dev_info(&adapter->dev->dev, "channel[%d] is scsi\n", 3640 i); 3641 } 3642 } 3643 3644 return; 3645 } 3646 3647 3648 /** 3649 * mega_get_boot_drv() 3650 * @adapter: pointer to our soft state 3651 * 3652 * Find out which device is the boot device. Note, any logical drive or any 3653 * phyical device (e.g., a CDROM) can be designated as a boot device. 
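 *
 * The private BIOS data returned by the firmware is validated with a simple
 * additive checksum over its first 14 bytes. In boot_drv a set MSB selects a
 * physical boot device, with the low seven bits encoding channel and target
 * (channel = value / 16, target = value % 16); otherwise the value is the
 * boot logical drive number.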
3654 */ 3655 static void 3656 mega_get_boot_drv(adapter_t *adapter) 3657 { 3658 struct private_bios_data *prv_bios_data; 3659 unsigned char raw_mbox[sizeof(struct mbox_out)]; 3660 mbox_t *mbox; 3661 u16 cksum = 0; 3662 u8 *cksum_p; 3663 u8 boot_pdrv; 3664 int i; 3665 3666 mbox = (mbox_t *)raw_mbox; 3667 3668 memset(&mbox->m_out, 0, sizeof(raw_mbox)); 3669 3670 raw_mbox[0] = BIOS_PVT_DATA; 3671 raw_mbox[2] = GET_BIOS_PVT_DATA; 3672 3673 memset((void *)adapter->mega_buffer, 0, MEGA_BUFFER_SIZE); 3674 3675 mbox->m_out.xferaddr = (u32)adapter->buf_dma_handle; 3676 3677 adapter->boot_ldrv_enabled = 0; 3678 adapter->boot_ldrv = 0; 3679 3680 adapter->boot_pdrv_enabled = 0; 3681 adapter->boot_pdrv_ch = 0; 3682 adapter->boot_pdrv_tgt = 0; 3683 3684 if(issue_scb_block(adapter, raw_mbox) == 0) { 3685 prv_bios_data = 3686 (struct private_bios_data *)adapter->mega_buffer; 3687 3688 cksum = 0; 3689 cksum_p = (char *)prv_bios_data; 3690 for (i = 0; i < 14; i++ ) { 3691 cksum += (u16)(*cksum_p++); 3692 } 3693 3694 if (prv_bios_data->cksum == (u16)(0-cksum) ) { 3695 3696 /* 3697 * If MSB is set, a physical drive is set as boot 3698 * device 3699 */ 3700 if( prv_bios_data->boot_drv & 0x80 ) { 3701 adapter->boot_pdrv_enabled = 1; 3702 boot_pdrv = prv_bios_data->boot_drv & 0x7F; 3703 adapter->boot_pdrv_ch = boot_pdrv / 16; 3704 adapter->boot_pdrv_tgt = boot_pdrv % 16; 3705 } 3706 else { 3707 adapter->boot_ldrv_enabled = 1; 3708 adapter->boot_ldrv = prv_bios_data->boot_drv; 3709 } 3710 } 3711 } 3712 3713 } 3714 3715 /** 3716 * mega_support_random_del() 3717 * @adapter: pointer to our soft state 3718 * 3719 * Find out if this controller supports random deletion and addition of 3720 * logical drives 3721 */ 3722 static int 3723 mega_support_random_del(adapter_t *adapter) 3724 { 3725 unsigned char raw_mbox[sizeof(struct mbox_out)]; 3726 mbox_t *mbox; 3727 int rval; 3728 3729 mbox = (mbox_t *)raw_mbox; 3730 3731 memset(&mbox->m_out, 0, sizeof(raw_mbox)); 3732 3733 /* 3734 * issue command 3735 */ 3736 raw_mbox[0] = FC_DEL_LOGDRV; 3737 raw_mbox[2] = OP_SUP_DEL_LOGDRV; 3738 3739 rval = issue_scb_block(adapter, raw_mbox); 3740 3741 return !rval; 3742 } 3743 3744 3745 /** 3746 * mega_support_ext_cdb() 3747 * @adapter: pointer to our soft state 3748 * 3749 * Find out if this firmware support cdblen > 10 3750 */ 3751 static int 3752 mega_support_ext_cdb(adapter_t *adapter) 3753 { 3754 unsigned char raw_mbox[sizeof(struct mbox_out)]; 3755 mbox_t *mbox; 3756 int rval; 3757 3758 mbox = (mbox_t *)raw_mbox; 3759 3760 memset(&mbox->m_out, 0, sizeof(raw_mbox)); 3761 /* 3762 * issue command to find out if controller supports extended CDBs. 3763 */ 3764 raw_mbox[0] = 0xA4; 3765 raw_mbox[2] = 0x16; 3766 3767 rval = issue_scb_block(adapter, raw_mbox); 3768 3769 return !rval; 3770 } 3771 3772 3773 /** 3774 * mega_del_logdrv() 3775 * @adapter: pointer to our soft state 3776 * @logdrv: logical drive to be deleted 3777 * 3778 * Delete the specified logical drive. It is the responsibility of the user 3779 * app to let the OS know about this operation. 3780 */ 3781 static int 3782 mega_del_logdrv(adapter_t *adapter, int logdrv) 3783 { 3784 unsigned long flags; 3785 scb_t *scb; 3786 int rval; 3787 3788 /* 3789 * Stop sending commands to the controller, queue them internally. 3790 * When deletion is complete, ISR will flush the queue. 
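	 *
	 * Once the delete has completed, commands still queued get their
	 * logical drive ids remapped (+0x80) if the firmware switched to the
	 * new id map, the quiescent flag is cleared and the pending queue is
	 * run again.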
3791 */ 3792 atomic_set(&adapter->quiescent, 1); 3793 3794 /* 3795 * Wait till all the issued commands are complete and there are no 3796 * commands in the pending queue 3797 */ 3798 while (atomic_read(&adapter->pend_cmds) > 0 || 3799 !list_empty(&adapter->pending_list)) 3800 msleep(1000); /* sleep for 1s */ 3801 3802 rval = mega_do_del_logdrv(adapter, logdrv); 3803 3804 spin_lock_irqsave(&adapter->lock, flags); 3805 3806 /* 3807 * If delete operation was successful, add 0x80 to the logical drive 3808 * ids for commands in the pending queue. 3809 */ 3810 if (adapter->read_ldidmap) { 3811 struct list_head *pos; 3812 list_for_each(pos, &adapter->pending_list) { 3813 scb = list_entry(pos, scb_t, list); 3814 if (scb->pthru->logdrv < 0x80 ) 3815 scb->pthru->logdrv += 0x80; 3816 } 3817 } 3818 3819 atomic_set(&adapter->quiescent, 0); 3820 3821 mega_runpendq(adapter); 3822 3823 spin_unlock_irqrestore(&adapter->lock, flags); 3824 3825 return rval; 3826 } 3827 3828 3829 static int 3830 mega_do_del_logdrv(adapter_t *adapter, int logdrv) 3831 { 3832 megacmd_t mc; 3833 int rval; 3834 3835 memset( &mc, 0, sizeof(megacmd_t)); 3836 3837 mc.cmd = FC_DEL_LOGDRV; 3838 mc.opcode = OP_DEL_LOGDRV; 3839 mc.subopcode = logdrv; 3840 3841 rval = mega_internal_command(adapter, &mc, NULL); 3842 3843 /* log this event */ 3844 if(rval) { 3845 dev_warn(&adapter->dev->dev, "Delete LD-%d failed", logdrv); 3846 return rval; 3847 } 3848 3849 /* 3850 * After deleting first logical drive, the logical drives must be 3851 * addressed by adding 0x80 to the logical drive id. 3852 */ 3853 adapter->read_ldidmap = 1; 3854 3855 return rval; 3856 } 3857 3858 3859 /** 3860 * mega_get_max_sgl() 3861 * @adapter: pointer to our soft state 3862 * 3863 * Find out the maximum number of scatter-gather elements supported by this 3864 * version of the firmware 3865 */ 3866 static void 3867 mega_get_max_sgl(adapter_t *adapter) 3868 { 3869 unsigned char raw_mbox[sizeof(struct mbox_out)]; 3870 mbox_t *mbox; 3871 3872 mbox = (mbox_t *)raw_mbox; 3873 3874 memset(mbox, 0, sizeof(raw_mbox)); 3875 3876 memset((void *)adapter->mega_buffer, 0, MEGA_BUFFER_SIZE); 3877 3878 mbox->m_out.xferaddr = (u32)adapter->buf_dma_handle; 3879 3880 raw_mbox[0] = MAIN_MISC_OPCODE; 3881 raw_mbox[2] = GET_MAX_SG_SUPPORT; 3882 3883 3884 if( issue_scb_block(adapter, raw_mbox) ) { 3885 /* 3886 * f/w does not support this command. Choose the default value 3887 */ 3888 adapter->sglen = MIN_SGLIST; 3889 } 3890 else { 3891 adapter->sglen = *((char *)adapter->mega_buffer); 3892 3893 /* 3894 * Make sure this is not more than the resources we are 3895 * planning to allocate 3896 */ 3897 if ( adapter->sglen > MAX_SGLIST ) 3898 adapter->sglen = MAX_SGLIST; 3899 } 3900 3901 return; 3902 } 3903 3904 3905 /** 3906 * mega_support_cluster() 3907 * @adapter: pointer to our soft state 3908 * 3909 * Find out if this firmware support cluster calls. 3910 */ 3911 static int 3912 mega_support_cluster(adapter_t *adapter) 3913 { 3914 unsigned char raw_mbox[sizeof(struct mbox_out)]; 3915 mbox_t *mbox; 3916 3917 mbox = (mbox_t *)raw_mbox; 3918 3919 memset(mbox, 0, sizeof(raw_mbox)); 3920 3921 memset((void *)adapter->mega_buffer, 0, MEGA_BUFFER_SIZE); 3922 3923 mbox->m_out.xferaddr = (u32)adapter->buf_dma_handle; 3924 3925 /* 3926 * Try to get the initiator id. This command will succeed iff the 3927 * clustering is available on this HBA. 3928 */ 3929 raw_mbox[0] = MEGA_GET_TARGET_ID; 3930 3931 if( issue_scb_block(adapter, raw_mbox) == 0 ) { 3932 3933 /* 3934 * Cluster support available. 
Get the initiator target id. 3935 * Tell our id to mid-layer too. 3936 */ 3937 adapter->this_id = *(u32 *)adapter->mega_buffer; 3938 adapter->host->this_id = adapter->this_id; 3939 3940 return 1; 3941 } 3942 3943 return 0; 3944 } 3945 3946 #ifdef CONFIG_PROC_FS 3947 /** 3948 * mega_adapinq() 3949 * @adapter: pointer to our soft state 3950 * @dma_handle: DMA address of the buffer 3951 * 3952 * Issue internal commands while interrupts are available. 3953 * We only issue direct mailbox commands from within the driver. ioctl() 3954 * interface using these routines can issue passthru commands. 3955 */ 3956 static int 3957 mega_adapinq(adapter_t *adapter, dma_addr_t dma_handle) 3958 { 3959 megacmd_t mc; 3960 3961 memset(&mc, 0, sizeof(megacmd_t)); 3962 3963 if( adapter->flag & BOARD_40LD ) { 3964 mc.cmd = FC_NEW_CONFIG; 3965 mc.opcode = NC_SUBOP_ENQUIRY3; 3966 mc.subopcode = ENQ3_GET_SOLICITED_FULL; 3967 } 3968 else { 3969 mc.cmd = MEGA_MBOXCMD_ADPEXTINQ; 3970 } 3971 3972 mc.xferaddr = (u32)dma_handle; 3973 3974 if ( mega_internal_command(adapter, &mc, NULL) != 0 ) { 3975 return -1; 3976 } 3977 3978 return 0; 3979 } 3980 3981 3982 /** 3983 * mega_internal_dev_inquiry() 3984 * @adapter: pointer to our soft state 3985 * @ch: channel for this device 3986 * @tgt: ID of this device 3987 * @buf_dma_handle: DMA address of the buffer 3988 * 3989 * Issue the scsi inquiry for the specified device. 3990 */ 3991 static int 3992 mega_internal_dev_inquiry(adapter_t *adapter, u8 ch, u8 tgt, 3993 dma_addr_t buf_dma_handle) 3994 { 3995 mega_passthru *pthru; 3996 dma_addr_t pthru_dma_handle; 3997 megacmd_t mc; 3998 int rval; 3999 struct pci_dev *pdev; 4000 4001 4002 /* 4003 * For all internal commands, the buffer must be allocated in <4GB 4004 * address range 4005 */ 4006 if( make_local_pdev(adapter, &pdev) != 0 ) return -1; 4007 4008 pthru = dma_alloc_coherent(&pdev->dev, sizeof(mega_passthru), 4009 &pthru_dma_handle, GFP_KERNEL); 4010 4011 if( pthru == NULL ) { 4012 free_local_pdev(pdev); 4013 return -1; 4014 } 4015 4016 pthru->timeout = 2; 4017 pthru->ars = 1; 4018 pthru->reqsenselen = 14; 4019 pthru->islogical = 0; 4020 4021 pthru->channel = (adapter->flag & BOARD_40LD) ? 0 : ch; 4022 4023 pthru->target = (adapter->flag & BOARD_40LD) ? (ch << 4)|tgt : tgt; 4024 4025 pthru->cdblen = 6; 4026 4027 pthru->cdb[0] = INQUIRY; 4028 pthru->cdb[1] = 0; 4029 pthru->cdb[2] = 0; 4030 pthru->cdb[3] = 0; 4031 pthru->cdb[4] = 255; 4032 pthru->cdb[5] = 0; 4033 4034 4035 pthru->dataxferaddr = (u32)buf_dma_handle; 4036 pthru->dataxferlen = 256; 4037 4038 memset(&mc, 0, sizeof(megacmd_t)); 4039 4040 mc.cmd = MEGA_MBOXCMD_PASSTHRU; 4041 mc.xferaddr = (u32)pthru_dma_handle; 4042 4043 rval = mega_internal_command(adapter, &mc, pthru); 4044 4045 dma_free_coherent(&pdev->dev, sizeof(mega_passthru), pthru, 4046 pthru_dma_handle); 4047 4048 free_local_pdev(pdev); 4049 4050 return rval; 4051 } 4052 #endif 4053 4054 /** 4055 * mega_internal_command() 4056 * @adapter: pointer to our soft state 4057 * @mc: the mailbox command 4058 * @pthru: Passthru structure for DCDB commands 4059 * 4060 * Issue the internal commands in interrupt mode. 4061 * The last argument is the address of the passthru structure if the command 4062 * to be fired is a passthru command 4063 * 4064 * Note: parameter 'pthru' is null for non-passthru commands. 
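 *
 * Internal commands are serialized on int_mtx and share the single reserved
 * command id CMDID_INT_CMDS; the caller blocks on int_waitq until the
 * command is completed.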
4065 */ 4066 static int 4067 mega_internal_command(adapter_t *adapter, megacmd_t *mc, mega_passthru *pthru) 4068 { 4069 unsigned long flags; 4070 scb_t *scb; 4071 int rval; 4072 4073 /* 4074 * The internal commands share one command id and hence are 4075 * serialized. This is so because we want to reserve maximum number of 4076 * available command ids for the I/O commands. 4077 */ 4078 mutex_lock(&adapter->int_mtx); 4079 4080 scb = &adapter->int_scb; 4081 memset(scb, 0, sizeof(scb_t)); 4082 4083 scb->idx = CMDID_INT_CMDS; 4084 scb->state |= SCB_ACTIVE | SCB_PENDQ; 4085 4086 memcpy(scb->raw_mbox, mc, sizeof(megacmd_t)); 4087 4088 /* 4089 * Is it a passthru command 4090 */ 4091 if (mc->cmd == MEGA_MBOXCMD_PASSTHRU) 4092 scb->pthru = pthru; 4093 4094 spin_lock_irqsave(&adapter->lock, flags); 4095 list_add_tail(&scb->list, &adapter->pending_list); 4096 /* 4097 * Check if the HBA is in quiescent state, e.g., during a 4098 * delete logical drive opertion. If it is, don't run 4099 * the pending_list. 4100 */ 4101 if (atomic_read(&adapter->quiescent) == 0) 4102 mega_runpendq(adapter); 4103 spin_unlock_irqrestore(&adapter->lock, flags); 4104 4105 wait_for_completion(&adapter->int_waitq); 4106 4107 mc->status = rval = adapter->int_status; 4108 4109 /* 4110 * Print a debug message for all failed commands. Applications can use 4111 * this information. 4112 */ 4113 if (rval && trace_level) { 4114 dev_info(&adapter->dev->dev, "cmd [%x, %x, %x] status:[%x]\n", 4115 mc->cmd, mc->opcode, mc->subopcode, rval); 4116 } 4117 4118 mutex_unlock(&adapter->int_mtx); 4119 return rval; 4120 } 4121 4122 static struct scsi_host_template megaraid_template = { 4123 .module = THIS_MODULE, 4124 .name = "MegaRAID", 4125 .proc_name = "megaraid_legacy", 4126 .info = megaraid_info, 4127 .queuecommand = megaraid_queue, 4128 .bios_param = megaraid_biosparam, 4129 .max_sectors = MAX_SECTORS_PER_IO, 4130 .can_queue = MAX_COMMANDS, 4131 .this_id = DEFAULT_INITIATOR_ID, 4132 .sg_tablesize = MAX_SGLIST, 4133 .cmd_per_lun = DEF_CMD_PER_LUN, 4134 .eh_abort_handler = megaraid_abort, 4135 .eh_device_reset_handler = megaraid_reset, 4136 .eh_bus_reset_handler = megaraid_reset, 4137 .eh_host_reset_handler = megaraid_reset, 4138 .no_write_same = 1, 4139 }; 4140 4141 static int 4142 megaraid_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) 4143 { 4144 struct Scsi_Host *host; 4145 adapter_t *adapter; 4146 unsigned long mega_baseport, tbase, flag = 0; 4147 u16 subsysid, subsysvid; 4148 u8 pci_bus, pci_dev_func; 4149 int irq, i, j; 4150 int error = -ENODEV; 4151 4152 if (hba_count >= MAX_CONTROLLERS) 4153 goto out; 4154 4155 if (pci_enable_device(pdev)) 4156 goto out; 4157 pci_set_master(pdev); 4158 4159 pci_bus = pdev->bus->number; 4160 pci_dev_func = pdev->devfn; 4161 4162 /* 4163 * The megaraid3 stuff reports the ID of the Intel part which is not 4164 * remotely specific to the megaraid 4165 */ 4166 if (pdev->vendor == PCI_VENDOR_ID_INTEL) { 4167 u16 magic; 4168 /* 4169 * Don't fall over the Compaq management cards using the same 4170 * PCI identifier 4171 */ 4172 if (pdev->subsystem_vendor == PCI_VENDOR_ID_COMPAQ && 4173 pdev->subsystem_device == 0xC000) 4174 goto out_disable_device; 4175 /* Now check the magic signature byte */ 4176 pci_read_config_word(pdev, PCI_CONF_AMISIG, &magic); 4177 if (magic != HBA_SIGNATURE_471 && magic != HBA_SIGNATURE) 4178 goto out_disable_device; 4179 /* Ok it is probably a megaraid */ 4180 } 4181 4182 /* 4183 * For these vendor and device ids, signature offsets are not 4184 * valid and 64 bit 
is implicit 4185 */ 4186 if (id->driver_data & BOARD_64BIT) 4187 flag |= BOARD_64BIT; 4188 else { 4189 u32 magic64; 4190 4191 pci_read_config_dword(pdev, PCI_CONF_AMISIG64, &magic64); 4192 if (magic64 == HBA_SIGNATURE_64BIT) 4193 flag |= BOARD_64BIT; 4194 } 4195 4196 subsysvid = pdev->subsystem_vendor; 4197 subsysid = pdev->subsystem_device; 4198 4199 dev_notice(&pdev->dev, "found 0x%4.04x:0x%4.04x\n", 4200 id->vendor, id->device); 4201 4202 /* Read the base port and IRQ from PCI */ 4203 mega_baseport = pci_resource_start(pdev, 0); 4204 irq = pdev->irq; 4205 4206 tbase = mega_baseport; 4207 if (pci_resource_flags(pdev, 0) & IORESOURCE_MEM) { 4208 flag |= BOARD_MEMMAP; 4209 4210 if (!request_mem_region(mega_baseport, 128, "megaraid")) { 4211 dev_warn(&pdev->dev, "mem region busy!\n"); 4212 goto out_disable_device; 4213 } 4214 4215 mega_baseport = (unsigned long)ioremap(mega_baseport, 128); 4216 if (!mega_baseport) { 4217 dev_warn(&pdev->dev, "could not map hba memory\n"); 4218 goto out_release_region; 4219 } 4220 } else { 4221 flag |= BOARD_IOMAP; 4222 mega_baseport += 0x10; 4223 4224 if (!request_region(mega_baseport, 16, "megaraid")) 4225 goto out_disable_device; 4226 } 4227 4228 /* Initialize SCSI Host structure */ 4229 host = scsi_host_alloc(&megaraid_template, sizeof(adapter_t)); 4230 if (!host) 4231 goto out_iounmap; 4232 4233 adapter = (adapter_t *)host->hostdata; 4234 memset(adapter, 0, sizeof(adapter_t)); 4235 4236 dev_notice(&pdev->dev, 4237 "scsi%d:Found MegaRAID controller at 0x%lx, IRQ:%d\n", 4238 host->host_no, mega_baseport, irq); 4239 4240 adapter->base = mega_baseport; 4241 if (flag & BOARD_MEMMAP) 4242 adapter->mmio_base = (void __iomem *) mega_baseport; 4243 4244 INIT_LIST_HEAD(&adapter->free_list); 4245 INIT_LIST_HEAD(&adapter->pending_list); 4246 INIT_LIST_HEAD(&adapter->completed_list); 4247 4248 adapter->flag = flag; 4249 spin_lock_init(&adapter->lock); 4250 4251 host->cmd_per_lun = max_cmd_per_lun; 4252 host->max_sectors = max_sectors_per_io; 4253 4254 adapter->dev = pdev; 4255 adapter->host = host; 4256 4257 adapter->host->irq = irq; 4258 4259 if (flag & BOARD_MEMMAP) 4260 adapter->host->base = tbase; 4261 else { 4262 adapter->host->io_port = tbase; 4263 adapter->host->n_io_port = 16; 4264 } 4265 4266 adapter->host->unique_id = (pci_bus << 8) | pci_dev_func; 4267 4268 /* 4269 * Allocate buffer to issue internal commands. 4270 */ 4271 adapter->mega_buffer = dma_alloc_coherent(&adapter->dev->dev, 4272 MEGA_BUFFER_SIZE, 4273 &adapter->buf_dma_handle, 4274 GFP_KERNEL); 4275 if (!adapter->mega_buffer) { 4276 dev_warn(&pdev->dev, "out of RAM\n"); 4277 goto out_host_put; 4278 } 4279 4280 adapter->scb_list = kmalloc_array(MAX_COMMANDS, sizeof(scb_t), 4281 GFP_KERNEL); 4282 if (!adapter->scb_list) { 4283 dev_warn(&pdev->dev, "out of RAM\n"); 4284 goto out_free_cmd_buffer; 4285 } 4286 4287 if (request_irq(irq, (adapter->flag & BOARD_MEMMAP) ? 
4288 megaraid_isr_memmapped : megaraid_isr_iomapped, 4289 IRQF_SHARED, "megaraid", adapter)) { 4290 dev_warn(&pdev->dev, "Couldn't register IRQ %d!\n", irq); 4291 goto out_free_scb_list; 4292 } 4293 4294 if (mega_setup_mailbox(adapter)) 4295 goto out_free_irq; 4296 4297 if (mega_query_adapter(adapter)) 4298 goto out_free_mbox; 4299 4300 /* 4301 * Have checks for some buggy f/w 4302 */ 4303 if ((subsysid == 0x1111) && (subsysvid == 0x1111)) { 4304 /* 4305 * Which firmware 4306 */ 4307 if (!strcmp(adapter->fw_version, "3.00") || 4308 !strcmp(adapter->fw_version, "3.01")) { 4309 4310 dev_warn(&pdev->dev, 4311 "Your card is a Dell PERC " 4312 "2/SC RAID controller with " 4313 "firmware\nmegaraid: 3.00 or 3.01. " 4314 "This driver is known to have " 4315 "corruption issues\nmegaraid: with " 4316 "those firmware versions on this " 4317 "specific card. In order\nmegaraid: " 4318 "to protect your data, please upgrade " 4319 "your firmware to version\nmegaraid: " 4320 "3.10 or later, available from the " 4321 "Dell Technical Support web\n" 4322 "megaraid: site at\nhttp://support." 4323 "dell.com/us/en/filelib/download/" 4324 "index.asp?fileid=2940\n" 4325 ); 4326 } 4327 } 4328 4329 /* 4330 * If we have a HP 1M(0x60E7)/2M(0x60E8) controller with 4331 * firmware H.01.07, H.01.08, and H.01.09 disable 64 bit 4332 * support, since this firmware cannot handle 64 bit 4333 * addressing 4334 */ 4335 if ((subsysvid == PCI_VENDOR_ID_HP) && 4336 ((subsysid == 0x60E7) || (subsysid == 0x60E8))) { 4337 /* 4338 * which firmware 4339 */ 4340 if (!strcmp(adapter->fw_version, "H01.07") || 4341 !strcmp(adapter->fw_version, "H01.08") || 4342 !strcmp(adapter->fw_version, "H01.09") ) { 4343 dev_warn(&pdev->dev, 4344 "Firmware H.01.07, " 4345 "H.01.08, and H.01.09 on 1M/2M " 4346 "controllers\n" 4347 "do not support 64 bit " 4348 "addressing.\nDISABLING " 4349 "64 bit support.\n"); 4350 adapter->flag &= ~BOARD_64BIT; 4351 } 4352 } 4353 4354 if (mega_is_bios_enabled(adapter)) 4355 mega_hbas[hba_count].is_bios_enabled = 1; 4356 mega_hbas[hba_count].hostdata_addr = adapter; 4357 4358 /* 4359 * Find out which channel is raid and which is scsi. This is 4360 * for ROMB support. 4361 */ 4362 mega_enum_raid_scsi(adapter); 4363 4364 /* 4365 * Find out if a logical drive is set as the boot drive. If 4366 * there is one, will make that as the first logical drive. 4367 * ROMB: Do we have to boot from a physical drive. Then all 4368 * the physical drives would appear before the logical disks. 4369 * Else, all the physical drives would be exported to the mid 4370 * layer after logical drives. 
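	 *
	 * The logdrv_chan[] map built below marks which channels are exported
	 * as virtual (logical drive) channels and which as physical ones;
	 * when the logical drives come first, mega_ch_class is shifted up by
	 * NVIRT_CHAN to match.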
4371 */ 4372 mega_get_boot_drv(adapter); 4373 4374 if (adapter->boot_pdrv_enabled) { 4375 j = adapter->product_info.nchannels; 4376 for( i = 0; i < j; i++ ) 4377 adapter->logdrv_chan[i] = 0; 4378 for( i = j; i < NVIRT_CHAN + j; i++ ) 4379 adapter->logdrv_chan[i] = 1; 4380 } else { 4381 for (i = 0; i < NVIRT_CHAN; i++) 4382 adapter->logdrv_chan[i] = 1; 4383 for (i = NVIRT_CHAN; i < MAX_CHANNELS+NVIRT_CHAN; i++) 4384 adapter->logdrv_chan[i] = 0; 4385 adapter->mega_ch_class <<= NVIRT_CHAN; 4386 } 4387 4388 /* 4389 * Do we support random deletion and addition of logical 4390 * drives 4391 */ 4392 adapter->read_ldidmap = 0; /* set it after first logdrv 4393 delete cmd */ 4394 adapter->support_random_del = mega_support_random_del(adapter); 4395 4396 /* Initialize SCBs */ 4397 if (mega_init_scb(adapter)) 4398 goto out_free_mbox; 4399 4400 /* 4401 * Reset the pending commands counter 4402 */ 4403 atomic_set(&adapter->pend_cmds, 0); 4404 4405 /* 4406 * Reset the adapter quiescent flag 4407 */ 4408 atomic_set(&adapter->quiescent, 0); 4409 4410 hba_soft_state[hba_count] = adapter; 4411 4412 /* 4413 * Fill in the structure which needs to be passed back to the 4414 * application when it does an ioctl() for controller related 4415 * information. 4416 */ 4417 i = hba_count; 4418 4419 mcontroller[i].base = mega_baseport; 4420 mcontroller[i].irq = irq; 4421 mcontroller[i].numldrv = adapter->numldrv; 4422 mcontroller[i].pcibus = pci_bus; 4423 mcontroller[i].pcidev = id->device; 4424 mcontroller[i].pcifun = PCI_FUNC (pci_dev_func); 4425 mcontroller[i].pciid = -1; 4426 mcontroller[i].pcivendor = id->vendor; 4427 mcontroller[i].pcislot = PCI_SLOT(pci_dev_func); 4428 mcontroller[i].uid = (pci_bus << 8) | pci_dev_func; 4429 4430 4431 /* Set the Mode of addressing to 64 bit if we can */ 4432 if ((adapter->flag & BOARD_64BIT) && (sizeof(dma_addr_t) == 8)) { 4433 dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)); 4434 adapter->has_64bit_addr = 1; 4435 } else { 4436 dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); 4437 adapter->has_64bit_addr = 0; 4438 } 4439 4440 mutex_init(&adapter->int_mtx); 4441 init_completion(&adapter->int_waitq); 4442 4443 adapter->this_id = DEFAULT_INITIATOR_ID; 4444 adapter->host->this_id = DEFAULT_INITIATOR_ID; 4445 4446 #if MEGA_HAVE_CLUSTERING 4447 /* 4448 * Is cluster support enabled on this controller 4449 * Note: In a cluster the HBAs ( the initiators ) will have 4450 * different target IDs and we cannot assume it to be 7. 
	 * The call to mega_support_cluster() will also retrieve the
	 * target IDs if cluster support is available.
	 */
	adapter->has_cluster = mega_support_cluster(adapter);
	if (adapter->has_cluster) {
		dev_notice(&pdev->dev,
			"Cluster driver, initiator id:%d\n",
			adapter->this_id);
	}
#endif

	/* Stash the Scsi_Host so remove/shutdown can retrieve it */
	pci_set_drvdata(pdev, host);

	mega_create_proc_entry(hba_count, mega_proc_dir_entry);

	error = scsi_add_host(host, &pdev->dev);
	if (error)
		goto out_free_mbox;

	scsi_scan_host(host);
	hba_count++;
	return 0;

	/*
	 * Error unwind: each label releases the resources acquired
	 * before the corresponding failure point, in reverse order.
	 */
 out_free_mbox:
	dma_free_coherent(&adapter->dev->dev, sizeof(mbox64_t),
			adapter->una_mbox64, adapter->una_mbox64_dma);
 out_free_irq:
	free_irq(adapter->host->irq, adapter);
 out_free_scb_list:
	kfree(adapter->scb_list);
 out_free_cmd_buffer:
	dma_free_coherent(&adapter->dev->dev, MEGA_BUFFER_SIZE,
			adapter->mega_buffer, adapter->buf_dma_handle);
 out_host_put:
	scsi_host_put(host);
 out_iounmap:
	if (flag & BOARD_MEMMAP)
		iounmap((void *)mega_baseport);
 out_release_region:
	if (flag & BOARD_MEMMAP)
		release_mem_region(tbase, 128);
	else
		release_region(mega_baseport, 16);
 out_disable_device:
	pci_disable_device(pdev);
 out:
	return error;
}

static void
__megaraid_shutdown(adapter_t *adapter)
{
	u_char	raw_mbox[sizeof(struct mbox_out)];
	mbox_t	*mbox = (mbox_t *)raw_mbox;
	int	i;

	/* Flush the adapter cache */
	memset(&mbox->m_out, 0, sizeof(raw_mbox));
	raw_mbox[0] = FLUSH_ADAPTER;

	free_irq(adapter->host->irq, adapter);

	/* Issue a blocking (interrupts disabled) command to the card */
	issue_scb_block(adapter, raw_mbox);

	/* Flush the disk caches */
	memset(&mbox->m_out, 0, sizeof(raw_mbox));
	raw_mbox[0] = FLUSH_SYSTEM;

	/* Issue a blocking (interrupts disabled) command to the card */
	issue_scb_block(adapter, raw_mbox);

	if (atomic_read(&adapter->pend_cmds) > 0)
		dev_warn(&adapter->dev->dev, "pending commands!!\n");

	/*
	 * Have a deliberate delay to make sure all the caches are
	 * actually flushed.
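	 * mdelay() busy-waits, so the loop below adds roughly ten
	 * seconds to the shutdown path.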
	 */
	for (i = 0; i <= 10; i++)
		mdelay(1000);
}

static void
megaraid_remove_one(struct pci_dev *pdev)
{
	struct Scsi_Host *host = pci_get_drvdata(pdev);
	adapter_t *adapter = (adapter_t *)host->hostdata;
	char buf[12] = { 0 };

	scsi_remove_host(host);

	__megaraid_shutdown(adapter);

	/* Free our resources */
	if (adapter->flag & BOARD_MEMMAP) {
		iounmap((void *)adapter->base);
		release_mem_region(adapter->host->base, 128);
	} else
		release_region(adapter->base, 16);

	mega_free_sgl(adapter);

	sprintf(buf, "hba%d", adapter->host->host_no);
	remove_proc_subtree(buf, mega_proc_dir_entry);

	dma_free_coherent(&adapter->dev->dev, MEGA_BUFFER_SIZE,
			adapter->mega_buffer, adapter->buf_dma_handle);
	kfree(adapter->scb_list);
	dma_free_coherent(&adapter->dev->dev, sizeof(mbox64_t),
			adapter->una_mbox64, adapter->una_mbox64_dma);

	scsi_host_put(host);
	pci_disable_device(pdev);

	hba_count--;
}

static void
megaraid_shutdown(struct pci_dev *pdev)
{
	struct Scsi_Host *host = pci_get_drvdata(pdev);
	adapter_t *adapter = (adapter_t *)host->hostdata;

	__megaraid_shutdown(adapter);
}

static struct pci_device_id megaraid_pci_tbl[] = {
	{PCI_VENDOR_ID_AMI, PCI_DEVICE_ID_AMI_MEGARAID,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
	{PCI_VENDOR_ID_AMI, PCI_DEVICE_ID_AMI_MEGARAID2,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
	{PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_AMI_MEGARAID3,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
	{0,}
};
MODULE_DEVICE_TABLE(pci, megaraid_pci_tbl);

static struct pci_driver megaraid_pci_driver = {
	.name		= "megaraid_legacy",
	.id_table	= megaraid_pci_tbl,
	.probe		= megaraid_probe_one,
	.remove		= megaraid_remove_one,
	.shutdown	= megaraid_shutdown,
};

static int __init megaraid_init(void)
{
	int error;

	if ((max_cmd_per_lun <= 0) || (max_cmd_per_lun > MAX_CMD_PER_LUN))
		max_cmd_per_lun = MAX_CMD_PER_LUN;
	if (max_mbox_busy_wait > MBOX_BUSY_WAIT)
		max_mbox_busy_wait = MBOX_BUSY_WAIT;

#ifdef CONFIG_PROC_FS
	mega_proc_dir_entry = proc_mkdir("megaraid", NULL);
	if (!mega_proc_dir_entry) {
		printk(KERN_WARNING
				"megaraid: failed to create megaraid root\n");
	}
#endif
	error = pci_register_driver(&megaraid_pci_driver);
	if (error) {
#ifdef CONFIG_PROC_FS
		remove_proc_entry("megaraid", NULL);
#endif
		return error;
	}

	/*
	 * Register the driver as a character device, for applications
	 * to access it for ioctls.
	 * A first argument (major) of zero asks register_chrdev() for
	 * dynamic major number allocation; it returns a negative errno
	 * on failure.
	 */
	major = register_chrdev(0, "megadev_legacy", &megadev_fops);
	if (major < 0) {
		printk(KERN_WARNING
				"megaraid: failed to register char device\n");
	}

	return 0;
}

static void __exit megaraid_exit(void)
{
	/*
	 * Unregister the character device interface to the driver.
	 */
	unregister_chrdev(major, "megadev_legacy");

	pci_unregister_driver(&megaraid_pci_driver);

#ifdef CONFIG_PROC_FS
	remove_proc_entry("megaraid", NULL);
#endif
}

module_init(megaraid_init);
module_exit(megaraid_exit);

/* vi: set ts=8 sw=8 tw=78: */