/*
 *  linux/drivers/message/fusion/mptbase.c
 *      This is the Fusion MPT base driver which supports multiple
 *      (SCSI + LAN) specialized protocol drivers.
 *      For use with LSI PCI chip/adapter(s)
 *      running LSI Fusion MPT (Message Passing Technology) firmware.
 *
 *  Copyright (c) 1999-2008 LSI Corporation
 *  (mailto:DL-MPTFusionLinux@lsi.com)
 *
 */
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; version 2 of the License.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    NO WARRANTY
    THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
    CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
    LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
    MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
    solely responsible for determining the appropriateness of using and
    distributing the Program and assumes all risks associated with its
    exercise of rights under this Agreement, including but not limited to
    the risks and costs of program errors, damage to or loss of data,
    programs or equipment, and unavailability or interruption of operations.

    DISCLAIMER OF LIABILITY
    NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
    DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
    DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
    ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
    TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
    USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
    HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES

    You should have received a copy of the GNU General Public License
    along with this program; if not, write to the Free Software
    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
*/
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/kdev_t.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>	/* needed for in_interrupt() proto */
#include <linux/dma-mapping.h>
#include <linux/kthread.h>
#include <scsi/scsi_host.h>

#include "mptbase.h"
#include "lsi/mpi_log_fc.h"

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
#define my_NAME		"Fusion MPT base driver"
#define my_VERSION	MPT_LINUX_VERSION_COMMON
#define MYNAM		"mptbase"

MODULE_AUTHOR(MODULEAUTHOR);
MODULE_DESCRIPTION(my_NAME);
MODULE_LICENSE("GPL");
MODULE_VERSION(my_VERSION);

/*
 *  cmd line parameters
 */

static int mpt_msi_enable_spi;
module_param(mpt_msi_enable_spi, int, 0);
MODULE_PARM_DESC(mpt_msi_enable_spi,
		 " Enable MSI Support for SPI controllers (default=0)");
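
/*
 *  The MSI and channel-mapping parameters in this block are registered
 *  with permission 0, so they can only be set at module load time (for
 *  example "modprobe mptbase mpt_msi_enable_sas=1" - an illustrative
 *  sketch of standard module_param usage, not driver-specific advice).
 *  mpt_debug_level and mpt_fwfault_debug below are registered with 0600
 *  and can additionally be changed at runtime through
 *  /sys/module/mptbase/parameters/<name>; the mpt_debug_level setter
 *  propagates the new value to every registered IOC.
 */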
static int mpt_msi_enable_fc;
module_param(mpt_msi_enable_fc, int, 0);
MODULE_PARM_DESC(mpt_msi_enable_fc,
		 " Enable MSI Support for FC controllers (default=0)");

static int mpt_msi_enable_sas;
module_param(mpt_msi_enable_sas, int, 0);
MODULE_PARM_DESC(mpt_msi_enable_sas,
		 " Enable MSI Support for SAS controllers (default=0)");

static int mpt_channel_mapping;
module_param(mpt_channel_mapping, int, 0);
MODULE_PARM_DESC(mpt_channel_mapping, " Mapping id's to channels (default=0)");

static int mpt_debug_level;
static int mpt_set_debug_level(const char *val, const struct kernel_param *kp);
module_param_call(mpt_debug_level, mpt_set_debug_level, param_get_int,
		  &mpt_debug_level, 0600);
MODULE_PARM_DESC(mpt_debug_level,
		 " debug level - refer to mptdebug.h - (default=0)");

int mpt_fwfault_debug;
EXPORT_SYMBOL(mpt_fwfault_debug);
module_param(mpt_fwfault_debug, int, 0600);
MODULE_PARM_DESC(mpt_fwfault_debug,
		 "Enable detection of Firmware fault and halt Firmware on fault - (default=0)");

static char	MptCallbacksName[MPT_MAX_PROTOCOL_DRIVERS]
				[MPT_MAX_CALLBACKNAME_LEN+1];

#ifdef MFCNT
static int mfcounter = 0;
#define PRINT_MF_COUNT 20000
#endif

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
 *  Public data...
 */

#define WHOINIT_UNKNOWN		0xAA

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
 *  Private data...
 */
/* Adapter link list */
LIST_HEAD(ioc_list);
/* Callback lookup table */
static MPT_CALLBACK		 MptCallbacks[MPT_MAX_PROTOCOL_DRIVERS];
/* Protocol driver class lookup table */
static int			 MptDriverClass[MPT_MAX_PROTOCOL_DRIVERS];
/* Event handler lookup table */
static MPT_EVHANDLER		 MptEvHandlers[MPT_MAX_PROTOCOL_DRIVERS];
/* Reset handler lookup table */
static MPT_RESETHANDLER		 MptResetHandlers[MPT_MAX_PROTOCOL_DRIVERS];
static struct mpt_pci_driver	*MptDeviceDriverHandlers[MPT_MAX_PROTOCOL_DRIVERS];

#ifdef CONFIG_PROC_FS
static struct proc_dir_entry *mpt_proc_root_dir;
#endif

/*
 *  Driver Callback Indexes
 */
static u8 mpt_base_index = MPT_MAX_PROTOCOL_DRIVERS;
static u8 last_drv_idx;
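
/*
 *  All of the lookup tables above are indexed by the same cb_idx
 *  "handle" handed out by mpt_register(): a protocol driver's reply
 *  callback, driver class, event handler, reset handler and device
 *  driver hooks all live in the slot it was assigned, and slot 0 is
 *  reserved as the "invalid handle" marker.
 */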
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
 *  Forward protos...
 */
static irqreturn_t mpt_interrupt(int irq, void *bus_id);
static int	mptbase_reply(MPT_ADAPTER *ioc, MPT_FRAME_HDR *req,
		MPT_FRAME_HDR *reply);
static int	mpt_handshake_req_reply_wait(MPT_ADAPTER *ioc, int reqBytes,
			u32 *req, int replyBytes, u16 *u16reply, int maxwait,
			int sleepFlag);
static int	mpt_do_ioc_recovery(MPT_ADAPTER *ioc, u32 reason, int sleepFlag);
static void	mpt_detect_bound_ports(MPT_ADAPTER *ioc, struct pci_dev *pdev);
static void	mpt_adapter_disable(MPT_ADAPTER *ioc);
static void	mpt_adapter_dispose(MPT_ADAPTER *ioc);

static void	MptDisplayIocCapabilities(MPT_ADAPTER *ioc);
static int	MakeIocReady(MPT_ADAPTER *ioc, int force, int sleepFlag);
static int	GetIocFacts(MPT_ADAPTER *ioc, int sleepFlag, int reason);
static int	GetPortFacts(MPT_ADAPTER *ioc, int portnum, int sleepFlag);
static int	SendIocInit(MPT_ADAPTER *ioc, int sleepFlag);
static int	SendPortEnable(MPT_ADAPTER *ioc, int portnum, int sleepFlag);
static int	mpt_do_upload(MPT_ADAPTER *ioc, int sleepFlag);
static int	mpt_downloadboot(MPT_ADAPTER *ioc, MpiFwHeader_t *pFwHeader, int sleepFlag);
static int	mpt_diag_reset(MPT_ADAPTER *ioc, int ignore, int sleepFlag);
static int	KickStart(MPT_ADAPTER *ioc, int ignore, int sleepFlag);
static int	SendIocReset(MPT_ADAPTER *ioc, u8 reset_type, int sleepFlag);
static int	PrimeIocFifos(MPT_ADAPTER *ioc);
static int	WaitForDoorbellAck(MPT_ADAPTER *ioc, int howlong, int sleepFlag);
static int	WaitForDoorbellInt(MPT_ADAPTER *ioc, int howlong, int sleepFlag);
static int	WaitForDoorbellReply(MPT_ADAPTER *ioc, int howlong, int sleepFlag);
static int	GetLanConfigPages(MPT_ADAPTER *ioc);
static int	GetIoUnitPage2(MPT_ADAPTER *ioc);
int		mptbase_sas_persist_operation(MPT_ADAPTER *ioc, u8 persist_opcode);
static int	mpt_GetScsiPortSettings(MPT_ADAPTER *ioc, int portnum);
static int	mpt_readScsiDevicePageHeaders(MPT_ADAPTER *ioc, int portnum);
static void	mpt_read_ioc_pg_1(MPT_ADAPTER *ioc);
static void	mpt_read_ioc_pg_4(MPT_ADAPTER *ioc);
static void	mpt_get_manufacturing_pg_0(MPT_ADAPTER *ioc);
static int	SendEventNotification(MPT_ADAPTER *ioc, u8 EvSwitch,
	int sleepFlag);
static int	SendEventAck(MPT_ADAPTER *ioc, EventNotificationReply_t *evnp);
static int	mpt_host_page_access_control(MPT_ADAPTER *ioc, u8 access_control_value, int sleepFlag);
static int	mpt_host_page_alloc(MPT_ADAPTER *ioc, pIOCInit_t ioc_init);

#ifdef CONFIG_PROC_FS
static int mpt_summary_proc_show(struct seq_file *m, void *v);
static int mpt_version_proc_show(struct seq_file *m, void *v);
static int mpt_iocinfo_proc_show(struct seq_file *m, void *v);
#endif
static void	mpt_get_fw_exp_ver(char *buf, MPT_ADAPTER *ioc);

static int	ProcessEventNotification(MPT_ADAPTER *ioc,
		EventNotificationReply_t *evReply, int *evHandlers);
static void	mpt_iocstatus_info(MPT_ADAPTER *ioc, u32 ioc_status, MPT_FRAME_HDR *mf);
static void	mpt_fc_log_info(MPT_ADAPTER *ioc, u32 log_info);
static void	mpt_spi_log_info(MPT_ADAPTER *ioc, u32 log_info);
static void	mpt_sas_log_info(MPT_ADAPTER *ioc, u32 log_info, u8 cb_idx);
static int	mpt_read_ioc_pg_3(MPT_ADAPTER *ioc);
static void	mpt_inactive_raid_list_free(MPT_ADAPTER *ioc);

/* module entry point */
static int  __init    fusion_init  (void);
static void __exit    fusion_exit  (void);

#define CHIPREG_READ32(addr)		readl_relaxed(addr)
#define CHIPREG_READ32_dmasync(addr)	readl(addr)
#define CHIPREG_WRITE32(addr,val)	writel(val, addr)
#define CHIPREG_PIO_WRITE32(addr,val)	outl(val, (unsigned long)addr)
#define CHIPREG_PIO_READ32(addr)	inl((unsigned long)addr)

static void
pci_disable_io_access(struct pci_dev *pdev)
{
	u16 command_reg;

	pci_read_config_word(pdev, PCI_COMMAND, &command_reg);
	command_reg &= ~1;
	pci_write_config_word(pdev, PCI_COMMAND, command_reg);
}

static void
pci_enable_io_access(struct pci_dev *pdev)
{
	u16 command_reg;

	pci_read_config_word(pdev, PCI_COMMAND, &command_reg);
	command_reg |= 1;
	pci_write_config_word(pdev, PCI_COMMAND, command_reg);
}

static int mpt_set_debug_level(const char *val, const struct kernel_param *kp)
{
	int ret = param_set_int(val, kp);
	MPT_ADAPTER *ioc;

	if (ret)
		return ret;

	list_for_each_entry(ioc, &ioc_list, list)
		ioc->debug_level = mpt_debug_level;
	return 0;
}

/**
 *	mpt_get_cb_idx - obtain cb_idx for registered driver
 *	@dclass: class driver enum
 *
 *	Returns cb_idx, or zero if it wasn't found
 **/
static u8
mpt_get_cb_idx(MPT_DRIVER_CLASS dclass)
{
	u8 cb_idx;

	for (cb_idx = MPT_MAX_PROTOCOL_DRIVERS-1; cb_idx; cb_idx--)
		if (MptDriverClass[cb_idx] == dclass)
			return cb_idx;
	return 0;
}

/**
 *	mpt_is_discovery_complete - determine if discovery has completed
 *	@ioc: per adapter instance
 *
 *	Returns 1 when discovery completed, else zero.
 */
static int
mpt_is_discovery_complete(MPT_ADAPTER *ioc)
{
	ConfigExtendedPageHeader_t hdr;
	CONFIGPARMS cfg;
	SasIOUnitPage0_t *buffer;
	dma_addr_t dma_handle;
	int rc = 0;

	memset(&hdr, 0, sizeof(ConfigExtendedPageHeader_t));
	memset(&cfg, 0, sizeof(CONFIGPARMS));
	hdr.PageVersion = MPI_SASIOUNITPAGE0_PAGEVERSION;
	hdr.PageType = MPI_CONFIG_PAGETYPE_EXTENDED;
	hdr.ExtPageType = MPI_CONFIG_EXTPAGETYPE_SAS_IO_UNIT;
	cfg.cfghdr.ehdr = &hdr;
	cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;

	if ((mpt_config(ioc, &cfg)))
		goto out;
	if (!hdr.ExtPageLength)
		goto out;

	buffer = pci_alloc_consistent(ioc->pcidev, hdr.ExtPageLength * 4,
	    &dma_handle);
	if (!buffer)
		goto out;

	cfg.physAddr = dma_handle;
	cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;

	if ((mpt_config(ioc, &cfg)))
		goto out_free_consistent;

	if (!(buffer->PhyData[0].PortFlags &
	    MPI_SAS_IOUNIT0_PORT_FLAGS_DISCOVERY_IN_PROGRESS))
		rc = 1;

 out_free_consistent:
	pci_free_consistent(ioc->pcidev, hdr.ExtPageLength * 4,
	    buffer, dma_handle);
 out:
	return rc;
}
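
/*
 *  The helper above illustrates the two-step config page access used
 *  throughout this driver: first issue MPI_CONFIG_ACTION_PAGE_HEADER to
 *  learn the extended page length, then allocate a DMA buffer of that
 *  size and re-issue the request with MPI_CONFIG_ACTION_PAGE_READ_CURRENT
 *  to fetch the actual page contents.
 */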
/**
 *	mpt_remove_dead_ioc_func - kthread context to remove dead ioc
 *	@arg: input argument, used to derive ioc
 *
 *	Return 0 if controller is removed from pci subsystem.
 *	Return -1 for other case.
 */
static int mpt_remove_dead_ioc_func(void *arg)
{
	MPT_ADAPTER *ioc = (MPT_ADAPTER *)arg;
	struct pci_dev *pdev;

	if (!ioc)
		return -1;

	pdev = ioc->pcidev;
	if (!pdev)
		return -1;

	pci_stop_and_remove_bus_device_locked(pdev);
	return 0;
}


/**
 *	mpt_fault_reset_work - work performed on workq after ioc fault
 *	@work: input argument, used to derive ioc
 *
 **/
static void
mpt_fault_reset_work(struct work_struct *work)
{
	MPT_ADAPTER	*ioc =
	    container_of(work, MPT_ADAPTER, fault_reset_work.work);
	u32		 ioc_raw_state;
	int		 rc;
	unsigned long	 flags;
	MPT_SCSI_HOST	*hd;
	struct task_struct *p;

	if (ioc->ioc_reset_in_progress || !ioc->active)
		goto out;


	ioc_raw_state = mpt_GetIocState(ioc, 0);
	if ((ioc_raw_state & MPI_IOC_STATE_MASK) == MPI_IOC_STATE_MASK) {
		printk(MYIOC_s_INFO_FMT "%s: IOC is non-operational !!!!\n",
		    ioc->name, __func__);

		/*
		 * Call the mptscsih_flush_pending_cmds callback so that we
		 * flush all pending commands back to the OS.
		 * This call is required to avoid a deadlock at the block
		 * layer.  A dead IOC will fail the diag reset, and this
		 * call is safe since a dead IOC will never return any
		 * command back from HW.
		 */
		hd = shost_priv(ioc->sh);
		ioc->schedule_dead_ioc_flush_running_cmds(hd);

		/* Remove the dead host */
		p = kthread_run(mpt_remove_dead_ioc_func, ioc,
		    "mpt_dead_ioc_%d", ioc->id);
		if (IS_ERR(p)) {
			printk(MYIOC_s_ERR_FMT
			    "%s: Running mpt_dead_ioc thread failed !\n",
			    ioc->name, __func__);
		} else {
			printk(MYIOC_s_WARN_FMT
			    "%s: Running mpt_dead_ioc thread success !\n",
			    ioc->name, __func__);
		}
		return; /* don't rearm timer */
	}

	if ((ioc_raw_state & MPI_IOC_STATE_MASK)
	    == MPI_IOC_STATE_FAULT) {
		printk(MYIOC_s_WARN_FMT "IOC is in FAULT state (%04xh)!!!\n",
		    ioc->name, ioc_raw_state & MPI_DOORBELL_DATA_MASK);
		printk(MYIOC_s_WARN_FMT "Issuing HardReset from %s!!\n",
		    ioc->name, __func__);
		rc = mpt_HardResetHandler(ioc, CAN_SLEEP);
		printk(MYIOC_s_WARN_FMT "%s: HardReset: %s\n", ioc->name,
		    __func__, (rc == 0) ? "success" : "failed");
		ioc_raw_state = mpt_GetIocState(ioc, 0);
		if ((ioc_raw_state & MPI_IOC_STATE_MASK) == MPI_IOC_STATE_FAULT)
			printk(MYIOC_s_WARN_FMT "IOC is in FAULT state after "
			    "reset (%04xh)\n", ioc->name, ioc_raw_state &
			    MPI_DOORBELL_DATA_MASK);
	} else if (ioc->bus_type == SAS && ioc->sas_discovery_quiesce_io) {
		if ((mpt_is_discovery_complete(ioc))) {
			devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT "clearing "
			    "discovery_quiesce_io flag\n", ioc->name));
			ioc->sas_discovery_quiesce_io = 0;
		}
	}

 out:
	/*
	 * Take turns polling alternate controller
	 */
	if (ioc->alt_ioc)
		ioc = ioc->alt_ioc;

	/* rearm the timer */
	spin_lock_irqsave(&ioc->taskmgmt_lock, flags);
	if (ioc->reset_work_q)
		queue_delayed_work(ioc->reset_work_q, &ioc->fault_reset_work,
			msecs_to_jiffies(MPT_POLLING_INTERVAL));
	spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
}
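
/*
 *  Reply FIFO handling below comes in two flavours.  A "turbo"
 *  (context) reply carries everything in the 32-bit FIFO value itself:
 *  the reply type is selected by (pa >> MPI_CONTEXT_REPLY_TYPE_SHIFT),
 *  and for SCSI initiator completions the low 16 bits hold the request
 *  index while bits 16-23 hold the cb_idx of the owning protocol
 *  driver.  A non-turbo reply (MPI_ADDRESS_REPLY_A_BIT set) instead
 *  carries the reply frame's DMA address shifted right by one, and is
 *  decoded in mpt_reply().
 */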
/*
 *  Process turbo (context) reply...
 */
static void
mpt_turbo_reply(MPT_ADAPTER *ioc, u32 pa)
{
	MPT_FRAME_HDR *mf = NULL;
	MPT_FRAME_HDR *mr = NULL;
	u16 req_idx = 0;
	u8 cb_idx;

	dmfprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Got TURBO reply req_idx=%08x\n",
	    ioc->name, pa));

	switch (pa >> MPI_CONTEXT_REPLY_TYPE_SHIFT) {
	case MPI_CONTEXT_REPLY_TYPE_SCSI_INIT:
		req_idx = pa & 0x0000FFFF;
		cb_idx = (pa & 0x00FF0000) >> 16;
		mf = MPT_INDEX_2_MFPTR(ioc, req_idx);
		break;
	case MPI_CONTEXT_REPLY_TYPE_LAN:
		cb_idx = mpt_get_cb_idx(MPTLAN_DRIVER);
		/*
		 *  Blind set of mf to NULL here was fatal
		 *  after lan_reply says "freeme"
		 *  Fix sort of combined with an optimization here;
		 *  added explicit check for case where lan_reply
		 *  was just returning 1 and doing nothing else.
		 *  For this case skip the callback, but set up
		 *  proper mf value first here:-)
		 */
		if ((pa & 0x58000000) == 0x58000000) {
			req_idx = pa & 0x0000FFFF;
			mf = MPT_INDEX_2_MFPTR(ioc, req_idx);
			mpt_free_msg_frame(ioc, mf);
			mb();
			return;
		}
		mr = (MPT_FRAME_HDR *) CAST_U32_TO_PTR(pa);
		break;
	case MPI_CONTEXT_REPLY_TYPE_SCSI_TARGET:
		cb_idx = mpt_get_cb_idx(MPTSTM_DRIVER);
		mr = (MPT_FRAME_HDR *) CAST_U32_TO_PTR(pa);
		break;
	default:
		cb_idx = 0;
		BUG();
	}

	/*  Check for (valid) IO callback!  */
	if (!cb_idx || cb_idx >= MPT_MAX_PROTOCOL_DRIVERS ||
		MptCallbacks[cb_idx] == NULL) {
		printk(MYIOC_s_WARN_FMT "%s: Invalid cb_idx (%d)!\n",
				__func__, ioc->name, cb_idx);
		goto out;
	}

	if (MptCallbacks[cb_idx](ioc, mf, mr))
		mpt_free_msg_frame(ioc, mf);
 out:
	mb();
}

static void
mpt_reply(MPT_ADAPTER *ioc, u32 pa)
{
	MPT_FRAME_HDR	*mf;
	MPT_FRAME_HDR	*mr;
	u16		 req_idx;
	u8		 cb_idx;
	int		 freeme;

	u32 reply_dma_low;
	u16 ioc_stat;

	/* non-TURBO reply!  Hmmm, something may be up...
	 *  Newest turbo reply mechanism; get address
	 *  via left shift 1 (get rid of MPI_ADDRESS_REPLY_A_BIT)!
	 */

	/* Map DMA address of reply header to cpu address.
	 * pa is 32 bits - but the dma address may be 32 or 64 bits
	 * get offset based only on the low addresses
	 */

	reply_dma_low = (pa <<= 1);
	mr = (MPT_FRAME_HDR *)((u8 *)ioc->reply_frames +
			 (reply_dma_low - ioc->reply_frames_low_dma));

	req_idx = le16_to_cpu(mr->u.frame.hwhdr.msgctxu.fld.req_idx);
	cb_idx = mr->u.frame.hwhdr.msgctxu.fld.cb_idx;
	mf = MPT_INDEX_2_MFPTR(ioc, req_idx);

	dmfprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Got non-TURBO reply=%p req_idx=%x cb_idx=%x Function=%x\n",
	    ioc->name, mr, req_idx, cb_idx, mr->u.hdr.Function));
	DBG_DUMP_REPLY_FRAME(ioc, (u32 *)mr);

	/*  Check/log IOC log info
	 */
	ioc_stat = le16_to_cpu(mr->u.reply.IOCStatus);
	if (ioc_stat & MPI_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE) {
		u32	 log_info = le32_to_cpu(mr->u.reply.IOCLogInfo);
		if (ioc->bus_type == FC)
			mpt_fc_log_info(ioc, log_info);
		else if (ioc->bus_type == SPI)
			mpt_spi_log_info(ioc, log_info);
		else if (ioc->bus_type == SAS)
			mpt_sas_log_info(ioc, log_info, cb_idx);
	}

	if (ioc_stat & MPI_IOCSTATUS_MASK)
		mpt_iocstatus_info(ioc, (u32)ioc_stat, mf);

	/*  Check for (valid) IO callback!  */
	if (!cb_idx || cb_idx >= MPT_MAX_PROTOCOL_DRIVERS ||
		MptCallbacks[cb_idx] == NULL) {
		printk(MYIOC_s_WARN_FMT "%s: Invalid cb_idx (%d)!\n",
				__func__, ioc->name, cb_idx);
		freeme = 0;
		goto out;
	}

	freeme = MptCallbacks[cb_idx](ioc, mf, mr);

 out:
	/*  Flush (non-TURBO) reply with a WRITE!  */
	CHIPREG_WRITE32(&ioc->chip->ReplyFifo, pa);

	if (freeme)
		mpt_free_msg_frame(ioc, mf);
	mb();
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
 *	mpt_interrupt - MPT adapter (IOC) specific interrupt handler.
 *	@irq: irq number (not used)
 *	@bus_id: bus identifier cookie == pointer to MPT_ADAPTER structure
 *
 *	This routine is registered via the request_irq() kernel API call,
 *	and handles all interrupts generated from a specific MPT adapter
 *	(also referred to as an IO Controller or IOC).
 *	This routine must clear the interrupt from the adapter and does
 *	so by reading the reply FIFO.  Multiple replies may be processed
 *	per single call to this routine.
 *
 *	This routine handles register-level access of the adapter but
 *	dispatches (calls) a protocol-specific callback routine to handle
 *	the protocol-specific details of the MPT request completion.
 */
static irqreturn_t
mpt_interrupt(int irq, void *bus_id)
{
	MPT_ADAPTER *ioc = bus_id;
	u32 pa = CHIPREG_READ32_dmasync(&ioc->chip->ReplyFifo);

	if (pa == 0xFFFFFFFF)
		return IRQ_NONE;

	/*
	 *  Drain the reply FIFO!
	 */
	do {
		if (pa & MPI_ADDRESS_REPLY_A_BIT)
			mpt_reply(ioc, pa);
		else
			mpt_turbo_reply(ioc, pa);
		pa = CHIPREG_READ32_dmasync(&ioc->chip->ReplyFifo);
	} while (pa != 0xFFFFFFFF);

	return IRQ_HANDLED;
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
 *	mptbase_reply - MPT base driver's callback routine
 *	@ioc: Pointer to MPT_ADAPTER structure
 *	@req: Pointer to original MPT request frame
 *	@reply: Pointer to MPT reply frame (NULL if TurboReply)
 *
 *	MPT base driver's callback routine; all base driver
 *	"internal" request/reply processing is routed here.
 *	Currently used for EventNotification and EventAck handling.
 *
 *	Returns 1 indicating original alloc'd request frame ptr
 *	should be freed, or 0 if it shouldn't.
 */
static int
mptbase_reply(MPT_ADAPTER *ioc, MPT_FRAME_HDR *req, MPT_FRAME_HDR *reply)
{
	EventNotificationReply_t *pEventReply;
	u8 event;
	int evHandlers;
	int freereq = 1;

	switch (reply->u.hdr.Function) {
	case MPI_FUNCTION_EVENT_NOTIFICATION:
		pEventReply = (EventNotificationReply_t *)reply;
		evHandlers = 0;
		ProcessEventNotification(ioc, pEventReply, &evHandlers);
		event = le32_to_cpu(pEventReply->Event) & 0xFF;
		if (pEventReply->MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY)
			freereq = 0;
		if (event != MPI_EVENT_EVENT_CHANGE)
			break;
		/* fall through */
	case MPI_FUNCTION_CONFIG:
	case MPI_FUNCTION_SAS_IO_UNIT_CONTROL:
		ioc->mptbase_cmds.status |= MPT_MGMT_STATUS_COMMAND_GOOD;
		ioc->mptbase_cmds.status |= MPT_MGMT_STATUS_RF_VALID;
		memcpy(ioc->mptbase_cmds.reply, reply,
		    min(MPT_DEFAULT_FRAME_SIZE,
			4 * reply->u.reply.MsgLength));
		if (ioc->mptbase_cmds.status & MPT_MGMT_STATUS_PENDING) {
			ioc->mptbase_cmds.status &= ~MPT_MGMT_STATUS_PENDING;
			complete(&ioc->mptbase_cmds.done);
		} else
			freereq = 0;
		if (ioc->mptbase_cmds.status & MPT_MGMT_STATUS_FREE_MF)
			freereq = 1;
		break;
	case MPI_FUNCTION_EVENT_ACK:
		devtverboseprintk(ioc, printk(MYIOC_s_DEBUG_FMT
		    "EventAck reply received\n", ioc->name));
		break;
	default:
		printk(MYIOC_s_ERR_FMT
		    "Unexpected msg function (=%02Xh) reply received!\n",
		    ioc->name, reply->u.hdr.Function);
		break;
	}

	/*
	 *	Conditionally tell caller to free the original
	 *	EventNotification/EventAck/unexpected request frame!
	 */
	return freereq;
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
 *	mpt_register - Register protocol-specific main callback handler.
 *	@cbfunc: callback function pointer
 *	@dclass: Protocol driver's class (%MPT_DRIVER_CLASS enum value)
 *	@func_name: call function's name
 *
 *	This routine is called by a protocol-specific driver (SCSI host,
 *	LAN, SCSI target) to register its reply callback routine.  Each
 *	protocol-specific driver must do this before it will be able to
 *	use any IOC resources, such as obtaining request frames.
 *
 *	NOTES: The SCSI protocol driver currently calls this routine thrice
 *	in order to register separate callbacks; one for "normal" SCSI IO;
 *	one for MptScsiTaskMgmt requests; one for Scan/DV requests.
 *
 *	Returns u8 valued "handle" in the range (and S.O.D. order)
 *	{N,...,7,6,5,...,1} if successful.
 *	A return value of MPT_MAX_PROTOCOL_DRIVERS (including zero!) should be
 *	considered an error by the caller.
 */
u8
mpt_register(MPT_CALLBACK cbfunc, MPT_DRIVER_CLASS dclass, char *func_name)
{
	u8 cb_idx;
	last_drv_idx = MPT_MAX_PROTOCOL_DRIVERS;

	/*
	 *  Search for empty callback slot in this order: {N,...,7,6,5,...,1}
	 *  (slot/handle 0 is reserved!)
	 */
	for (cb_idx = MPT_MAX_PROTOCOL_DRIVERS-1; cb_idx; cb_idx--) {
		if (MptCallbacks[cb_idx] == NULL) {
			MptCallbacks[cb_idx] = cbfunc;
			MptDriverClass[cb_idx] = dclass;
			MptEvHandlers[cb_idx] = NULL;
			last_drv_idx = cb_idx;
			strlcpy(MptCallbacksName[cb_idx], func_name,
				MPT_MAX_CALLBACKNAME_LEN+1);
			break;
		}
	}

	return last_drv_idx;
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
 *	mpt_deregister - Deregister a protocol driver's resources.
 *	@cb_idx: previously registered callback handle
 *
 *	Each protocol-specific driver should call this routine when its
 *	module is unloaded.
 */
void
mpt_deregister(u8 cb_idx)
{
	if (cb_idx && (cb_idx < MPT_MAX_PROTOCOL_DRIVERS)) {
		MptCallbacks[cb_idx] = NULL;
		MptDriverClass[cb_idx] = MPTUNKNOWN_DRIVER;
		MptEvHandlers[cb_idx] = NULL;

		last_drv_idx++;
	}
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
 *	mpt_event_register - Register protocol-specific event callback handler.
 *	@cb_idx: previously registered (via mpt_register) callback handle
 *	@ev_cbfunc: callback function
 *
 *	This routine can be called by one or more protocol-specific drivers
 *	if/when they choose to be notified of MPT events.
 *
 *	Returns 0 for success.
 */
int
mpt_event_register(u8 cb_idx, MPT_EVHANDLER ev_cbfunc)
{
	if (!cb_idx || cb_idx >= MPT_MAX_PROTOCOL_DRIVERS)
		return -1;

	MptEvHandlers[cb_idx] = ev_cbfunc;
	return 0;
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
 *	mpt_event_deregister - Deregister protocol-specific event callback handler
 *	@cb_idx: previously registered callback handle
 *
 *	Each protocol-specific driver should call this routine
 *	when it does not (or can no longer) handle events,
 *	or when its module is unloaded.
 */
void
mpt_event_deregister(u8 cb_idx)
{
	if (!cb_idx || cb_idx >= MPT_MAX_PROTOCOL_DRIVERS)
		return;

	MptEvHandlers[cb_idx] = NULL;
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
 *	mpt_reset_register - Register protocol-specific IOC reset handler.
 *	@cb_idx: previously registered (via mpt_register) callback handle
 *	@reset_func: reset function
 *
 *	This routine can be called by one or more protocol-specific drivers
 *	if/when they choose to be notified of IOC resets.
 *
 *	Returns 0 for success.
 */
int
mpt_reset_register(u8 cb_idx, MPT_RESETHANDLER reset_func)
{
	if (!cb_idx || cb_idx >= MPT_MAX_PROTOCOL_DRIVERS)
		return -1;

	MptResetHandlers[cb_idx] = reset_func;
	return 0;
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
 *	mpt_reset_deregister - Deregister protocol-specific IOC reset handler.
 *	@cb_idx: previously registered callback handle
 *
 *	Each protocol-specific driver should call this routine
 *	when it does not (or can no longer) handle IOC reset handling,
 *	or when its module is unloaded.
 */
void
mpt_reset_deregister(u8 cb_idx)
{
	if (!cb_idx || cb_idx >= MPT_MAX_PROTOCOL_DRIVERS)
		return;

	MptResetHandlers[cb_idx] = NULL;
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
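
/*
 *  Typical registration flow for a protocol driver (illustrative sketch
 *  only; "my_reply", "my_event_handler" and "my_reset_handler" are
 *  made-up example names, not symbols in this driver):
 *
 *	my_cb_idx = mpt_register(my_reply, MPTLAN_DRIVER, "my_reply");
 *	if (my_cb_idx == MPT_MAX_PROTOCOL_DRIVERS)
 *		return -EBUSY;
 *	mpt_event_register(my_cb_idx, my_event_handler);
 *	mpt_reset_register(my_cb_idx, my_reset_handler);
 *
 *  with the mirror-image mpt_reset_deregister()/mpt_event_deregister()/
 *  mpt_deregister() calls made at module unload time.
 */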
/**
 *	mpt_device_driver_register - Register device driver hooks
 *	@dd_cbfunc: driver callbacks struct
 *	@cb_idx: MPT protocol driver index
 */
int
mpt_device_driver_register(struct mpt_pci_driver *dd_cbfunc, u8 cb_idx)
{
	MPT_ADAPTER	*ioc;
	const struct pci_device_id *id;

	if (!cb_idx || cb_idx >= MPT_MAX_PROTOCOL_DRIVERS)
		return -EINVAL;

	MptDeviceDriverHandlers[cb_idx] = dd_cbfunc;

	/* call per pci device probe entry point */
	list_for_each_entry(ioc, &ioc_list, list) {
		id = ioc->pcidev->driver ?
		    ioc->pcidev->driver->id_table : NULL;
		if (dd_cbfunc->probe)
			dd_cbfunc->probe(ioc->pcidev, id);
	}

	return 0;
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
 *	mpt_device_driver_deregister - DeRegister device driver hooks
 *	@cb_idx: MPT protocol driver index
 */
void
mpt_device_driver_deregister(u8 cb_idx)
{
	struct mpt_pci_driver *dd_cbfunc;
	MPT_ADAPTER	*ioc;

	if (!cb_idx || cb_idx >= MPT_MAX_PROTOCOL_DRIVERS)
		return;

	dd_cbfunc = MptDeviceDriverHandlers[cb_idx];

	list_for_each_entry(ioc, &ioc_list, list) {
		if (dd_cbfunc->remove)
			dd_cbfunc->remove(ioc->pcidev);
	}

	MptDeviceDriverHandlers[cb_idx] = NULL;
}


/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
 *	mpt_get_msg_frame - Obtain an MPT request frame from the pool
 *	@cb_idx: Handle of registered MPT protocol driver
 *	@ioc: Pointer to MPT adapter structure
 *
 *	Obtain an MPT request frame from the pool (of 1024) that are
 *	allocated per MPT adapter.
 *
 *	Returns pointer to a MPT request frame or %NULL if none are available
 *	or IOC is not active.
 */
MPT_FRAME_HDR*
mpt_get_msg_frame(u8 cb_idx, MPT_ADAPTER *ioc)
{
	MPT_FRAME_HDR *mf;
	unsigned long flags;
	u16	 req_idx;	/* Request index */

	/* validate handle and ioc identifier */

#ifdef MFCNT
	if (!ioc->active)
		printk(MYIOC_s_WARN_FMT "IOC Not Active! mpt_get_msg_frame "
		    "returning NULL!\n", ioc->name);
#endif

	/* If interrupts are not attached, do not return a request frame */
	if (!ioc->active)
		return NULL;

	spin_lock_irqsave(&ioc->FreeQlock, flags);
	if (!list_empty(&ioc->FreeQ)) {
		int req_offset;

		mf = list_entry(ioc->FreeQ.next, MPT_FRAME_HDR,
				u.frame.linkage.list);
		list_del(&mf->u.frame.linkage.list);
		mf->u.frame.linkage.arg1 = 0;
		mf->u.frame.hwhdr.msgctxu.fld.cb_idx = cb_idx;	/* byte */
		req_offset = (u8 *)mf - (u8 *)ioc->req_frames;
								/* u16! */
		req_idx = req_offset / ioc->req_sz;
		mf->u.frame.hwhdr.msgctxu.fld.req_idx = cpu_to_le16(req_idx);
		mf->u.frame.hwhdr.msgctxu.fld.rsvd = 0;
		/* Default, will be changed if necessary in SG generation */
		ioc->RequestNB[req_idx] = ioc->NB_for_64_byte_frame;
#ifdef MFCNT
		ioc->mfcnt++;
#endif
	} else
		mf = NULL;
	spin_unlock_irqrestore(&ioc->FreeQlock, flags);

#ifdef MFCNT
	if (mf == NULL)
		printk(MYIOC_s_WARN_FMT "IOC Active. No free Msg Frames! "
		    "Count 0x%x Max 0x%x\n", ioc->name, ioc->mfcnt,
		    ioc->req_depth);
	mfcounter++;
	if (mfcounter == PRINT_MF_COUNT)
		printk(MYIOC_s_INFO_FMT "MF Count 0x%x Max 0x%x\n", ioc->name,
		    ioc->mfcnt, ioc->req_depth);
#endif

	dmfprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mpt_get_msg_frame(%d,%d), got mf=%p\n",
	    ioc->name, cb_idx, ioc->id, mf));
	return mf;
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
 *	mpt_put_msg_frame - Send a protocol-specific MPT request frame to an IOC
 *	@cb_idx: Handle of registered MPT protocol driver
 *	@ioc: Pointer to MPT adapter structure
 *	@mf: Pointer to MPT request frame
 *
 *	This routine posts an MPT request frame to the request post FIFO of a
 *	specific MPT adapter.
 */
void
mpt_put_msg_frame(u8 cb_idx, MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf)
{
	u32 mf_dma_addr;
	int req_offset;
	u16	 req_idx;	/* Request index */

	/* ensure values are reset properly! */
	mf->u.frame.hwhdr.msgctxu.fld.cb_idx = cb_idx;		/* byte */
	req_offset = (u8 *)mf - (u8 *)ioc->req_frames;
								/* u16! */
	req_idx = req_offset / ioc->req_sz;
	mf->u.frame.hwhdr.msgctxu.fld.req_idx = cpu_to_le16(req_idx);
	mf->u.frame.hwhdr.msgctxu.fld.rsvd = 0;

	DBG_DUMP_PUT_MSG_FRAME(ioc, (u32 *)mf);

	mf_dma_addr = (ioc->req_frames_low_dma + req_offset) | ioc->RequestNB[req_idx];
	dsgprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mf_dma_addr=%x req_idx=%d "
	    "RequestNB=%x\n", ioc->name, mf_dma_addr, req_idx,
	    ioc->RequestNB[req_idx]));
	CHIPREG_WRITE32(&ioc->chip->RequestFifo, mf_dma_addr);
}

/**
 *	mpt_put_msg_frame_hi_pri - Send a hi-pri protocol-specific MPT request frame
 *	@cb_idx: Handle of registered MPT protocol driver
 *	@ioc: Pointer to MPT adapter structure
 *	@mf: Pointer to MPT request frame
 *
 *	Send a protocol-specific MPT request frame to an IOC using
 *	hi-priority request queue.
 *
 *	This routine posts an MPT request frame to the request post FIFO of a
 *	specific MPT adapter.
 **/
void
mpt_put_msg_frame_hi_pri(u8 cb_idx, MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf)
{
	u32 mf_dma_addr;
	int req_offset;
	u16	 req_idx;	/* Request index */

	/* ensure values are reset properly! */
	mf->u.frame.hwhdr.msgctxu.fld.cb_idx = cb_idx;
	req_offset = (u8 *)mf - (u8 *)ioc->req_frames;
	req_idx = req_offset / ioc->req_sz;
	mf->u.frame.hwhdr.msgctxu.fld.req_idx = cpu_to_le16(req_idx);
	mf->u.frame.hwhdr.msgctxu.fld.rsvd = 0;

	DBG_DUMP_PUT_MSG_FRAME(ioc, (u32 *)mf);

	mf_dma_addr = (ioc->req_frames_low_dma + req_offset);
	dsgprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mf_dma_addr=%x req_idx=%d\n",
		ioc->name, mf_dma_addr, req_idx));
	CHIPREG_WRITE32(&ioc->chip->RequestHiPriFifo, mf_dma_addr);
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
 *	mpt_free_msg_frame - Place MPT request frame back on FreeQ.
 *	@ioc: Pointer to MPT adapter structure
 *	@mf: Pointer to MPT request frame
 *
 *	This routine places a MPT request frame back on the MPT adapter's
 *	FreeQ.
 */
void
mpt_free_msg_frame(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf)
{
	unsigned long flags;

	/*  Put Request back on FreeQ!  */
	spin_lock_irqsave(&ioc->FreeQlock, flags);
	if (cpu_to_le32(mf->u.frame.linkage.arg1) == 0xdeadbeaf)
		goto out;
	/* signature to know if this mf is freed */
	mf->u.frame.linkage.arg1 = cpu_to_le32(0xdeadbeaf);
	list_add(&mf->u.frame.linkage.list, &ioc->FreeQ);
#ifdef MFCNT
	ioc->mfcnt--;
#endif
 out:
	spin_unlock_irqrestore(&ioc->FreeQlock, flags);
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
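
/*
 *  Request frame lifecycle, as implemented by the routines above
 *  (illustrative sketch only; "my_cb_idx" stands for a handle obtained
 *  from mpt_register(), not a symbol in this file):
 *
 *	mf = mpt_get_msg_frame(my_cb_idx, ioc);
 *	if (!mf)
 *		return -EAGAIN;
 *	... fill in the request body and SGEs via ioc->add_sge() ...
 *	mpt_put_msg_frame(my_cb_idx, ioc, mf);
 *
 *  When the reply arrives, the registered callback is invoked; a
 *  non-zero return from that callback (or an explicit call to
 *  mpt_free_msg_frame()) puts the frame back on the FreeQ.
 */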
/**
 *	mpt_add_sge - Place a simple 32 bit SGE at address pAddr.
 *	@pAddr: virtual address for SGE
 *	@flagslength: SGE flags and data transfer length
 *	@dma_addr: Physical address
 *
 *	This routine places a simple 32 bit SGE at address pAddr.
 */
static void
mpt_add_sge(void *pAddr, u32 flagslength, dma_addr_t dma_addr)
{
	SGESimple32_t *pSge = (SGESimple32_t *) pAddr;
	pSge->FlagsLength = cpu_to_le32(flagslength);
	pSge->Address = cpu_to_le32(dma_addr);
}

/**
 *	mpt_add_sge_64bit - Place a simple 64 bit SGE at address pAddr.
 *	@pAddr: virtual address for SGE
 *	@flagslength: SGE flags and data transfer length
 *	@dma_addr: Physical address
 *
 *	This routine places a simple 64 bit SGE at address pAddr.
 **/
static void
mpt_add_sge_64bit(void *pAddr, u32 flagslength, dma_addr_t dma_addr)
{
	SGESimple64_t *pSge = (SGESimple64_t *) pAddr;
	pSge->Address.Low = cpu_to_le32
			(lower_32_bits(dma_addr));
	pSge->Address.High = cpu_to_le32
			(upper_32_bits(dma_addr));
	pSge->FlagsLength = cpu_to_le32
			((flagslength | MPT_SGE_FLAGS_64_BIT_ADDRESSING));
}

/**
 *	mpt_add_sge_64bit_1078 - Place a simple 64 bit SGE at address pAddr (1078 workaround).
 *	@pAddr: virtual address for SGE
 *	@flagslength: SGE flags and data transfer length
 *	@dma_addr: Physical address
 *
 *	This routine places a simple 64 bit SGE at address pAddr, applying
 *	the 1078 P0M2 addressing workaround where needed.
 **/
static void
mpt_add_sge_64bit_1078(void *pAddr, u32 flagslength, dma_addr_t dma_addr)
{
	SGESimple64_t *pSge = (SGESimple64_t *) pAddr;
	u32 tmp;

	pSge->Address.Low = cpu_to_le32
			(lower_32_bits(dma_addr));
	tmp = (u32)(upper_32_bits(dma_addr));

	/*
	 * 1078 errata workaround for the 36GB limitation
	 */
	if ((((u64)dma_addr + MPI_SGE_LENGTH(flagslength)) >> 32) == 9) {
		flagslength |=
		    MPI_SGE_SET_FLAGS(MPI_SGE_FLAGS_LOCAL_ADDRESS);
		tmp |= (1<<31);
		if (mpt_debug_level & MPT_DEBUG_36GB_MEM)
			printk(KERN_DEBUG "1078 P0M2 addressing for "
			    "addr = 0x%llx len = %d\n",
			    (unsigned long long)dma_addr,
			    MPI_SGE_LENGTH(flagslength));
	}

	pSge->Address.High = cpu_to_le32(tmp);
	pSge->FlagsLength = cpu_to_le32(
		(flagslength | MPT_SGE_FLAGS_64_BIT_ADDRESSING));
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
 *	mpt_add_chain - Place a 32 bit chain SGE at address pAddr.
 *	@pAddr: virtual address for SGE
 *	@next: nextChainOffset value (u32's)
 *	@length: length of next SGL segment
 *	@dma_addr: Physical address
 *
 */
static void
mpt_add_chain(void *pAddr, u8 next, u16 length, dma_addr_t dma_addr)
{
	SGEChain32_t *pChain = (SGEChain32_t *) pAddr;

	pChain->Length = cpu_to_le16(length);
	pChain->Flags = MPI_SGE_FLAGS_CHAIN_ELEMENT;
	pChain->NextChainOffset = next;
	pChain->Address = cpu_to_le32(dma_addr);
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
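
/*
 *  The @flagslength word used by the SGE helpers above packs the
 *  MPI_SGE_FLAGS_* bits (shifted by MPI_SGE_FLAGS_SHIFT) together with
 *  the 24-bit transfer length; mpt_host_page_alloc() below shows how it
 *  is built in-tree.  Callers normally do not invoke these helpers
 *  directly but go through ioc->add_sge/ioc->add_chain, which
 *  mpt_attach() points at the 32-bit or 64-bit variants depending on
 *  the DMA mask negotiated for the adapter.
 */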
/**
 *	mpt_add_chain_64bit - Place a 64 bit chain SGE at address pAddr.
 *	@pAddr: virtual address for SGE
 *	@next: nextChainOffset value (u32's)
 *	@length: length of next SGL segment
 *	@dma_addr: Physical address
 *
 */
static void
mpt_add_chain_64bit(void *pAddr, u8 next, u16 length, dma_addr_t dma_addr)
{
	SGEChain64_t *pChain = (SGEChain64_t *) pAddr;
	u32 tmp = dma_addr & 0xFFFFFFFF;

	pChain->Length = cpu_to_le16(length);
	pChain->Flags = (MPI_SGE_FLAGS_CHAIN_ELEMENT |
			 MPI_SGE_FLAGS_64_BIT_ADDRESSING);

	pChain->NextChainOffset = next;

	pChain->Address.Low = cpu_to_le32(tmp);
	tmp = (u32)(upper_32_bits(dma_addr));
	pChain->Address.High = cpu_to_le32(tmp);
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
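
/*
 *  Doorbell handshake overview (as implemented below): the host first
 *  clears IntStatus, writes the handshake function and the request size
 *  in dwords to the Doorbell register, and waits for the doorbell
 *  interrupt/ack; the request is then streamed to the IOC one dword at
 *  a time through the Doorbell, with an ack awaited after every write.
 *  This path is deliberately slow and is only used where the protocol
 *  requires it (e.g. task management requests).
 */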
/**
 *	mpt_send_handshake_request - Send MPT request via doorbell handshake method.
 *	@cb_idx: Handle of registered MPT protocol driver
 *	@ioc: Pointer to MPT adapter structure
 *	@reqBytes: Size of the request in bytes
 *	@req: Pointer to MPT request frame
 *	@sleepFlag: Use schedule if CAN_SLEEP else use udelay.
 *
 *	This routine is used exclusively to send MptScsiTaskMgmt
 *	requests since they are required to be sent via doorbell handshake.
 *
 *	NOTE: It is the caller's responsibility to byte-swap fields in the
 *	request which are greater than 1 byte in size.
 *
 *	Returns 0 for success, non-zero for failure.
 */
int
mpt_send_handshake_request(u8 cb_idx, MPT_ADAPTER *ioc, int reqBytes, u32 *req, int sleepFlag)
{
	int	 r = 0;
	u8	*req_as_bytes;
	int	 ii;

	/* State is known to be good upon entering
	 * this function so issue the bus reset
	 * request.
	 */

	/*
	 * Emulate what mpt_put_msg_frame() does with respect to sanity
	 * setting cb_idx/req_idx.  But ONLY if this request
	 * is in proper (pre-alloc'd) request buffer range...
	 */
	ii = MFPTR_2_MPT_INDEX(ioc, (MPT_FRAME_HDR *)req);
	if (reqBytes >= 12 && ii >= 0 && ii < ioc->req_depth) {
		MPT_FRAME_HDR *mf = (MPT_FRAME_HDR *)req;
		mf->u.frame.hwhdr.msgctxu.fld.req_idx = cpu_to_le16(ii);
		mf->u.frame.hwhdr.msgctxu.fld.cb_idx = cb_idx;
	}

	/* Make sure there are no doorbells */
	CHIPREG_WRITE32(&ioc->chip->IntStatus, 0);

	CHIPREG_WRITE32(&ioc->chip->Doorbell,
	    ((MPI_FUNCTION_HANDSHAKE<<MPI_DOORBELL_FUNCTION_SHIFT) |
	     ((reqBytes/4)<<MPI_DOORBELL_ADD_DWORDS_SHIFT)));

	/* Wait for IOC doorbell int */
	if ((ii = WaitForDoorbellInt(ioc, 5, sleepFlag)) < 0) {
		return ii;
	}

	/* Read doorbell and check for active bit */
	if (!(CHIPREG_READ32(&ioc->chip->Doorbell) & MPI_DOORBELL_ACTIVE))
		return -5;

	dhsprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mpt_send_handshake_request start, WaitCnt=%d\n",
		ioc->name, ii));

	CHIPREG_WRITE32(&ioc->chip->IntStatus, 0);

	if ((r = WaitForDoorbellAck(ioc, 5, sleepFlag)) < 0) {
		return -2;
	}

	/* Send request via doorbell handshake */
	req_as_bytes = (u8 *) req;
	for (ii = 0; ii < reqBytes/4; ii++) {
		u32 word;

		word = ((req_as_bytes[(ii*4) + 0] <<  0) |
			(req_as_bytes[(ii*4) + 1] <<  8) |
			(req_as_bytes[(ii*4) + 2] << 16) |
			(req_as_bytes[(ii*4) + 3] << 24));
		CHIPREG_WRITE32(&ioc->chip->Doorbell, word);
		if ((r = WaitForDoorbellAck(ioc, 5, sleepFlag)) < 0) {
			r = -3;
			break;
		}
	}

	if (r >= 0 && WaitForDoorbellInt(ioc, 10, sleepFlag) >= 0)
		r = 0;
	else
		r = -4;

	/* Make sure there are no doorbells */
	CHIPREG_WRITE32(&ioc->chip->IntStatus, 0);

	return r;
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
 *	mpt_host_page_access_control - control the IOC's Host Page Buffer access
 *	@ioc: Pointer to MPT adapter structure
 *	@access_control_value: define bits below
 *	@sleepFlag: Specifies whether the process can sleep
 *
 *	Provides mechanism for the host driver to control the IOC's
 *	Host Page Buffer access.
 *
 *	Access Control Value - bits[15:12]
 *	0h Reserved
 *	1h Enable Access { MPI_DB_HPBAC_ENABLE_ACCESS }
 *	2h Disable Access { MPI_DB_HPBAC_DISABLE_ACCESS }
 *	3h Free Buffer { MPI_DB_HPBAC_FREE_BUFFER }
 *
 *	Returns 0 for success, non-zero for failure.
 */

static int
mpt_host_page_access_control(MPT_ADAPTER *ioc, u8 access_control_value, int sleepFlag)
{
	int	 r = 0;

	/* return if in use */
	if (CHIPREG_READ32(&ioc->chip->Doorbell)
	    & MPI_DOORBELL_ACTIVE)
		return -1;

	CHIPREG_WRITE32(&ioc->chip->IntStatus, 0);

	CHIPREG_WRITE32(&ioc->chip->Doorbell,
		((MPI_FUNCTION_HOST_PAGEBUF_ACCESS_CONTROL
		 <<MPI_DOORBELL_FUNCTION_SHIFT) |
		 (access_control_value<<12)));

	/* Wait for IOC to clear Doorbell Status bit */
	if ((r = WaitForDoorbellAck(ioc, 5, sleepFlag)) < 0)
		return -2;
	else
		return 0;
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
 *	mpt_host_page_alloc - allocate system memory for the fw
 *	@ioc: Pointer to MPT adapter structure
 *	@ioc_init: Pointer to ioc init config page
 *
 *	If memory has already been allocated, the same pointer is resent
 *	to the firmware.
 *	Returns 0 for success, non-zero for failure.
 */
static int
mpt_host_page_alloc(MPT_ADAPTER *ioc, pIOCInit_t ioc_init)
{
	char *psge;
	int flags_length;
	u32 host_page_buffer_sz = 0;

	if (!ioc->HostPageBuffer) {

		host_page_buffer_sz =
		    le32_to_cpu(ioc->facts.HostPageBufferSGE.FlagsLength) & 0xFFFFFF;

		if (!host_page_buffer_sz)
			return 0; /* fw doesn't need any host buffers */

		/* spin till we get enough memory */
		while (host_page_buffer_sz > 0) {
			ioc->HostPageBuffer =
				dma_alloc_coherent(&ioc->pcidev->dev,
				host_page_buffer_sz,
				&ioc->HostPageBuffer_dma,
				GFP_KERNEL);
			if (ioc->HostPageBuffer) {
				dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT
				    "host_page_buffer @ %p, dma @ %x, sz=%d bytes\n",
				    ioc->name, ioc->HostPageBuffer,
				    (u32)ioc->HostPageBuffer_dma,
				    host_page_buffer_sz));
				ioc->alloc_total += host_page_buffer_sz;
				ioc->HostPageBuffer_sz = host_page_buffer_sz;
				break;
			}

			host_page_buffer_sz -= (4*1024);
		}
	}

	if (!ioc->HostPageBuffer) {
		printk(MYIOC_s_ERR_FMT
		    "Failed to alloc memory for host_page_buffer!\n",
		    ioc->name);
		return -999;
	}

	psge = (char *)&ioc_init->HostPageBufferSGE;
	flags_length = MPI_SGE_FLAGS_SIMPLE_ELEMENT |
	    MPI_SGE_FLAGS_SYSTEM_ADDRESS |
	    MPI_SGE_FLAGS_HOST_TO_IOC |
	    MPI_SGE_FLAGS_END_OF_BUFFER;
	flags_length = flags_length << MPI_SGE_FLAGS_SHIFT;
	flags_length |= ioc->HostPageBuffer_sz;
	ioc->add_sge(psge, flags_length, ioc->HostPageBuffer_dma);
	ioc->facts.HostPageBufferSGE = ioc_init->HostPageBufferSGE;

	return 0;
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
 *	mpt_verify_adapter - Given IOC identifier, set pointer to its adapter structure.
 *	@iocid: IOC unique identifier (integer)
 *	@iocpp: Pointer to pointer to IOC adapter
 *
 *	Given a unique IOC identifier, set pointer to the associated MPT
 *	adapter structure.
 *
 *	Returns iocid and sets iocpp if iocid is found.
 *	Returns -1 if iocid is not found.
 */
int
mpt_verify_adapter(int iocid, MPT_ADAPTER **iocpp)
{
	MPT_ADAPTER *ioc;

	list_for_each_entry(ioc, &ioc_list, list) {
		if (ioc->id == iocid) {
			*iocpp = ioc;
			return iocid;
		}
	}

	*iocpp = NULL;
	return -1;
}

/**
 *	mpt_get_product_name - returns product string
 *	@vendor: pci vendor id
 *	@device: pci device id
 *	@revision: pci revision id
 *
 *	Returns product string displayed when driver loads,
 *	in /proc/mpt/summary and /sysfs/class/scsi_host/host<X>/version_product
 *
 **/
static const char*
mpt_get_product_name(u16 vendor, u16 device, u8 revision)
{
	char *product_str = NULL;

	if (vendor == PCI_VENDOR_ID_BROCADE) {
		switch (device) {
		case MPI_MANUFACTPAGE_DEVICEID_FC949E:
			switch (revision) {
			case 0x00:
				product_str = "BRE040 A0";
				break;
			case 0x01:
				product_str = "BRE040 A1";
				break;
			default:
				product_str = "BRE040";
				break;
			}
			break;
		}
		goto out;
	}

	switch (device) {
	case MPI_MANUFACTPAGE_DEVICEID_FC909:
		product_str = "LSIFC909 B1";
		break;
	case MPI_MANUFACTPAGE_DEVICEID_FC919:
		product_str = "LSIFC919 B0";
		break;
	case MPI_MANUFACTPAGE_DEVICEID_FC929:
		product_str = "LSIFC929 B0";
		break;
	case MPI_MANUFACTPAGE_DEVICEID_FC919X:
		if (revision < 0x80)
			product_str = "LSIFC919X A0";
		else
			product_str = "LSIFC919XL A1";
		break;
	case MPI_MANUFACTPAGE_DEVICEID_FC929X:
		if (revision < 0x80)
			product_str = "LSIFC929X A0";
		else
			product_str = "LSIFC929XL A1";
		break;
	case MPI_MANUFACTPAGE_DEVICEID_FC939X:
		product_str = "LSIFC939X A1";
		break;
	case MPI_MANUFACTPAGE_DEVICEID_FC949X:
		product_str = "LSIFC949X A1";
		break;
	case MPI_MANUFACTPAGE_DEVICEID_FC949E:
		switch (revision) {
		case 0x00:
			product_str = "LSIFC949E A0";
			break;
		case 0x01:
			product_str = "LSIFC949E A1";
			break;
		default:
			product_str = "LSIFC949E";
			break;
		}
		break;
	case MPI_MANUFACTPAGE_DEVID_53C1030:
		switch (revision) {
		case 0x00:
			product_str = "LSI53C1030 A0";
			break;
		case 0x01:
			product_str = "LSI53C1030 B0";
			break;
		case 0x03:
			product_str = "LSI53C1030 B1";
			break;
		case 0x07:
			product_str = "LSI53C1030 B2";
			break;
		case 0x08:
			product_str = "LSI53C1030 C0";
			break;
		case 0x80:
			product_str = "LSI53C1030T A0";
			break;
		case 0x83:
			product_str = "LSI53C1030T A2";
			break;
		case 0x87:
			product_str = "LSI53C1030T A3";
			break;
		case 0xc1:
			product_str = "LSI53C1020A A1";
			break;
		default:
			product_str = "LSI53C1030";
			break;
		}
		break;
	case MPI_MANUFACTPAGE_DEVID_1030_53C1035:
		switch (revision) {
		case 0x03:
			product_str = "LSI53C1035 A2";
			break;
		case 0x04:
			product_str = "LSI53C1035 B0";
			break;
		default:
			product_str = "LSI53C1035";
			break;
		}
		break;
	case MPI_MANUFACTPAGE_DEVID_SAS1064:
		switch (revision) {
		case 0x00:
			product_str = "LSISAS1064 A1";
			break;
		case 0x01:
			product_str = "LSISAS1064 A2";
			break;
		case 0x02:
			product_str = "LSISAS1064 A3";
			break;
		case 0x03:
			product_str = "LSISAS1064 A4";
			break;
		default:
			product_str = "LSISAS1064";
			break;
		}
		break;
	case MPI_MANUFACTPAGE_DEVID_SAS1064E:
		switch (revision) {
		case 0x00:
			product_str = "LSISAS1064E A0";
			break;
		case 0x01:
			product_str = "LSISAS1064E B0";
			break;
		case 0x02:
			product_str = "LSISAS1064E B1";
			break;
		case 0x04:
			product_str = "LSISAS1064E B2";
			break;
		case 0x08:
			product_str = "LSISAS1064E B3";
			break;
		default:
			product_str = "LSISAS1064E";
			break;
		}
		break;
	case MPI_MANUFACTPAGE_DEVID_SAS1068:
		switch (revision) {
		case 0x00:
			product_str = "LSISAS1068 A0";
			break;
		case 0x01:
			product_str = "LSISAS1068 B0";
			break;
		case 0x02:
			product_str = "LSISAS1068 B1";
			break;
		default:
			product_str = "LSISAS1068";
			break;
		}
		break;
	case MPI_MANUFACTPAGE_DEVID_SAS1068E:
		switch (revision) {
		case 0x00:
			product_str = "LSISAS1068E A0";
			break;
		case 0x01:
			product_str = "LSISAS1068E B0";
			break;
		case 0x02:
			product_str = "LSISAS1068E B1";
			break;
		case 0x04:
			product_str = "LSISAS1068E B2";
			break;
		case 0x08:
			product_str = "LSISAS1068E B3";
			break;
		default:
			product_str = "LSISAS1068E";
			break;
		}
		break;
	case MPI_MANUFACTPAGE_DEVID_SAS1078:
		switch (revision) {
		case 0x00:
			product_str = "LSISAS1078 A0";
			break;
		case 0x01:
			product_str = "LSISAS1078 B0";
			break;
		case 0x02:
			product_str = "LSISAS1078 C0";
			break;
		case 0x03:
			product_str = "LSISAS1078 C1";
			break;
		case 0x04:
			product_str = "LSISAS1078 C2";
			break;
		default:
			product_str = "LSISAS1078";
			break;
		}
		break;
	}

 out:
	return product_str;
}

/**
 *	mpt_mapresources - map in memory mapped io
 *	@ioc: Pointer to MPT adapter structure
 *
 **/
static int
mpt_mapresources(MPT_ADAPTER *ioc)
{
	u8		__iomem *mem;
	int		 ii;
	resource_size_t	 mem_phys;
	unsigned long	 port;
	u32		 msize;
	u32		 psize;
	int		 r = -ENODEV;
	struct pci_dev *pdev;

	pdev = ioc->pcidev;
	ioc->bars = pci_select_bars(pdev, IORESOURCE_MEM);
	if (pci_enable_device_mem(pdev)) {
		printk(MYIOC_s_ERR_FMT "pci_enable_device_mem() "
		    "failed\n", ioc->name);
		return r;
	}
	if (pci_request_selected_regions(pdev, ioc->bars, "mpt")) {
		printk(MYIOC_s_ERR_FMT "pci_request_selected_regions() with "
		    "MEM failed\n", ioc->name);
		goto out_pci_disable_device;
	}

	if (sizeof(dma_addr_t) > 4) {
		const uint64_t required_mask = dma_get_required_mask
		    (&pdev->dev);
		if (required_mask > DMA_BIT_MASK(32)
			&& !pci_set_dma_mask(pdev, DMA_BIT_MASK(64))
			&& !pci_set_consistent_dma_mask(pdev,
						 DMA_BIT_MASK(64))) {
			ioc->dma_mask = DMA_BIT_MASK(64);
			dinitprintk(ioc, printk(MYIOC_s_INFO_FMT
				": 64 BIT PCI BUS DMA ADDRESSING SUPPORTED\n",
				ioc->name));
		} else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))
			&& !pci_set_consistent_dma_mask(pdev,
						DMA_BIT_MASK(32))) {
			ioc->dma_mask = DMA_BIT_MASK(32);
			dinitprintk(ioc, printk(MYIOC_s_INFO_FMT
				": 32 BIT PCI BUS DMA ADDRESSING SUPPORTED\n",
				ioc->name));
		} else {
			printk(MYIOC_s_WARN_FMT "no suitable DMA mask for %s\n",
			    ioc->name, pci_name(pdev));
			goto out_pci_release_region;
		}
	} else {
		if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))
			&& !pci_set_consistent_dma_mask(pdev,
						DMA_BIT_MASK(32))) {
			ioc->dma_mask = DMA_BIT_MASK(32);
			dinitprintk(ioc, printk(MYIOC_s_INFO_FMT
				": 32 BIT PCI BUS DMA ADDRESSING SUPPORTED\n",
				ioc->name));
		} else {
			printk(MYIOC_s_WARN_FMT "no suitable DMA mask for %s\n",
			    ioc->name, pci_name(pdev));
			goto out_pci_release_region;
		}
	}

	mem_phys = msize = 0;
	port = psize = 0;
	for (ii = 0; ii < DEVICE_COUNT_RESOURCE; ii++) {
		if (pci_resource_flags(pdev, ii) & PCI_BASE_ADDRESS_SPACE_IO) {
			if (psize)
				continue;
			/* Get I/O space! */
			port = pci_resource_start(pdev, ii);
			psize = pci_resource_len(pdev, ii);
		} else {
			if (msize)
				continue;
			/* Get memmap */
			mem_phys = pci_resource_start(pdev, ii);
			msize = pci_resource_len(pdev, ii);
		}
	}
	ioc->mem_size = msize;

	mem = NULL;
	/* Get logical ptr for PciMem0 space */
	mem = ioremap(mem_phys, msize);
	if (mem == NULL) {
		printk(MYIOC_s_ERR_FMT ": ERROR - Unable to map adapter"
			" memory!\n", ioc->name);
		r = -EINVAL;
		goto out_pci_release_region;
	}
	ioc->memmap = mem;
	dinitprintk(ioc, printk(MYIOC_s_INFO_FMT "mem = %p, mem_phys = %llx\n",
	    ioc->name, mem, (unsigned long long)mem_phys));

	ioc->mem_phys = mem_phys;
	ioc->chip = (SYSIF_REGS __iomem *)mem;

	/* Save Port IO values in case we need to do downloadboot */
	ioc->pio_mem_phys = port;
	ioc->pio_chip = (SYSIF_REGS __iomem *)port;

	return 0;

out_pci_release_region:
	pci_release_selected_regions(pdev, ioc->bars);
out_pci_disable_device:
	pci_disable_device(pdev);
	return r;
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
 *	mpt_attach - Install a PCI intelligent MPT adapter.
 *	@pdev: Pointer to pci_dev structure
 *	@id: PCI device ID information
 *
 *	This routine performs all the steps necessary to bring the IOC of
 *	a MPT adapter to an OPERATIONAL state.  This includes registering
 *	memory regions, registering the interrupt, and allocating request
 *	and reply memory pools.
 *
 *	This routine also pre-fetches the LAN MAC address of a Fibre Channel
 *	MPT adapter.
 *
 *	Returns 0 for success, non-zero for failure.
 *
 *	TODO: Add support for polled controllers
 */
int
mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id)
{
	MPT_ADAPTER	*ioc;
	u8		 cb_idx;
	int		 r = -ENODEV;
	u8		 pcixcmd;
	static int	 mpt_ids = 0;
#ifdef CONFIG_PROC_FS
	struct proc_dir_entry *dent;
#endif

	ioc = kzalloc(sizeof(MPT_ADAPTER), GFP_KERNEL);
	if (ioc == NULL) {
		printk(KERN_ERR MYNAM ": ERROR - Insufficient memory to add adapter!\n");
		return -ENOMEM;
	}

	ioc->id = mpt_ids++;
	sprintf(ioc->name, "ioc%d", ioc->id);
	dinitprintk(ioc, printk(KERN_WARNING MYNAM ": mpt_adapter_install\n"));

	/*
	 * set initial debug level
	 * (refer to mptdebug.h)
	 *
	 */
	ioc->debug_level = mpt_debug_level;
	if (mpt_debug_level)
		printk(KERN_INFO "mpt_debug_level=%xh\n", mpt_debug_level);

	dinitprintk(ioc, printk(MYIOC_s_INFO_FMT ": mpt_adapter_install\n", ioc->name));

	ioc->pcidev = pdev;
	if (mpt_mapresources(ioc)) {
		goto out_free_ioc;
	}

	/*
	 * Setting up proper handlers for scatter gather handling
	 */
	if (ioc->dma_mask == DMA_BIT_MASK(64)) {
		if (pdev->device == MPI_MANUFACTPAGE_DEVID_SAS1078)
			ioc->add_sge = &mpt_add_sge_64bit_1078;
		else
			ioc->add_sge = &mpt_add_sge_64bit;
		ioc->add_chain = &mpt_add_chain_64bit;
		ioc->sg_addr_size = 8;
	} else {
		ioc->add_sge = &mpt_add_sge;
		ioc->add_chain = &mpt_add_chain;
		ioc->sg_addr_size = 4;
	}
	ioc->SGE_size = sizeof(u32) + ioc->sg_addr_size;

	ioc->alloc_total = sizeof(MPT_ADAPTER);
	ioc->req_sz = MPT_DEFAULT_FRAME_SIZE;		/* avoid div by zero! */
	ioc->reply_sz = MPT_REPLY_FRAME_SIZE;


	spin_lock_init(&ioc->taskmgmt_lock);
	mutex_init(&ioc->internal_cmds.mutex);
	init_completion(&ioc->internal_cmds.done);
	mutex_init(&ioc->mptbase_cmds.mutex);
	init_completion(&ioc->mptbase_cmds.done);
	mutex_init(&ioc->taskmgmt_cmds.mutex);
	init_completion(&ioc->taskmgmt_cmds.done);

	/* Initialize the event logging.
	 */
	ioc->eventTypes = 0;	/* None */
	ioc->eventContext = 0;
	ioc->eventLogSize = 0;
	ioc->events = NULL;

#ifdef MFCNT
	ioc->mfcnt = 0;
#endif

	ioc->sh = NULL;
	ioc->cached_fw = NULL;

	/* Initialize SCSI Config Data structure
	 */
	memset(&ioc->spi_data, 0, sizeof(SpiCfgData));

	/* Initialize the fc rport list head.
	 */
	INIT_LIST_HEAD(&ioc->fc_rports);

	/* Find lookup slot. */
*/ 1862 INIT_LIST_HEAD(&ioc->list); 1863 1864 1865 /* Initialize workqueue */ 1866 INIT_DELAYED_WORK(&ioc->fault_reset_work, mpt_fault_reset_work); 1867 1868 snprintf(ioc->reset_work_q_name, MPT_KOBJ_NAME_LEN, 1869 "mpt_poll_%d", ioc->id); 1870 ioc->reset_work_q = alloc_workqueue(ioc->reset_work_q_name, 1871 WQ_MEM_RECLAIM, 0); 1872 if (!ioc->reset_work_q) { 1873 printk(MYIOC_s_ERR_FMT "Insufficient memory to add adapter!\n", 1874 ioc->name); 1875 r = -ENOMEM; 1876 goto out_unmap_resources; 1877 } 1878 1879 dinitprintk(ioc, printk(MYIOC_s_INFO_FMT "facts @ %p, pfacts[0] @ %p\n", 1880 ioc->name, &ioc->facts, &ioc->pfacts[0])); 1881 1882 ioc->prod_name = mpt_get_product_name(pdev->vendor, pdev->device, 1883 pdev->revision); 1884 1885 switch (pdev->device) 1886 { 1887 case MPI_MANUFACTPAGE_DEVICEID_FC939X: 1888 case MPI_MANUFACTPAGE_DEVICEID_FC949X: 1889 ioc->errata_flag_1064 = 1; 1890 /* fall through */ 1891 case MPI_MANUFACTPAGE_DEVICEID_FC909: 1892 case MPI_MANUFACTPAGE_DEVICEID_FC929: 1893 case MPI_MANUFACTPAGE_DEVICEID_FC919: 1894 case MPI_MANUFACTPAGE_DEVICEID_FC949E: 1895 ioc->bus_type = FC; 1896 break; 1897 1898 case MPI_MANUFACTPAGE_DEVICEID_FC929X: 1899 if (pdev->revision < XL_929) { 1900 /* 929X Chip Fix. Set Split transactions level 1901 * for PCIX. Set MOST bits to zero. 1902 */ 1903 pci_read_config_byte(pdev, 0x6a, &pcixcmd); 1904 pcixcmd &= 0x8F; 1905 pci_write_config_byte(pdev, 0x6a, pcixcmd); 1906 } else { 1907 /* 929XL Chip Fix. Set MMRBC to 0x08. 1908 */ 1909 pci_read_config_byte(pdev, 0x6a, &pcixcmd); 1910 pcixcmd |= 0x08; 1911 pci_write_config_byte(pdev, 0x6a, pcixcmd); 1912 } 1913 ioc->bus_type = FC; 1914 break; 1915 1916 case MPI_MANUFACTPAGE_DEVICEID_FC919X: 1917 /* 919X Chip Fix. Set Split transactions level 1918 * for PCIX. Set MOST bits to zero. 1919 */ 1920 pci_read_config_byte(pdev, 0x6a, &pcixcmd); 1921 pcixcmd &= 0x8F; 1922 pci_write_config_byte(pdev, 0x6a, pcixcmd); 1923 ioc->bus_type = FC; 1924 break; 1925 1926 case MPI_MANUFACTPAGE_DEVID_53C1030: 1927 /* 1030 Chip Fix. Disable Split transactions 1928 * for PCIX. Set MOST bits to zero if Rev < C0( = 8). 1929 */ 1930 if (pdev->revision < C0_1030) { 1931 pci_read_config_byte(pdev, 0x6a, &pcixcmd); 1932 pcixcmd &= 0x8F; 1933 pci_write_config_byte(pdev, 0x6a, pcixcmd); 1934 } 1935 /* fall through */ 1936 1937 case MPI_MANUFACTPAGE_DEVID_1030_53C1035: 1938 ioc->bus_type = SPI; 1939 break; 1940 1941 case MPI_MANUFACTPAGE_DEVID_SAS1064: 1942 case MPI_MANUFACTPAGE_DEVID_SAS1068: 1943 ioc->errata_flag_1064 = 1; 1944 ioc->bus_type = SAS; 1945 break; 1946 1947 case MPI_MANUFACTPAGE_DEVID_SAS1064E: 1948 case MPI_MANUFACTPAGE_DEVID_SAS1068E: 1949 case MPI_MANUFACTPAGE_DEVID_SAS1078: 1950 ioc->bus_type = SAS; 1951 break; 1952 } 1953 1954 1955 switch (ioc->bus_type) { 1956 1957 case SAS: 1958 ioc->msi_enable = mpt_msi_enable_sas; 1959 break; 1960 1961 case SPI: 1962 ioc->msi_enable = mpt_msi_enable_spi; 1963 break; 1964 1965 case FC: 1966 ioc->msi_enable = mpt_msi_enable_fc; 1967 break; 1968 1969 default: 1970 ioc->msi_enable = 0; 1971 break; 1972 } 1973 1974 ioc->fw_events_off = 1; 1975 1976 if (ioc->errata_flag_1064) 1977 pci_disable_io_access(pdev); 1978 1979 spin_lock_init(&ioc->FreeQlock); 1980 1981 /* Disable all! */ 1982 CHIPREG_WRITE32(&ioc->chip->IntMask, 0xFFFFFFFF); 1983 ioc->active = 0; 1984 CHIPREG_WRITE32(&ioc->chip->IntStatus, 0); 1985 1986 /* Set IOC ptr in the pcidev's driver data. */ 1987 pci_set_drvdata(ioc->pcidev, ioc); 1988 1989 /* Set lookup ptr. 
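	 * Once on the global ioc_list the adapter is visible to
	 * mpt_detect_bound_ports() below and to the other MPT code that walks
	 * the list.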
*/ 1990 list_add_tail(&ioc->list, &ioc_list); 1991 1992 /* Check for "bound ports" (929, 929X, 1030, 1035) to reduce redundant resets. 1993 */ 1994 mpt_detect_bound_ports(ioc, pdev); 1995 1996 INIT_LIST_HEAD(&ioc->fw_event_list); 1997 spin_lock_init(&ioc->fw_event_lock); 1998 snprintf(ioc->fw_event_q_name, MPT_KOBJ_NAME_LEN, "mpt/%d", ioc->id); 1999 ioc->fw_event_q = alloc_workqueue(ioc->fw_event_q_name, 2000 WQ_MEM_RECLAIM, 0); 2001 if (!ioc->fw_event_q) { 2002 printk(MYIOC_s_ERR_FMT "Insufficient memory to add adapter!\n", 2003 ioc->name); 2004 r = -ENOMEM; 2005 goto out_remove_ioc; 2006 } 2007 2008 if ((r = mpt_do_ioc_recovery(ioc, MPT_HOSTEVENT_IOC_BRINGUP, 2009 CAN_SLEEP)) != 0){ 2010 printk(MYIOC_s_ERR_FMT "didn't initialize properly! (%d)\n", 2011 ioc->name, r); 2012 2013 destroy_workqueue(ioc->fw_event_q); 2014 ioc->fw_event_q = NULL; 2015 2016 list_del(&ioc->list); 2017 if (ioc->alt_ioc) 2018 ioc->alt_ioc->alt_ioc = NULL; 2019 iounmap(ioc->memmap); 2020 if (pci_is_enabled(pdev)) 2021 pci_disable_device(pdev); 2022 if (r != -5) 2023 pci_release_selected_regions(pdev, ioc->bars); 2024 2025 destroy_workqueue(ioc->reset_work_q); 2026 ioc->reset_work_q = NULL; 2027 2028 kfree(ioc); 2029 return r; 2030 } 2031 2032 /* call per device driver probe entry point */ 2033 for(cb_idx = 0; cb_idx < MPT_MAX_PROTOCOL_DRIVERS; cb_idx++) { 2034 if(MptDeviceDriverHandlers[cb_idx] && 2035 MptDeviceDriverHandlers[cb_idx]->probe) { 2036 MptDeviceDriverHandlers[cb_idx]->probe(pdev,id); 2037 } 2038 } 2039 2040 #ifdef CONFIG_PROC_FS 2041 /* 2042 * Create "/proc/mpt/iocN" subdirectory entry for each MPT adapter. 2043 */ 2044 dent = proc_mkdir(ioc->name, mpt_proc_root_dir); 2045 if (dent) { 2046 proc_create_single_data("info", S_IRUGO, dent, 2047 mpt_iocinfo_proc_show, ioc); 2048 proc_create_single_data("summary", S_IRUGO, dent, 2049 mpt_summary_proc_show, ioc); 2050 } 2051 #endif 2052 2053 if (!ioc->alt_ioc) 2054 queue_delayed_work(ioc->reset_work_q, &ioc->fault_reset_work, 2055 msecs_to_jiffies(MPT_POLLING_INTERVAL)); 2056 2057 return 0; 2058 2059 out_remove_ioc: 2060 list_del(&ioc->list); 2061 if (ioc->alt_ioc) 2062 ioc->alt_ioc->alt_ioc = NULL; 2063 2064 destroy_workqueue(ioc->reset_work_q); 2065 ioc->reset_work_q = NULL; 2066 2067 out_unmap_resources: 2068 iounmap(ioc->memmap); 2069 pci_disable_device(pdev); 2070 pci_release_selected_regions(pdev, ioc->bars); 2071 2072 out_free_ioc: 2073 kfree(ioc); 2074 2075 return r; 2076 } 2077 2078 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 2079 /** 2080 * mpt_detach - Remove a PCI intelligent MPT adapter. 
2081 * @pdev: Pointer to pci_dev structure 2082 */ 2083 2084 void 2085 mpt_detach(struct pci_dev *pdev) 2086 { 2087 MPT_ADAPTER *ioc = pci_get_drvdata(pdev); 2088 char pname[64]; 2089 u8 cb_idx; 2090 unsigned long flags; 2091 struct workqueue_struct *wq; 2092 2093 /* 2094 * Stop polling ioc for fault condition 2095 */ 2096 spin_lock_irqsave(&ioc->taskmgmt_lock, flags); 2097 wq = ioc->reset_work_q; 2098 ioc->reset_work_q = NULL; 2099 spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags); 2100 cancel_delayed_work(&ioc->fault_reset_work); 2101 destroy_workqueue(wq); 2102 2103 spin_lock_irqsave(&ioc->fw_event_lock, flags); 2104 wq = ioc->fw_event_q; 2105 ioc->fw_event_q = NULL; 2106 spin_unlock_irqrestore(&ioc->fw_event_lock, flags); 2107 destroy_workqueue(wq); 2108 2109 snprintf(pname, sizeof(pname), MPT_PROCFS_MPTBASEDIR "/%s/summary", ioc->name); 2110 remove_proc_entry(pname, NULL); 2111 snprintf(pname, sizeof(pname), MPT_PROCFS_MPTBASEDIR "/%s/info", ioc->name); 2112 remove_proc_entry(pname, NULL); 2113 snprintf(pname, sizeof(pname), MPT_PROCFS_MPTBASEDIR "/%s", ioc->name); 2114 remove_proc_entry(pname, NULL); 2115 2116 /* call per device driver remove entry point */ 2117 for(cb_idx = 0; cb_idx < MPT_MAX_PROTOCOL_DRIVERS; cb_idx++) { 2118 if(MptDeviceDriverHandlers[cb_idx] && 2119 MptDeviceDriverHandlers[cb_idx]->remove) { 2120 MptDeviceDriverHandlers[cb_idx]->remove(pdev); 2121 } 2122 } 2123 2124 /* Disable interrupts! */ 2125 CHIPREG_WRITE32(&ioc->chip->IntMask, 0xFFFFFFFF); 2126 2127 ioc->active = 0; 2128 synchronize_irq(pdev->irq); 2129 2130 /* Clear any lingering interrupt */ 2131 CHIPREG_WRITE32(&ioc->chip->IntStatus, 0); 2132 2133 CHIPREG_READ32(&ioc->chip->IntStatus); 2134 2135 mpt_adapter_dispose(ioc); 2136 2137 } 2138 2139 /************************************************************************** 2140 * Power Management 2141 */ 2142 #ifdef CONFIG_PM 2143 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 2144 /** 2145 * mpt_suspend - Fusion MPT base driver suspend routine. 2146 * @pdev: Pointer to pci_dev structure 2147 * @state: new state to enter 2148 */ 2149 int 2150 mpt_suspend(struct pci_dev *pdev, pm_message_t state) 2151 { 2152 u32 device_state; 2153 MPT_ADAPTER *ioc = pci_get_drvdata(pdev); 2154 2155 device_state = pci_choose_state(pdev, state); 2156 printk(MYIOC_s_INFO_FMT "pci-suspend: pdev=0x%p, slot=%s, Entering " 2157 "operating state [D%d]\n", ioc->name, pdev, pci_name(pdev), 2158 device_state); 2159 2160 /* put ioc into READY_STATE */ 2161 if (SendIocReset(ioc, MPI_FUNCTION_IOC_MESSAGE_UNIT_RESET, CAN_SLEEP)) { 2162 printk(MYIOC_s_ERR_FMT 2163 "pci-suspend: IOC msg unit reset failed!\n", ioc->name); 2164 } 2165 2166 /* disable interrupts */ 2167 CHIPREG_WRITE32(&ioc->chip->IntMask, 0xFFFFFFFF); 2168 ioc->active = 0; 2169 2170 /* Clear any lingering interrupt */ 2171 CHIPREG_WRITE32(&ioc->chip->IntStatus, 0); 2172 2173 free_irq(ioc->pci_irq, ioc); 2174 if (ioc->msi_enable) 2175 pci_disable_msi(ioc->pcidev); 2176 ioc->pci_irq = -1; 2177 pci_save_state(pdev); 2178 pci_disable_device(pdev); 2179 pci_release_selected_regions(pdev, ioc->bars); 2180 pci_set_power_state(pdev, device_state); 2181 return 0; 2182 } 2183 2184 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 2185 /** 2186 * mpt_resume - Fusion MPT base driver resume routine. 
2187 * @pdev: Pointer to pci_dev structure 2188 */ 2189 int 2190 mpt_resume(struct pci_dev *pdev) 2191 { 2192 MPT_ADAPTER *ioc = pci_get_drvdata(pdev); 2193 u32 device_state = pdev->current_state; 2194 int recovery_state; 2195 int err; 2196 2197 printk(MYIOC_s_INFO_FMT "pci-resume: pdev=0x%p, slot=%s, Previous " 2198 "operating state [D%d]\n", ioc->name, pdev, pci_name(pdev), 2199 device_state); 2200 2201 pci_set_power_state(pdev, PCI_D0); 2202 pci_enable_wake(pdev, PCI_D0, 0); 2203 pci_restore_state(pdev); 2204 ioc->pcidev = pdev; 2205 err = mpt_mapresources(ioc); 2206 if (err) 2207 return err; 2208 2209 if (ioc->dma_mask == DMA_BIT_MASK(64)) { 2210 if (pdev->device == MPI_MANUFACTPAGE_DEVID_SAS1078) 2211 ioc->add_sge = &mpt_add_sge_64bit_1078; 2212 else 2213 ioc->add_sge = &mpt_add_sge_64bit; 2214 ioc->add_chain = &mpt_add_chain_64bit; 2215 ioc->sg_addr_size = 8; 2216 } else { 2217 2218 ioc->add_sge = &mpt_add_sge; 2219 ioc->add_chain = &mpt_add_chain; 2220 ioc->sg_addr_size = 4; 2221 } 2222 ioc->SGE_size = sizeof(u32) + ioc->sg_addr_size; 2223 2224 printk(MYIOC_s_INFO_FMT "pci-resume: ioc-state=0x%x,doorbell=0x%x\n", 2225 ioc->name, (mpt_GetIocState(ioc, 1) >> MPI_IOC_STATE_SHIFT), 2226 CHIPREG_READ32(&ioc->chip->Doorbell)); 2227 2228 /* 2229 * Errata workaround for SAS pci express: 2230 * Upon returning to the D0 state, the contents of the doorbell will be 2231 * stale data, and this will incorrectly signal to the host driver that 2232 * the firmware is ready to process mpt commands. The workaround is 2233 * to issue a diagnostic reset. 2234 */ 2235 if (ioc->bus_type == SAS && (pdev->device == 2236 MPI_MANUFACTPAGE_DEVID_SAS1068E || pdev->device == 2237 MPI_MANUFACTPAGE_DEVID_SAS1064E)) { 2238 if (KickStart(ioc, 1, CAN_SLEEP) < 0) { 2239 printk(MYIOC_s_WARN_FMT "pci-resume: Cannot recover\n", 2240 ioc->name); 2241 goto out; 2242 } 2243 } 2244 2245 /* bring ioc to operational state */ 2246 printk(MYIOC_s_INFO_FMT "Sending mpt_do_ioc_recovery\n", ioc->name); 2247 recovery_state = mpt_do_ioc_recovery(ioc, MPT_HOSTEVENT_IOC_BRINGUP, 2248 CAN_SLEEP); 2249 if (recovery_state != 0) 2250 printk(MYIOC_s_WARN_FMT "pci-resume: Cannot recover, " 2251 "error:[%x]\n", ioc->name, recovery_state); 2252 else 2253 printk(MYIOC_s_INFO_FMT 2254 "pci-resume: success\n", ioc->name); 2255 out: 2256 return 0; 2257 2258 } 2259 #endif 2260 2261 static int 2262 mpt_signal_reset(u8 index, MPT_ADAPTER *ioc, int reset_phase) 2263 { 2264 if ((MptDriverClass[index] == MPTSPI_DRIVER && 2265 ioc->bus_type != SPI) || 2266 (MptDriverClass[index] == MPTFC_DRIVER && 2267 ioc->bus_type != FC) || 2268 (MptDriverClass[index] == MPTSAS_DRIVER && 2269 ioc->bus_type != SAS)) 2270 /* make sure we only call the relevant reset handler 2271 * for the bus */ 2272 return 0; 2273 return (MptResetHandlers[index])(ioc, reset_phase); 2274 } 2275 2276 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 2277 /** 2278 * mpt_do_ioc_recovery - Initialize or recover MPT adapter. 2279 * @ioc: Pointer to MPT adapter structure 2280 * @reason: Event word / reason 2281 * @sleepFlag: Use schedule if CAN_SLEEP else use udelay. 2282 * 2283 * This routine performs all the steps necessary to bring the IOC 2284 * to a OPERATIONAL state. 2285 * 2286 * This routine also pre-fetches the LAN MAC address of a Fibre Channel 2287 * MPT adapter. 
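 *
 * mpt_attach() and mpt_resume() call this with %MPT_HOSTEVENT_IOC_BRINGUP;
 * the hard-reset path re-enters it with %MPT_HOSTEVENT_IOC_RECOVER. With
 * @sleepFlag set to CAN_SLEEP the wait loops below use msleep(), otherwise
 * they fall back to mdelay() busy-waits.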
2288 * 2289 * Returns: 2290 * 0 for success 2291 * -1 if failed to get board READY 2292 * -2 if READY but IOCFacts Failed 2293 * -3 if READY but PrimeIOCFifos Failed 2294 * -4 if READY but IOCInit Failed 2295 * -5 if failed to enable_device and/or request_selected_regions 2296 * -6 if failed to upload firmware 2297 */ 2298 static int 2299 mpt_do_ioc_recovery(MPT_ADAPTER *ioc, u32 reason, int sleepFlag) 2300 { 2301 int hard_reset_done = 0; 2302 int alt_ioc_ready = 0; 2303 int hard; 2304 int rc=0; 2305 int ii; 2306 int ret = 0; 2307 int reset_alt_ioc_active = 0; 2308 int irq_allocated = 0; 2309 u8 *a; 2310 2311 printk(MYIOC_s_INFO_FMT "Initiating %s\n", ioc->name, 2312 reason == MPT_HOSTEVENT_IOC_BRINGUP ? "bringup" : "recovery"); 2313 2314 /* Disable reply interrupts (also blocks FreeQ) */ 2315 CHIPREG_WRITE32(&ioc->chip->IntMask, 0xFFFFFFFF); 2316 ioc->active = 0; 2317 2318 if (ioc->alt_ioc) { 2319 if (ioc->alt_ioc->active || 2320 reason == MPT_HOSTEVENT_IOC_RECOVER) { 2321 reset_alt_ioc_active = 1; 2322 /* Disable alt-IOC's reply interrupts 2323 * (and FreeQ) for a bit 2324 **/ 2325 CHIPREG_WRITE32(&ioc->alt_ioc->chip->IntMask, 2326 0xFFFFFFFF); 2327 ioc->alt_ioc->active = 0; 2328 } 2329 } 2330 2331 hard = 1; 2332 if (reason == MPT_HOSTEVENT_IOC_BRINGUP) 2333 hard = 0; 2334 2335 if ((hard_reset_done = MakeIocReady(ioc, hard, sleepFlag)) < 0) { 2336 if (hard_reset_done == -4) { 2337 printk(MYIOC_s_WARN_FMT "Owned by PEER..skipping!\n", 2338 ioc->name); 2339 2340 if (reset_alt_ioc_active && ioc->alt_ioc) { 2341 /* (re)Enable alt-IOC! (reply interrupt, FreeQ) */ 2342 dprintk(ioc, printk(MYIOC_s_INFO_FMT 2343 "alt_ioc reply irq re-enabled\n", ioc->alt_ioc->name)); 2344 CHIPREG_WRITE32(&ioc->alt_ioc->chip->IntMask, MPI_HIM_DIM); 2345 ioc->alt_ioc->active = 1; 2346 } 2347 2348 } else { 2349 printk(MYIOC_s_WARN_FMT 2350 "NOT READY WARNING!\n", ioc->name); 2351 } 2352 ret = -1; 2353 goto out; 2354 } 2355 2356 /* hard_reset_done = 0 if a soft reset was performed 2357 * and 1 if a hard reset was performed. 2358 */ 2359 if (hard_reset_done && reset_alt_ioc_active && ioc->alt_ioc) { 2360 if ((rc = MakeIocReady(ioc->alt_ioc, 0, sleepFlag)) == 0) 2361 alt_ioc_ready = 1; 2362 else 2363 printk(MYIOC_s_WARN_FMT 2364 ": alt-ioc Not ready WARNING!\n", 2365 ioc->alt_ioc->name); 2366 } 2367 2368 for (ii=0; ii<5; ii++) { 2369 /* Get IOC facts! 
Allow 5 retries */ 2370 if ((rc = GetIocFacts(ioc, sleepFlag, reason)) == 0) 2371 break; 2372 } 2373 2374 2375 if (ii == 5) { 2376 dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT 2377 "Retry IocFacts failed rc=%x\n", ioc->name, rc)); 2378 ret = -2; 2379 } else if (reason == MPT_HOSTEVENT_IOC_BRINGUP) { 2380 MptDisplayIocCapabilities(ioc); 2381 } 2382 2383 if (alt_ioc_ready) { 2384 if ((rc = GetIocFacts(ioc->alt_ioc, sleepFlag, reason)) != 0) { 2385 dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT 2386 "Initial Alt IocFacts failed rc=%x\n", 2387 ioc->name, rc)); 2388 /* Retry - alt IOC was initialized once 2389 */ 2390 rc = GetIocFacts(ioc->alt_ioc, sleepFlag, reason); 2391 } 2392 if (rc) { 2393 dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT 2394 "Retry Alt IocFacts failed rc=%x\n", ioc->name, rc)); 2395 alt_ioc_ready = 0; 2396 reset_alt_ioc_active = 0; 2397 } else if (reason == MPT_HOSTEVENT_IOC_BRINGUP) { 2398 MptDisplayIocCapabilities(ioc->alt_ioc); 2399 } 2400 } 2401 2402 if ((ret == 0) && (reason == MPT_HOSTEVENT_IOC_BRINGUP) && 2403 (ioc->facts.Flags & MPI_IOCFACTS_FLAGS_FW_DOWNLOAD_BOOT)) { 2404 pci_release_selected_regions(ioc->pcidev, ioc->bars); 2405 ioc->bars = pci_select_bars(ioc->pcidev, IORESOURCE_MEM | 2406 IORESOURCE_IO); 2407 if (pci_enable_device(ioc->pcidev)) 2408 return -5; 2409 if (pci_request_selected_regions(ioc->pcidev, ioc->bars, 2410 "mpt")) 2411 return -5; 2412 } 2413 2414 /* 2415 * Device is reset now. It must have de-asserted the interrupt line 2416 * (if it was asserted) and it should be safe to register for the 2417 * interrupt now. 2418 */ 2419 if ((ret == 0) && (reason == MPT_HOSTEVENT_IOC_BRINGUP)) { 2420 ioc->pci_irq = -1; 2421 if (ioc->pcidev->irq) { 2422 if (ioc->msi_enable && !pci_enable_msi(ioc->pcidev)) 2423 printk(MYIOC_s_INFO_FMT "PCI-MSI enabled\n", 2424 ioc->name); 2425 else 2426 ioc->msi_enable = 0; 2427 rc = request_irq(ioc->pcidev->irq, mpt_interrupt, 2428 IRQF_SHARED, ioc->name, ioc); 2429 if (rc < 0) { 2430 printk(MYIOC_s_ERR_FMT "Unable to allocate " 2431 "interrupt %d!\n", 2432 ioc->name, ioc->pcidev->irq); 2433 if (ioc->msi_enable) 2434 pci_disable_msi(ioc->pcidev); 2435 ret = -EBUSY; 2436 goto out; 2437 } 2438 irq_allocated = 1; 2439 ioc->pci_irq = ioc->pcidev->irq; 2440 pci_set_master(ioc->pcidev); /* ?? */ 2441 pci_set_drvdata(ioc->pcidev, ioc); 2442 dinitprintk(ioc, printk(MYIOC_s_INFO_FMT 2443 "installed at interrupt %d\n", ioc->name, 2444 ioc->pcidev->irq)); 2445 } 2446 } 2447 2448 /* Prime reply & request queues! 2449 * (mucho alloc's) Must be done prior to 2450 * init as upper addresses are needed for init. 2451 * If fails, continue with alt-ioc processing 2452 */ 2453 dinitprintk(ioc, printk(MYIOC_s_INFO_FMT "PrimeIocFifos\n", 2454 ioc->name)); 2455 if ((ret == 0) && ((rc = PrimeIocFifos(ioc)) != 0)) 2456 ret = -3; 2457 2458 /* May need to check/upload firmware & data here! 2459 * If fails, continue with alt-ioc processing 2460 */ 2461 dinitprintk(ioc, printk(MYIOC_s_INFO_FMT "SendIocInit\n", 2462 ioc->name)); 2463 if ((ret == 0) && ((rc = SendIocInit(ioc, sleepFlag)) != 0)) 2464 ret = -4; 2465 // NEW! 
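	/* Repeat the FIFO priming and IOCInit for a bound alt_ioc, if it is
	 * ready. A failure here only disables the alt_ioc path; it does not
	 * change the return code for this IOC.
	 */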
2466 if (alt_ioc_ready && ((rc = PrimeIocFifos(ioc->alt_ioc)) != 0)) { 2467 printk(MYIOC_s_WARN_FMT 2468 ": alt-ioc (%d) FIFO mgmt alloc WARNING!\n", 2469 ioc->alt_ioc->name, rc); 2470 alt_ioc_ready = 0; 2471 reset_alt_ioc_active = 0; 2472 } 2473 2474 if (alt_ioc_ready) { 2475 if ((rc = SendIocInit(ioc->alt_ioc, sleepFlag)) != 0) { 2476 alt_ioc_ready = 0; 2477 reset_alt_ioc_active = 0; 2478 printk(MYIOC_s_WARN_FMT 2479 ": alt-ioc: (%d) init failure WARNING!\n", 2480 ioc->alt_ioc->name, rc); 2481 } 2482 } 2483 2484 if (reason == MPT_HOSTEVENT_IOC_BRINGUP){ 2485 if (ioc->upload_fw) { 2486 ddlprintk(ioc, printk(MYIOC_s_DEBUG_FMT 2487 "firmware upload required!\n", ioc->name)); 2488 2489 /* Controller is not operational, cannot do upload 2490 */ 2491 if (ret == 0) { 2492 rc = mpt_do_upload(ioc, sleepFlag); 2493 if (rc == 0) { 2494 if (ioc->alt_ioc && ioc->alt_ioc->cached_fw) { 2495 /* 2496 * Maintain only one pointer to FW memory 2497 * so there will not be two attempt to 2498 * downloadboot onboard dual function 2499 * chips (mpt_adapter_disable, 2500 * mpt_diag_reset) 2501 */ 2502 ddlprintk(ioc, printk(MYIOC_s_DEBUG_FMT 2503 "mpt_upload: alt_%s has cached_fw=%p \n", 2504 ioc->name, ioc->alt_ioc->name, ioc->alt_ioc->cached_fw)); 2505 ioc->cached_fw = NULL; 2506 } 2507 } else { 2508 printk(MYIOC_s_WARN_FMT 2509 "firmware upload failure!\n", ioc->name); 2510 ret = -6; 2511 } 2512 } 2513 } 2514 } 2515 2516 /* Enable MPT base driver management of EventNotification 2517 * and EventAck handling. 2518 */ 2519 if ((ret == 0) && (!ioc->facts.EventState)) { 2520 dinitprintk(ioc, printk(MYIOC_s_INFO_FMT 2521 "SendEventNotification\n", 2522 ioc->name)); 2523 ret = SendEventNotification(ioc, 1, sleepFlag); /* 1=Enable */ 2524 } 2525 2526 if (ioc->alt_ioc && alt_ioc_ready && !ioc->alt_ioc->facts.EventState) 2527 rc = SendEventNotification(ioc->alt_ioc, 1, sleepFlag); 2528 2529 if (ret == 0) { 2530 /* Enable! (reply interrupt) */ 2531 CHIPREG_WRITE32(&ioc->chip->IntMask, MPI_HIM_DIM); 2532 ioc->active = 1; 2533 } 2534 if (rc == 0) { /* alt ioc */ 2535 if (reset_alt_ioc_active && ioc->alt_ioc) { 2536 /* (re)Enable alt-IOC! (reply interrupt) */ 2537 dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "alt-ioc" 2538 "reply irq re-enabled\n", 2539 ioc->alt_ioc->name)); 2540 CHIPREG_WRITE32(&ioc->alt_ioc->chip->IntMask, 2541 MPI_HIM_DIM); 2542 ioc->alt_ioc->active = 1; 2543 } 2544 } 2545 2546 2547 /* Add additional "reason" check before call to GetLanConfigPages 2548 * (combined with GetIoUnitPage2 call). This prevents a somewhat 2549 * recursive scenario; GetLanConfigPages times out, timer expired 2550 * routine calls HardResetHandler, which calls into here again, 2551 * and we try GetLanConfigPages again... 2552 */ 2553 if ((ret == 0) && (reason == MPT_HOSTEVENT_IOC_BRINGUP)) { 2554 2555 /* 2556 * Initialize link list for inactive raid volumes. 
2557 */ 2558 mutex_init(&ioc->raid_data.inactive_list_mutex); 2559 INIT_LIST_HEAD(&ioc->raid_data.inactive_list); 2560 2561 switch (ioc->bus_type) { 2562 2563 case SAS: 2564 /* clear persistency table */ 2565 if(ioc->facts.IOCExceptions & 2566 MPI_IOCFACTS_EXCEPT_PERSISTENT_TABLE_FULL) { 2567 ret = mptbase_sas_persist_operation(ioc, 2568 MPI_SAS_OP_CLEAR_NOT_PRESENT); 2569 if(ret != 0) 2570 goto out; 2571 } 2572 2573 /* Find IM volumes 2574 */ 2575 mpt_findImVolumes(ioc); 2576 2577 /* Check, and possibly reset, the coalescing value 2578 */ 2579 mpt_read_ioc_pg_1(ioc); 2580 2581 break; 2582 2583 case FC: 2584 if ((ioc->pfacts[0].ProtocolFlags & 2585 MPI_PORTFACTS_PROTOCOL_LAN) && 2586 (ioc->lan_cnfg_page0.Header.PageLength == 0)) { 2587 /* 2588 * Pre-fetch the ports LAN MAC address! 2589 * (LANPage1_t stuff) 2590 */ 2591 (void) GetLanConfigPages(ioc); 2592 a = (u8*)&ioc->lan_cnfg_page1.HardwareAddressLow; 2593 dprintk(ioc, printk(MYIOC_s_DEBUG_FMT 2594 "LanAddr = %pMR\n", ioc->name, a)); 2595 } 2596 break; 2597 2598 case SPI: 2599 /* Get NVRAM and adapter maximums from SPP 0 and 2 2600 */ 2601 mpt_GetScsiPortSettings(ioc, 0); 2602 2603 /* Get version and length of SDP 1 2604 */ 2605 mpt_readScsiDevicePageHeaders(ioc, 0); 2606 2607 /* Find IM volumes 2608 */ 2609 if (ioc->facts.MsgVersion >= MPI_VERSION_01_02) 2610 mpt_findImVolumes(ioc); 2611 2612 /* Check, and possibly reset, the coalescing value 2613 */ 2614 mpt_read_ioc_pg_1(ioc); 2615 2616 mpt_read_ioc_pg_4(ioc); 2617 2618 break; 2619 } 2620 2621 GetIoUnitPage2(ioc); 2622 mpt_get_manufacturing_pg_0(ioc); 2623 } 2624 2625 out: 2626 if ((ret != 0) && irq_allocated) { 2627 free_irq(ioc->pci_irq, ioc); 2628 if (ioc->msi_enable) 2629 pci_disable_msi(ioc->pcidev); 2630 } 2631 return ret; 2632 } 2633 2634 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 2635 /** 2636 * mpt_detect_bound_ports - Search for matching PCI bus/dev_function 2637 * @ioc: Pointer to MPT adapter structure 2638 * @pdev: Pointer to (struct pci_dev) structure 2639 * 2640 * Search for PCI bus/dev_function which matches 2641 * PCI bus/dev_function (+/-1) for newly discovered 929, 2642 * 929X, 1030 or 1035. 2643 * 2644 * If match on PCI dev_function +/-1 is found, bind the two MPT adapters 2645 * using alt_ioc pointer fields in their %MPT_ADAPTER structures. 
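 *
 * For example (hypothetical topology), a dual-function 929X exposing
 * functions 02:04.0 and 02:04.1 is bound here: whichever function is probed
 * second finds its sibling via PCI_DEVFN(slot, func - 1) or
 * PCI_DEVFN(slot, func + 1), and the two adapters are cross-linked through
 * their ->alt_ioc fields.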
2646 */ 2647 static void 2648 mpt_detect_bound_ports(MPT_ADAPTER *ioc, struct pci_dev *pdev) 2649 { 2650 struct pci_dev *peer=NULL; 2651 unsigned int slot = PCI_SLOT(pdev->devfn); 2652 unsigned int func = PCI_FUNC(pdev->devfn); 2653 MPT_ADAPTER *ioc_srch; 2654 2655 dprintk(ioc, printk(MYIOC_s_DEBUG_FMT "PCI device %s devfn=%x/%x," 2656 " searching for devfn match on %x or %x\n", 2657 ioc->name, pci_name(pdev), pdev->bus->number, 2658 pdev->devfn, func-1, func+1)); 2659 2660 peer = pci_get_slot(pdev->bus, PCI_DEVFN(slot,func-1)); 2661 if (!peer) { 2662 peer = pci_get_slot(pdev->bus, PCI_DEVFN(slot,func+1)); 2663 if (!peer) 2664 return; 2665 } 2666 2667 list_for_each_entry(ioc_srch, &ioc_list, list) { 2668 struct pci_dev *_pcidev = ioc_srch->pcidev; 2669 if (_pcidev == peer) { 2670 /* Paranoia checks */ 2671 if (ioc->alt_ioc != NULL) { 2672 printk(MYIOC_s_WARN_FMT 2673 "Oops, already bound (%s <==> %s)!\n", 2674 ioc->name, ioc->name, ioc->alt_ioc->name); 2675 break; 2676 } else if (ioc_srch->alt_ioc != NULL) { 2677 printk(MYIOC_s_WARN_FMT 2678 "Oops, already bound (%s <==> %s)!\n", 2679 ioc_srch->name, ioc_srch->name, 2680 ioc_srch->alt_ioc->name); 2681 break; 2682 } 2683 dprintk(ioc, printk(MYIOC_s_DEBUG_FMT 2684 "FOUND! binding %s <==> %s\n", 2685 ioc->name, ioc->name, ioc_srch->name)); 2686 ioc_srch->alt_ioc = ioc; 2687 ioc->alt_ioc = ioc_srch; 2688 } 2689 } 2690 pci_dev_put(peer); 2691 } 2692 2693 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 2694 /** 2695 * mpt_adapter_disable - Disable misbehaving MPT adapter. 2696 * @ioc: Pointer to MPT adapter structure 2697 */ 2698 static void 2699 mpt_adapter_disable(MPT_ADAPTER *ioc) 2700 { 2701 int sz; 2702 int ret; 2703 2704 if (ioc->cached_fw != NULL) { 2705 ddlprintk(ioc, printk(MYIOC_s_DEBUG_FMT 2706 "%s: Pushing FW onto adapter\n", __func__, ioc->name)); 2707 if ((ret = mpt_downloadboot(ioc, (MpiFwHeader_t *) 2708 ioc->cached_fw, CAN_SLEEP)) < 0) { 2709 printk(MYIOC_s_WARN_FMT 2710 ": firmware downloadboot failure (%d)!\n", 2711 ioc->name, ret); 2712 } 2713 } 2714 2715 /* 2716 * Put the controller into ready state (if its not already) 2717 */ 2718 if (mpt_GetIocState(ioc, 1) != MPI_IOC_STATE_READY) { 2719 if (!SendIocReset(ioc, MPI_FUNCTION_IOC_MESSAGE_UNIT_RESET, 2720 CAN_SLEEP)) { 2721 if (mpt_GetIocState(ioc, 1) != MPI_IOC_STATE_READY) 2722 printk(MYIOC_s_ERR_FMT "%s: IOC msg unit " 2723 "reset failed to put ioc in ready state!\n", 2724 ioc->name, __func__); 2725 } else 2726 printk(MYIOC_s_ERR_FMT "%s: IOC msg unit reset " 2727 "failed!\n", ioc->name, __func__); 2728 } 2729 2730 2731 /* Disable adapter interrupts! 
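	 * Quiesce and mask the reply interrupt before the request/reply and
	 * sense buffer pools are freed below.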
*/ 2732 synchronize_irq(ioc->pcidev->irq); 2733 CHIPREG_WRITE32(&ioc->chip->IntMask, 0xFFFFFFFF); 2734 ioc->active = 0; 2735 2736 /* Clear any lingering interrupt */ 2737 CHIPREG_WRITE32(&ioc->chip->IntStatus, 0); 2738 CHIPREG_READ32(&ioc->chip->IntStatus); 2739 2740 if (ioc->alloc != NULL) { 2741 sz = ioc->alloc_sz; 2742 dexitprintk(ioc, printk(MYIOC_s_INFO_FMT "free @ %p, sz=%d bytes\n", 2743 ioc->name, ioc->alloc, ioc->alloc_sz)); 2744 dma_free_coherent(&ioc->pcidev->dev, sz, ioc->alloc, 2745 ioc->alloc_dma); 2746 ioc->reply_frames = NULL; 2747 ioc->req_frames = NULL; 2748 ioc->alloc = NULL; 2749 ioc->alloc_total -= sz; 2750 } 2751 2752 if (ioc->sense_buf_pool != NULL) { 2753 sz = (ioc->req_depth * MPT_SENSE_BUFFER_ALLOC); 2754 dma_free_coherent(&ioc->pcidev->dev, sz, ioc->sense_buf_pool, 2755 ioc->sense_buf_pool_dma); 2756 ioc->sense_buf_pool = NULL; 2757 ioc->alloc_total -= sz; 2758 } 2759 2760 if (ioc->events != NULL){ 2761 sz = MPTCTL_EVENT_LOG_SIZE * sizeof(MPT_IOCTL_EVENTS); 2762 kfree(ioc->events); 2763 ioc->events = NULL; 2764 ioc->alloc_total -= sz; 2765 } 2766 2767 mpt_free_fw_memory(ioc); 2768 2769 kfree(ioc->spi_data.nvram); 2770 mpt_inactive_raid_list_free(ioc); 2771 kfree(ioc->raid_data.pIocPg2); 2772 kfree(ioc->raid_data.pIocPg3); 2773 ioc->spi_data.nvram = NULL; 2774 ioc->raid_data.pIocPg3 = NULL; 2775 2776 if (ioc->spi_data.pIocPg4 != NULL) { 2777 sz = ioc->spi_data.IocPg4Sz; 2778 pci_free_consistent(ioc->pcidev, sz, 2779 ioc->spi_data.pIocPg4, 2780 ioc->spi_data.IocPg4_dma); 2781 ioc->spi_data.pIocPg4 = NULL; 2782 ioc->alloc_total -= sz; 2783 } 2784 2785 if (ioc->ReqToChain != NULL) { 2786 kfree(ioc->ReqToChain); 2787 kfree(ioc->RequestNB); 2788 ioc->ReqToChain = NULL; 2789 } 2790 2791 kfree(ioc->ChainToChain); 2792 ioc->ChainToChain = NULL; 2793 2794 if (ioc->HostPageBuffer != NULL) { 2795 if((ret = mpt_host_page_access_control(ioc, 2796 MPI_DB_HPBAC_FREE_BUFFER, NO_SLEEP)) != 0) { 2797 printk(MYIOC_s_ERR_FMT 2798 ": %s: host page buffers free failed (%d)!\n", 2799 ioc->name, __func__, ret); 2800 } 2801 dexitprintk(ioc, printk(MYIOC_s_DEBUG_FMT 2802 "HostPageBuffer free @ %p, sz=%d bytes\n", 2803 ioc->name, ioc->HostPageBuffer, 2804 ioc->HostPageBuffer_sz)); 2805 dma_free_coherent(&ioc->pcidev->dev, ioc->HostPageBuffer_sz, 2806 ioc->HostPageBuffer, ioc->HostPageBuffer_dma); 2807 ioc->HostPageBuffer = NULL; 2808 ioc->HostPageBuffer_sz = 0; 2809 ioc->alloc_total -= ioc->HostPageBuffer_sz; 2810 } 2811 2812 pci_set_drvdata(ioc->pcidev, NULL); 2813 } 2814 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 2815 /** 2816 * mpt_adapter_dispose - Free all resources associated with an MPT adapter 2817 * @ioc: Pointer to MPT adapter structure 2818 * 2819 * This routine unregisters h/w resources and frees all alloc'd memory 2820 * associated with a MPT adapter structure. 2821 */ 2822 static void 2823 mpt_adapter_dispose(MPT_ADAPTER *ioc) 2824 { 2825 int sz_first, sz_last; 2826 2827 if (ioc == NULL) 2828 return; 2829 2830 sz_first = ioc->alloc_total; 2831 2832 mpt_adapter_disable(ioc); 2833 2834 if (ioc->pci_irq != -1) { 2835 free_irq(ioc->pci_irq, ioc); 2836 if (ioc->msi_enable) 2837 pci_disable_msi(ioc->pcidev); 2838 ioc->pci_irq = -1; 2839 } 2840 2841 if (ioc->memmap != NULL) { 2842 iounmap(ioc->memmap); 2843 ioc->memmap = NULL; 2844 } 2845 2846 pci_disable_device(ioc->pcidev); 2847 pci_release_selected_regions(ioc->pcidev, ioc->bars); 2848 2849 /* Zap the adapter lookup ptr! 
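	 * (remove this adapter from the global ioc_list before the structure
	 * is freed)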
*/ 2850 list_del(&ioc->list); 2851 2852 sz_last = ioc->alloc_total; 2853 dprintk(ioc, printk(MYIOC_s_INFO_FMT "free'd %d of %d bytes\n", 2854 ioc->name, sz_first-sz_last+(int)sizeof(*ioc), sz_first)); 2855 2856 if (ioc->alt_ioc) 2857 ioc->alt_ioc->alt_ioc = NULL; 2858 2859 kfree(ioc); 2860 } 2861 2862 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 2863 /** 2864 * MptDisplayIocCapabilities - Disply IOC's capabilities. 2865 * @ioc: Pointer to MPT adapter structure 2866 */ 2867 static void 2868 MptDisplayIocCapabilities(MPT_ADAPTER *ioc) 2869 { 2870 int i = 0; 2871 2872 printk(KERN_INFO "%s: ", ioc->name); 2873 if (ioc->prod_name) 2874 pr_cont("%s: ", ioc->prod_name); 2875 pr_cont("Capabilities={"); 2876 2877 if (ioc->pfacts[0].ProtocolFlags & MPI_PORTFACTS_PROTOCOL_INITIATOR) { 2878 pr_cont("Initiator"); 2879 i++; 2880 } 2881 2882 if (ioc->pfacts[0].ProtocolFlags & MPI_PORTFACTS_PROTOCOL_TARGET) { 2883 pr_cont("%sTarget", i ? "," : ""); 2884 i++; 2885 } 2886 2887 if (ioc->pfacts[0].ProtocolFlags & MPI_PORTFACTS_PROTOCOL_LAN) { 2888 pr_cont("%sLAN", i ? "," : ""); 2889 i++; 2890 } 2891 2892 #if 0 2893 /* 2894 * This would probably evoke more questions than it's worth 2895 */ 2896 if (ioc->pfacts[0].ProtocolFlags & MPI_PORTFACTS_PROTOCOL_TARGET) { 2897 pr_cont("%sLogBusAddr", i ? "," : ""); 2898 i++; 2899 } 2900 #endif 2901 2902 pr_cont("}\n"); 2903 } 2904 2905 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 2906 /** 2907 * MakeIocReady - Get IOC to a READY state, using KickStart if needed. 2908 * @ioc: Pointer to MPT_ADAPTER structure 2909 * @force: Force hard KickStart of IOC 2910 * @sleepFlag: Specifies whether the process can sleep 2911 * 2912 * Returns: 2913 * 1 - DIAG reset and READY 2914 * 0 - READY initially OR soft reset and READY 2915 * -1 - Any failure on KickStart 2916 * -2 - Msg Unit Reset Failed 2917 * -3 - IO Unit Reset Failed 2918 * -4 - IOC owned by a PEER 2919 */ 2920 static int 2921 MakeIocReady(MPT_ADAPTER *ioc, int force, int sleepFlag) 2922 { 2923 u32 ioc_state; 2924 int statefault = 0; 2925 int cntdn; 2926 int hard_reset_done = 0; 2927 int r; 2928 int ii; 2929 int whoinit; 2930 2931 /* Get current [raw] IOC state */ 2932 ioc_state = mpt_GetIocState(ioc, 0); 2933 dhsprintk(ioc, printk(MYIOC_s_INFO_FMT "MakeIocReady [raw] state=%08x\n", ioc->name, ioc_state)); 2934 2935 /* 2936 * Check to see if IOC got left/stuck in doorbell handshake 2937 * grip of death. If so, hard reset the IOC. 2938 */ 2939 if (ioc_state & MPI_DOORBELL_ACTIVE) { 2940 statefault = 1; 2941 printk(MYIOC_s_WARN_FMT "Unexpected doorbell active!\n", 2942 ioc->name); 2943 } 2944 2945 /* Is it already READY? */ 2946 if (!statefault && 2947 ((ioc_state & MPI_IOC_STATE_MASK) == MPI_IOC_STATE_READY)) { 2948 dinitprintk(ioc, printk(MYIOC_s_INFO_FMT 2949 "IOC is in READY state\n", ioc->name)); 2950 return 0; 2951 } 2952 2953 /* 2954 * Check to see if IOC is in FAULT state. 2955 */ 2956 if ((ioc_state & MPI_IOC_STATE_MASK) == MPI_IOC_STATE_FAULT) { 2957 statefault = 2; 2958 printk(MYIOC_s_WARN_FMT "IOC is in FAULT state!!!\n", 2959 ioc->name); 2960 printk(MYIOC_s_WARN_FMT " FAULT code = %04xh\n", 2961 ioc->name, ioc_state & MPI_DOORBELL_DATA_MASK); 2962 } 2963 2964 /* 2965 * Hmmm... Did it get left operational? 2966 */ 2967 if ((ioc_state & MPI_IOC_STATE_MASK) == MPI_IOC_STATE_OPERATIONAL) { 2968 dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "IOC operational unexpected\n", 2969 ioc->name)); 2970 2971 /* Check WhoInit. 2972 * If PCI Peer, exit. 
2973 * Else, if no fault conditions are present, issue a MessageUnitReset 2974 * Else, fall through to KickStart case 2975 */ 2976 whoinit = (ioc_state & MPI_DOORBELL_WHO_INIT_MASK) >> MPI_DOORBELL_WHO_INIT_SHIFT; 2977 dinitprintk(ioc, printk(MYIOC_s_INFO_FMT 2978 "whoinit 0x%x statefault %d force %d\n", 2979 ioc->name, whoinit, statefault, force)); 2980 if (whoinit == MPI_WHOINIT_PCI_PEER) 2981 return -4; 2982 else { 2983 if ((statefault == 0 ) && (force == 0)) { 2984 if ((r = SendIocReset(ioc, MPI_FUNCTION_IOC_MESSAGE_UNIT_RESET, sleepFlag)) == 0) 2985 return 0; 2986 } 2987 statefault = 3; 2988 } 2989 } 2990 2991 hard_reset_done = KickStart(ioc, statefault||force, sleepFlag); 2992 if (hard_reset_done < 0) 2993 return -1; 2994 2995 /* 2996 * Loop here waiting for IOC to come READY. 2997 */ 2998 ii = 0; 2999 cntdn = ((sleepFlag == CAN_SLEEP) ? HZ : 1000) * 5; /* 5 seconds */ 3000 3001 while ((ioc_state = mpt_GetIocState(ioc, 1)) != MPI_IOC_STATE_READY) { 3002 if (ioc_state == MPI_IOC_STATE_OPERATIONAL) { 3003 /* 3004 * BIOS or previous driver load left IOC in OP state. 3005 * Reset messaging FIFOs. 3006 */ 3007 if ((r = SendIocReset(ioc, MPI_FUNCTION_IOC_MESSAGE_UNIT_RESET, sleepFlag)) != 0) { 3008 printk(MYIOC_s_ERR_FMT "IOC msg unit reset failed!\n", ioc->name); 3009 return -2; 3010 } 3011 } else if (ioc_state == MPI_IOC_STATE_RESET) { 3012 /* 3013 * Something is wrong. Try to get IOC back 3014 * to a known state. 3015 */ 3016 if ((r = SendIocReset(ioc, MPI_FUNCTION_IO_UNIT_RESET, sleepFlag)) != 0) { 3017 printk(MYIOC_s_ERR_FMT "IO unit reset failed!\n", ioc->name); 3018 return -3; 3019 } 3020 } 3021 3022 ii++; cntdn--; 3023 if (!cntdn) { 3024 printk(MYIOC_s_ERR_FMT 3025 "Wait IOC_READY state (0x%x) timeout(%d)!\n", 3026 ioc->name, ioc_state, (int)((ii+5)/HZ)); 3027 return -ETIME; 3028 } 3029 3030 if (sleepFlag == CAN_SLEEP) { 3031 msleep(1); 3032 } else { 3033 mdelay (1); /* 1 msec delay */ 3034 } 3035 3036 } 3037 3038 if (statefault < 3) { 3039 printk(MYIOC_s_INFO_FMT "Recovered from %s\n", ioc->name, 3040 statefault == 1 ? "stuck handshake" : "IOC FAULT"); 3041 } 3042 3043 return hard_reset_done; 3044 } 3045 3046 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 3047 /** 3048 * mpt_GetIocState - Get the current state of a MPT adapter. 3049 * @ioc: Pointer to MPT_ADAPTER structure 3050 * @cooked: Request raw or cooked IOC state 3051 * 3052 * Returns all IOC Doorbell register bits if cooked==0, else just the 3053 * Doorbell bits in MPI_IOC_STATE_MASK. 3054 */ 3055 u32 3056 mpt_GetIocState(MPT_ADAPTER *ioc, int cooked) 3057 { 3058 u32 s, sc; 3059 3060 /* Get! */ 3061 s = CHIPREG_READ32(&ioc->chip->Doorbell); 3062 sc = s & MPI_IOC_STATE_MASK; 3063 3064 /* Save! */ 3065 ioc->last_state = sc; 3066 3067 return cooked ? sc : s; 3068 } 3069 3070 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 3071 /** 3072 * GetIocFacts - Send IOCFacts request to MPT adapter. 3073 * @ioc: Pointer to MPT_ADAPTER structure 3074 * @sleepFlag: Specifies whether the process can sleep 3075 * @reason: If recovery, only update facts. 3076 * 3077 * Returns 0 for success, non-zero for failure. 3078 */ 3079 static int 3080 GetIocFacts(MPT_ADAPTER *ioc, int sleepFlag, int reason) 3081 { 3082 IOCFacts_t get_facts; 3083 IOCFactsReply_t *facts; 3084 int r; 3085 int req_sz; 3086 int reply_sz; 3087 int sz; 3088 u32 status, vv; 3089 u8 shiftFactor=1; 3090 3091 /* IOC *must* NOT be in RESET state! 
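	 * The doorbell handshake used below cannot complete while the IOC is
	 * still in RESET, so bail out early.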
*/ 3092 if (ioc->last_state == MPI_IOC_STATE_RESET) { 3093 printk(KERN_ERR MYNAM 3094 ": ERROR - Can't get IOCFacts, %s NOT READY! (%08x)\n", 3095 ioc->name, ioc->last_state); 3096 return -44; 3097 } 3098 3099 facts = &ioc->facts; 3100 3101 /* Destination (reply area)... */ 3102 reply_sz = sizeof(*facts); 3103 memset(facts, 0, reply_sz); 3104 3105 /* Request area (get_facts on the stack right now!) */ 3106 req_sz = sizeof(get_facts); 3107 memset(&get_facts, 0, req_sz); 3108 3109 get_facts.Function = MPI_FUNCTION_IOC_FACTS; 3110 /* Assert: All other get_facts fields are zero! */ 3111 3112 dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT 3113 "Sending get IocFacts request req_sz=%d reply_sz=%d\n", 3114 ioc->name, req_sz, reply_sz)); 3115 3116 /* No non-zero fields in the get_facts request are greater than 3117 * 1 byte in size, so we can just fire it off as is. 3118 */ 3119 r = mpt_handshake_req_reply_wait(ioc, req_sz, (u32*)&get_facts, 3120 reply_sz, (u16*)facts, 5 /*seconds*/, sleepFlag); 3121 if (r != 0) 3122 return r; 3123 3124 /* 3125 * Now byte swap (GRRR) the necessary fields before any further 3126 * inspection of reply contents. 3127 * 3128 * But need to do some sanity checks on MsgLength (byte) field 3129 * to make sure we don't zero IOC's req_sz! 3130 */ 3131 /* Did we get a valid reply? */ 3132 if (facts->MsgLength > offsetof(IOCFactsReply_t, RequestFrameSize)/sizeof(u32)) { 3133 if (reason == MPT_HOSTEVENT_IOC_BRINGUP) { 3134 /* 3135 * If not been here, done that, save off first WhoInit value 3136 */ 3137 if (ioc->FirstWhoInit == WHOINIT_UNKNOWN) 3138 ioc->FirstWhoInit = facts->WhoInit; 3139 } 3140 3141 facts->MsgVersion = le16_to_cpu(facts->MsgVersion); 3142 facts->MsgContext = le32_to_cpu(facts->MsgContext); 3143 facts->IOCExceptions = le16_to_cpu(facts->IOCExceptions); 3144 facts->IOCStatus = le16_to_cpu(facts->IOCStatus); 3145 facts->IOCLogInfo = le32_to_cpu(facts->IOCLogInfo); 3146 status = le16_to_cpu(facts->IOCStatus) & MPI_IOCSTATUS_MASK; 3147 /* CHECKME! IOCStatus, IOCLogInfo */ 3148 3149 facts->ReplyQueueDepth = le16_to_cpu(facts->ReplyQueueDepth); 3150 facts->RequestFrameSize = le16_to_cpu(facts->RequestFrameSize); 3151 3152 /* 3153 * FC f/w version changed between 1.1 and 1.2 3154 * Old: u16{Major(4),Minor(4),SubMinor(8)} 3155 * New: u32{Major(8),Minor(8),Unit(8),Dev(8)} 3156 */ 3157 if (facts->MsgVersion < MPI_VERSION_01_02) { 3158 /* 3159 * Handle old FC f/w style, convert to new... 3160 */ 3161 u16 oldv = le16_to_cpu(facts->Reserved_0101_FWVersion); 3162 facts->FWVersion.Word = 3163 ((oldv<<12) & 0xFF000000) | 3164 ((oldv<<8) & 0x000FFF00); 3165 } else 3166 facts->FWVersion.Word = le32_to_cpu(facts->FWVersion.Word); 3167 3168 facts->ProductID = le16_to_cpu(facts->ProductID); 3169 3170 if ((ioc->facts.ProductID & MPI_FW_HEADER_PID_PROD_MASK) 3171 > MPI_FW_HEADER_PID_PROD_TARGET_SCSI) 3172 ioc->ir_firmware = 1; 3173 3174 facts->CurrentHostMfaHighAddr = 3175 le32_to_cpu(facts->CurrentHostMfaHighAddr); 3176 facts->GlobalCredits = le16_to_cpu(facts->GlobalCredits); 3177 facts->CurrentSenseBufferHighAddr = 3178 le32_to_cpu(facts->CurrentSenseBufferHighAddr); 3179 facts->CurReplyFrameSize = 3180 le16_to_cpu(facts->CurReplyFrameSize); 3181 facts->IOCCapabilities = le32_to_cpu(facts->IOCCapabilities); 3182 3183 /* 3184 * Handle NEW (!) IOCFactsReply fields in MPI-1.01.xx 3185 * Older MPI-1.00.xx struct had 13 dwords, and enlarged 3186 * to 14 in MPI-1.01.0x. 
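		 * The (offsetof() + 7)/4 expression below converts the byte
		 * offset of FWImageSize into the smallest MsgLength, counted
		 * in 32-bit dwords, whose reply still contains that field.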
3187 */ 3188 if (facts->MsgLength >= (offsetof(IOCFactsReply_t,FWImageSize) + 7)/4 && 3189 facts->MsgVersion > MPI_VERSION_01_00) { 3190 facts->FWImageSize = le32_to_cpu(facts->FWImageSize); 3191 } 3192 3193 facts->FWImageSize = ALIGN(facts->FWImageSize, 4); 3194 3195 if (!facts->RequestFrameSize) { 3196 /* Something is wrong! */ 3197 printk(MYIOC_s_ERR_FMT "IOC reported invalid 0 request size!\n", 3198 ioc->name); 3199 return -55; 3200 } 3201 3202 r = sz = facts->BlockSize; 3203 vv = ((63 / (sz * 4)) + 1) & 0x03; 3204 ioc->NB_for_64_byte_frame = vv; 3205 while ( sz ) 3206 { 3207 shiftFactor++; 3208 sz = sz >> 1; 3209 } 3210 ioc->NBShiftFactor = shiftFactor; 3211 dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT 3212 "NB_for_64_byte_frame=%x NBShiftFactor=%x BlockSize=%x\n", 3213 ioc->name, vv, shiftFactor, r)); 3214 3215 if (reason == MPT_HOSTEVENT_IOC_BRINGUP) { 3216 /* 3217 * Set values for this IOC's request & reply frame sizes, 3218 * and request & reply queue depths... 3219 */ 3220 ioc->req_sz = min(MPT_DEFAULT_FRAME_SIZE, facts->RequestFrameSize * 4); 3221 ioc->req_depth = min_t(int, MPT_MAX_REQ_DEPTH, facts->GlobalCredits); 3222 ioc->reply_sz = MPT_REPLY_FRAME_SIZE; 3223 ioc->reply_depth = min_t(int, MPT_DEFAULT_REPLY_DEPTH, facts->ReplyQueueDepth); 3224 3225 dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "reply_sz=%3d, reply_depth=%4d\n", 3226 ioc->name, ioc->reply_sz, ioc->reply_depth)); 3227 dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "req_sz =%3d, req_depth =%4d\n", 3228 ioc->name, ioc->req_sz, ioc->req_depth)); 3229 3230 /* Get port facts! */ 3231 if ( (r = GetPortFacts(ioc, 0, sleepFlag)) != 0 ) 3232 return r; 3233 } 3234 } else { 3235 printk(MYIOC_s_ERR_FMT 3236 "Invalid IOC facts reply, msgLength=%d offsetof=%zd!\n", 3237 ioc->name, facts->MsgLength, (offsetof(IOCFactsReply_t, 3238 RequestFrameSize)/sizeof(u32))); 3239 return -66; 3240 } 3241 3242 return 0; 3243 } 3244 3245 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 3246 /** 3247 * GetPortFacts - Send PortFacts request to MPT adapter. 3248 * @ioc: Pointer to MPT_ADAPTER structure 3249 * @portnum: Port number 3250 * @sleepFlag: Specifies whether the process can sleep 3251 * 3252 * Returns 0 for success, non-zero for failure. 3253 */ 3254 static int 3255 GetPortFacts(MPT_ADAPTER *ioc, int portnum, int sleepFlag) 3256 { 3257 PortFacts_t get_pfacts; 3258 PortFactsReply_t *pfacts; 3259 int ii; 3260 int req_sz; 3261 int reply_sz; 3262 int max_id; 3263 3264 /* IOC *must* NOT be in RESET state! */ 3265 if (ioc->last_state == MPI_IOC_STATE_RESET) { 3266 printk(MYIOC_s_ERR_FMT "Can't get PortFacts NOT READY! (%08x)\n", 3267 ioc->name, ioc->last_state ); 3268 return -4; 3269 } 3270 3271 pfacts = &ioc->pfacts[portnum]; 3272 3273 /* Destination (reply area)... */ 3274 reply_sz = sizeof(*pfacts); 3275 memset(pfacts, 0, reply_sz); 3276 3277 /* Request area (get_pfacts on the stack right now!) */ 3278 req_sz = sizeof(get_pfacts); 3279 memset(&get_pfacts, 0, req_sz); 3280 3281 get_pfacts.Function = MPI_FUNCTION_PORT_FACTS; 3282 get_pfacts.PortNumber = portnum; 3283 /* Assert: All other get_pfacts fields are zero! */ 3284 3285 dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Sending get PortFacts(%d) request\n", 3286 ioc->name, portnum)); 3287 3288 /* No non-zero fields in the get_pfacts request are greater than 3289 * 1 byte in size, so we can just fire it off as is. 
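	 * (i.e. no cpu_to_le byte swapping of the request is required)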
3290 */ 3291 ii = mpt_handshake_req_reply_wait(ioc, req_sz, (u32*)&get_pfacts, 3292 reply_sz, (u16*)pfacts, 5 /*seconds*/, sleepFlag); 3293 if (ii != 0) 3294 return ii; 3295 3296 /* Did we get a valid reply? */ 3297 3298 /* Now byte swap the necessary fields in the response. */ 3299 pfacts->MsgContext = le32_to_cpu(pfacts->MsgContext); 3300 pfacts->IOCStatus = le16_to_cpu(pfacts->IOCStatus); 3301 pfacts->IOCLogInfo = le32_to_cpu(pfacts->IOCLogInfo); 3302 pfacts->MaxDevices = le16_to_cpu(pfacts->MaxDevices); 3303 pfacts->PortSCSIID = le16_to_cpu(pfacts->PortSCSIID); 3304 pfacts->ProtocolFlags = le16_to_cpu(pfacts->ProtocolFlags); 3305 pfacts->MaxPostedCmdBuffers = le16_to_cpu(pfacts->MaxPostedCmdBuffers); 3306 pfacts->MaxPersistentIDs = le16_to_cpu(pfacts->MaxPersistentIDs); 3307 pfacts->MaxLanBuckets = le16_to_cpu(pfacts->MaxLanBuckets); 3308 3309 max_id = (ioc->bus_type == SAS) ? pfacts->PortSCSIID : 3310 pfacts->MaxDevices; 3311 ioc->devices_per_bus = (max_id > 255) ? 256 : max_id; 3312 ioc->number_of_buses = (ioc->devices_per_bus < 256) ? 1 : max_id/256; 3313 3314 /* 3315 * Place all the devices on channels 3316 * 3317 * (for debuging) 3318 */ 3319 if (mpt_channel_mapping) { 3320 ioc->devices_per_bus = 1; 3321 ioc->number_of_buses = (max_id > 255) ? 255 : max_id; 3322 } 3323 3324 return 0; 3325 } 3326 3327 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 3328 /** 3329 * SendIocInit - Send IOCInit request to MPT adapter. 3330 * @ioc: Pointer to MPT_ADAPTER structure 3331 * @sleepFlag: Specifies whether the process can sleep 3332 * 3333 * Send IOCInit followed by PortEnable to bring IOC to OPERATIONAL state. 3334 * 3335 * Returns 0 for success, non-zero for failure. 3336 */ 3337 static int 3338 SendIocInit(MPT_ADAPTER *ioc, int sleepFlag) 3339 { 3340 IOCInit_t ioc_init; 3341 MPIDefaultReply_t init_reply; 3342 u32 state; 3343 int r; 3344 int count; 3345 int cntdn; 3346 3347 memset(&ioc_init, 0, sizeof(ioc_init)); 3348 memset(&init_reply, 0, sizeof(init_reply)); 3349 3350 ioc_init.WhoInit = MPI_WHOINIT_HOST_DRIVER; 3351 ioc_init.Function = MPI_FUNCTION_IOC_INIT; 3352 3353 /* If we are in a recovery mode and we uploaded the FW image, 3354 * then this pointer is not NULL. Skip the upload a second time. 3355 * Set this flag if cached_fw set for either IOC. 3356 */ 3357 if (ioc->facts.Flags & MPI_IOCFACTS_FLAGS_FW_DOWNLOAD_BOOT) 3358 ioc->upload_fw = 1; 3359 else 3360 ioc->upload_fw = 0; 3361 ddlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "upload_fw %d facts.Flags=%x\n", 3362 ioc->name, ioc->upload_fw, ioc->facts.Flags)); 3363 3364 ioc_init.MaxDevices = (U8)ioc->devices_per_bus; 3365 ioc_init.MaxBuses = (U8)ioc->number_of_buses; 3366 3367 dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "facts.MsgVersion=%x\n", 3368 ioc->name, ioc->facts.MsgVersion)); 3369 if (ioc->facts.MsgVersion >= MPI_VERSION_01_05) { 3370 // set MsgVersion and HeaderVersion host driver was built with 3371 ioc_init.MsgVersion = cpu_to_le16(MPI_VERSION); 3372 ioc_init.HeaderVersion = cpu_to_le16(MPI_HEADER_VERSION); 3373 3374 if (ioc->facts.Flags & MPI_IOCFACTS_FLAGS_HOST_PAGE_BUFFER_PERSISTENT) { 3375 ioc_init.HostPageBufferSGE = ioc->facts.HostPageBufferSGE; 3376 } else if(mpt_host_page_alloc(ioc, &ioc_init)) 3377 return -99; 3378 } 3379 ioc_init.ReplyFrameSize = cpu_to_le16(ioc->reply_sz); /* in BYTES */ 3380 3381 if (ioc->sg_addr_size == sizeof(u64)) { 3382 /* Save the upper 32-bits of the request 3383 * (reply) and sense buffers. 
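		 * The IOC combines these high 32 bits with the 32-bit frame
		 * addresses it is given later to form full 64-bit DMA
		 * addresses.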
3384 */ 3385 ioc_init.HostMfaHighAddr = cpu_to_le32((u32)((u64)ioc->alloc_dma >> 32)); 3386 ioc_init.SenseBufferHighAddr = cpu_to_le32((u32)((u64)ioc->sense_buf_pool_dma >> 32)); 3387 } else { 3388 /* Force 32-bit addressing */ 3389 ioc_init.HostMfaHighAddr = cpu_to_le32(0); 3390 ioc_init.SenseBufferHighAddr = cpu_to_le32(0); 3391 } 3392 3393 ioc->facts.CurrentHostMfaHighAddr = ioc_init.HostMfaHighAddr; 3394 ioc->facts.CurrentSenseBufferHighAddr = ioc_init.SenseBufferHighAddr; 3395 ioc->facts.MaxDevices = ioc_init.MaxDevices; 3396 ioc->facts.MaxBuses = ioc_init.MaxBuses; 3397 3398 dhsprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Sending IOCInit (req @ %p)\n", 3399 ioc->name, &ioc_init)); 3400 3401 r = mpt_handshake_req_reply_wait(ioc, sizeof(IOCInit_t), (u32*)&ioc_init, 3402 sizeof(MPIDefaultReply_t), (u16*)&init_reply, 10 /*seconds*/, sleepFlag); 3403 if (r != 0) { 3404 printk(MYIOC_s_ERR_FMT "Sending IOCInit failed(%d)!\n",ioc->name, r); 3405 return r; 3406 } 3407 3408 /* No need to byte swap the multibyte fields in the reply 3409 * since we don't even look at its contents. 3410 */ 3411 3412 dhsprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Sending PortEnable (req @ %p)\n", 3413 ioc->name, &ioc_init)); 3414 3415 if ((r = SendPortEnable(ioc, 0, sleepFlag)) != 0) { 3416 printk(MYIOC_s_ERR_FMT "Sending PortEnable failed(%d)!\n",ioc->name, r); 3417 return r; 3418 } 3419 3420 /* YIKES! SUPER IMPORTANT!!! 3421 * Poll IocState until _OPERATIONAL while IOC is doing 3422 * LoopInit and TargetDiscovery! 3423 */ 3424 count = 0; 3425 cntdn = ((sleepFlag == CAN_SLEEP) ? HZ : 1000) * 60; /* 60 seconds */ 3426 state = mpt_GetIocState(ioc, 1); 3427 while (state != MPI_IOC_STATE_OPERATIONAL && --cntdn) { 3428 if (sleepFlag == CAN_SLEEP) { 3429 msleep(1); 3430 } else { 3431 mdelay(1); 3432 } 3433 3434 if (!cntdn) { 3435 printk(MYIOC_s_ERR_FMT "Wait IOC_OP state timeout(%d)!\n", 3436 ioc->name, (int)((count+5)/HZ)); 3437 return -9; 3438 } 3439 3440 state = mpt_GetIocState(ioc, 1); 3441 count++; 3442 } 3443 dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Wait IOC_OPERATIONAL state (cnt=%d)\n", 3444 ioc->name, count)); 3445 3446 ioc->aen_event_read_flag=0; 3447 return r; 3448 } 3449 3450 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 3451 /** 3452 * SendPortEnable - Send PortEnable request to MPT adapter port. 3453 * @ioc: Pointer to MPT_ADAPTER structure 3454 * @portnum: Port number to enable 3455 * @sleepFlag: Specifies whether the process can sleep 3456 * 3457 * Send PortEnable to bring IOC to OPERATIONAL state. 3458 * 3459 * Returns 0 for success, non-zero for failure. 3460 */ 3461 static int 3462 SendPortEnable(MPT_ADAPTER *ioc, int portnum, int sleepFlag) 3463 { 3464 PortEnable_t port_enable; 3465 MPIDefaultReply_t reply_buf; 3466 int rc; 3467 int req_sz; 3468 int reply_sz; 3469 3470 /* Destination... 
*/ 3471 reply_sz = sizeof(MPIDefaultReply_t); 3472 memset(&reply_buf, 0, reply_sz); 3473 3474 req_sz = sizeof(PortEnable_t); 3475 memset(&port_enable, 0, req_sz); 3476 3477 port_enable.Function = MPI_FUNCTION_PORT_ENABLE; 3478 port_enable.PortNumber = portnum; 3479 /* port_enable.ChainOffset = 0; */ 3480 /* port_enable.MsgFlags = 0; */ 3481 /* port_enable.MsgContext = 0; */ 3482 3483 dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Sending Port(%d)Enable (req @ %p)\n", 3484 ioc->name, portnum, &port_enable)); 3485 3486 /* RAID FW may take a long time to enable 3487 */ 3488 if (ioc->ir_firmware || ioc->bus_type == SAS) { 3489 rc = mpt_handshake_req_reply_wait(ioc, req_sz, 3490 (u32*)&port_enable, reply_sz, (u16*)&reply_buf, 3491 300 /*seconds*/, sleepFlag); 3492 } else { 3493 rc = mpt_handshake_req_reply_wait(ioc, req_sz, 3494 (u32*)&port_enable, reply_sz, (u16*)&reply_buf, 3495 30 /*seconds*/, sleepFlag); 3496 } 3497 return rc; 3498 } 3499 3500 /** 3501 * mpt_alloc_fw_memory - allocate firmware memory 3502 * @ioc: Pointer to MPT_ADAPTER structure 3503 * @size: total FW bytes 3504 * 3505 * If memory has already been allocated, the same (cached) value 3506 * is returned. 3507 * 3508 * Return 0 if successful, or non-zero for failure 3509 **/ 3510 int 3511 mpt_alloc_fw_memory(MPT_ADAPTER *ioc, int size) 3512 { 3513 int rc; 3514 3515 if (ioc->cached_fw) { 3516 rc = 0; /* use already allocated memory */ 3517 goto out; 3518 } 3519 else if (ioc->alt_ioc && ioc->alt_ioc->cached_fw) { 3520 ioc->cached_fw = ioc->alt_ioc->cached_fw; /* use alt_ioc's memory */ 3521 ioc->cached_fw_dma = ioc->alt_ioc->cached_fw_dma; 3522 rc = 0; 3523 goto out; 3524 } 3525 ioc->cached_fw = pci_alloc_consistent(ioc->pcidev, size, &ioc->cached_fw_dma); 3526 if (!ioc->cached_fw) { 3527 printk(MYIOC_s_ERR_FMT "Unable to allocate memory for the cached firmware image!\n", 3528 ioc->name); 3529 rc = -1; 3530 } else { 3531 dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "FW Image @ %p[%p], sz=%d[%x] bytes\n", 3532 ioc->name, ioc->cached_fw, (void *)(ulong)ioc->cached_fw_dma, size, size)); 3533 ioc->alloc_total += size; 3534 rc = 0; 3535 } 3536 out: 3537 return rc; 3538 } 3539 3540 /** 3541 * mpt_free_fw_memory - free firmware memory 3542 * @ioc: Pointer to MPT_ADAPTER structure 3543 * 3544 * If alt_img is NULL, delete from ioc structure. 3545 * Else, delete a secondary image in same format. 3546 **/ 3547 void 3548 mpt_free_fw_memory(MPT_ADAPTER *ioc) 3549 { 3550 int sz; 3551 3552 if (!ioc->cached_fw) 3553 return; 3554 3555 sz = ioc->facts.FWImageSize; 3556 dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "free_fw_memory: FW Image @ %p[%p], sz=%d[%x] bytes\n", 3557 ioc->name, ioc->cached_fw, (void *)(ulong)ioc->cached_fw_dma, sz, sz)); 3558 pci_free_consistent(ioc->pcidev, sz, ioc->cached_fw, ioc->cached_fw_dma); 3559 ioc->alloc_total -= sz; 3560 ioc->cached_fw = NULL; 3561 } 3562 3563 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 3564 /** 3565 * mpt_do_upload - Construct and Send FWUpload request to MPT adapter port. 3566 * @ioc: Pointer to MPT_ADAPTER structure 3567 * @sleepFlag: Specifies whether the process can sleep 3568 * 3569 * Returns 0 for success, >0 for handshake failure 3570 * <0 for fw upload failure. 3571 * 3572 * Remark: If bound IOC and a successful FWUpload was performed 3573 * on the bound IOC, the second image is discarded 3574 * and memory is free'd. Both channels must upload to prevent 3575 * IOC from running in degraded mode. 
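 *
 * The uploaded image is kept in ioc->cached_fw (allocated via
 * mpt_alloc_fw_memory()) and is pushed back to the chip later by
 * mpt_downloadboot(), e.g. from mpt_adapter_disable().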
3576 */ 3577 static int 3578 mpt_do_upload(MPT_ADAPTER *ioc, int sleepFlag) 3579 { 3580 u8 reply[sizeof(FWUploadReply_t)]; 3581 FWUpload_t *prequest; 3582 FWUploadReply_t *preply; 3583 FWUploadTCSGE_t *ptcsge; 3584 u32 flagsLength; 3585 int ii, sz, reply_sz; 3586 int cmdStatus; 3587 int request_size; 3588 /* If the image size is 0, we are done. 3589 */ 3590 if ((sz = ioc->facts.FWImageSize) == 0) 3591 return 0; 3592 3593 if (mpt_alloc_fw_memory(ioc, ioc->facts.FWImageSize) != 0) 3594 return -ENOMEM; 3595 3596 dinitprintk(ioc, printk(MYIOC_s_INFO_FMT ": FW Image @ %p[%p], sz=%d[%x] bytes\n", 3597 ioc->name, ioc->cached_fw, (void *)(ulong)ioc->cached_fw_dma, sz, sz)); 3598 3599 prequest = (sleepFlag == NO_SLEEP) ? kzalloc(ioc->req_sz, GFP_ATOMIC) : 3600 kzalloc(ioc->req_sz, GFP_KERNEL); 3601 if (!prequest) { 3602 dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "fw upload failed " 3603 "while allocating memory \n", ioc->name)); 3604 mpt_free_fw_memory(ioc); 3605 return -ENOMEM; 3606 } 3607 3608 preply = (FWUploadReply_t *)&reply; 3609 3610 reply_sz = sizeof(reply); 3611 memset(preply, 0, reply_sz); 3612 3613 prequest->ImageType = MPI_FW_UPLOAD_ITYPE_FW_IOC_MEM; 3614 prequest->Function = MPI_FUNCTION_FW_UPLOAD; 3615 3616 ptcsge = (FWUploadTCSGE_t *) &prequest->SGL; 3617 ptcsge->DetailsLength = 12; 3618 ptcsge->Flags = MPI_SGE_FLAGS_TRANSACTION_ELEMENT; 3619 ptcsge->ImageSize = cpu_to_le32(sz); 3620 ptcsge++; 3621 3622 flagsLength = MPT_SGE_FLAGS_SSIMPLE_READ | sz; 3623 ioc->add_sge((char *)ptcsge, flagsLength, ioc->cached_fw_dma); 3624 request_size = offsetof(FWUpload_t, SGL) + sizeof(FWUploadTCSGE_t) + 3625 ioc->SGE_size; 3626 dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Sending FW Upload " 3627 " (req @ %p) fw_size=%d mf_request_size=%d\n", ioc->name, prequest, 3628 ioc->facts.FWImageSize, request_size)); 3629 DBG_DUMP_FW_REQUEST_FRAME(ioc, (u32 *)prequest); 3630 3631 ii = mpt_handshake_req_reply_wait(ioc, request_size, (u32 *)prequest, 3632 reply_sz, (u16 *)preply, 65 /*seconds*/, sleepFlag); 3633 3634 dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "FW Upload completed " 3635 "rc=%x \n", ioc->name, ii)); 3636 3637 cmdStatus = -EFAULT; 3638 if (ii == 0) { 3639 /* Handshake transfer was complete and successful. 3640 * Check the Reply Frame. 3641 */ 3642 int status; 3643 status = le16_to_cpu(preply->IOCStatus) & 3644 MPI_IOCSTATUS_MASK; 3645 if (status == MPI_IOCSTATUS_SUCCESS && 3646 ioc->facts.FWImageSize == 3647 le32_to_cpu(preply->ActualImageSize)) 3648 cmdStatus = 0; 3649 } 3650 dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT ": do_upload cmdStatus=%d \n", 3651 ioc->name, cmdStatus)); 3652 3653 3654 if (cmdStatus) { 3655 ddlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "fw upload failed, " 3656 "freeing image \n", ioc->name)); 3657 mpt_free_fw_memory(ioc); 3658 } 3659 kfree(prequest); 3660 3661 return cmdStatus; 3662 } 3663 3664 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 3665 /** 3666 * mpt_downloadboot - DownloadBoot code 3667 * @ioc: Pointer to MPT_ADAPTER structure 3668 * @pFwHeader: Pointer to firmware header info 3669 * @sleepFlag: Specifies whether the process can sleep 3670 * 3671 * FwDownloadBoot requires Programmed IO access. 3672 * 3673 * Returns 0 for success 3674 * -1 FW Image size is 0 3675 * -2 No valid cached_fw Pointer 3676 * <0 for fw upload failure. 
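 * (in the code below, -3 means the diagnostic reset never completed and
 * -EFAULT covers the post-download failures: IocFacts, IocInit, or the
 * IOC never reaching READY)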
3677 */ 3678 static int 3679 mpt_downloadboot(MPT_ADAPTER *ioc, MpiFwHeader_t *pFwHeader, int sleepFlag) 3680 { 3681 MpiExtImageHeader_t *pExtImage; 3682 u32 fwSize; 3683 u32 diag0val; 3684 int count; 3685 u32 *ptrFw; 3686 u32 diagRwData; 3687 u32 nextImage; 3688 u32 load_addr; 3689 u32 ioc_state=0; 3690 3691 ddlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "downloadboot: fw size 0x%x (%d), FW Ptr %p\n", 3692 ioc->name, pFwHeader->ImageSize, pFwHeader->ImageSize, pFwHeader)); 3693 3694 CHIPREG_WRITE32(&ioc->chip->WriteSequence, 0xFF); 3695 CHIPREG_WRITE32(&ioc->chip->WriteSequence, MPI_WRSEQ_1ST_KEY_VALUE); 3696 CHIPREG_WRITE32(&ioc->chip->WriteSequence, MPI_WRSEQ_2ND_KEY_VALUE); 3697 CHIPREG_WRITE32(&ioc->chip->WriteSequence, MPI_WRSEQ_3RD_KEY_VALUE); 3698 CHIPREG_WRITE32(&ioc->chip->WriteSequence, MPI_WRSEQ_4TH_KEY_VALUE); 3699 CHIPREG_WRITE32(&ioc->chip->WriteSequence, MPI_WRSEQ_5TH_KEY_VALUE); 3700 3701 CHIPREG_WRITE32(&ioc->chip->Diagnostic, (MPI_DIAG_PREVENT_IOC_BOOT | MPI_DIAG_DISABLE_ARM)); 3702 3703 /* wait 1 msec */ 3704 if (sleepFlag == CAN_SLEEP) { 3705 msleep(1); 3706 } else { 3707 mdelay (1); 3708 } 3709 3710 diag0val = CHIPREG_READ32(&ioc->chip->Diagnostic); 3711 CHIPREG_WRITE32(&ioc->chip->Diagnostic, diag0val | MPI_DIAG_RESET_ADAPTER); 3712 3713 for (count = 0; count < 30; count ++) { 3714 diag0val = CHIPREG_READ32(&ioc->chip->Diagnostic); 3715 if (!(diag0val & MPI_DIAG_RESET_ADAPTER)) { 3716 ddlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "RESET_ADAPTER cleared, count=%d\n", 3717 ioc->name, count)); 3718 break; 3719 } 3720 /* wait .1 sec */ 3721 if (sleepFlag == CAN_SLEEP) { 3722 msleep (100); 3723 } else { 3724 mdelay (100); 3725 } 3726 } 3727 3728 if ( count == 30 ) { 3729 ddlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "downloadboot failed! " 3730 "Unable to get MPI_DIAG_DRWE mode, diag0val=%x\n", 3731 ioc->name, diag0val)); 3732 return -3; 3733 } 3734 3735 CHIPREG_WRITE32(&ioc->chip->WriteSequence, 0xFF); 3736 CHIPREG_WRITE32(&ioc->chip->WriteSequence, MPI_WRSEQ_1ST_KEY_VALUE); 3737 CHIPREG_WRITE32(&ioc->chip->WriteSequence, MPI_WRSEQ_2ND_KEY_VALUE); 3738 CHIPREG_WRITE32(&ioc->chip->WriteSequence, MPI_WRSEQ_3RD_KEY_VALUE); 3739 CHIPREG_WRITE32(&ioc->chip->WriteSequence, MPI_WRSEQ_4TH_KEY_VALUE); 3740 CHIPREG_WRITE32(&ioc->chip->WriteSequence, MPI_WRSEQ_5TH_KEY_VALUE); 3741 3742 /* Set the DiagRwEn and Disable ARM bits */ 3743 CHIPREG_WRITE32(&ioc->chip->Diagnostic, (MPI_DIAG_RW_ENABLE | MPI_DIAG_DISABLE_ARM)); 3744 3745 fwSize = (pFwHeader->ImageSize + 3)/4; 3746 ptrFw = (u32 *) pFwHeader; 3747 3748 /* Write the LoadStartAddress to the DiagRw Address Register 3749 * using Programmed IO 3750 */ 3751 if (ioc->errata_flag_1064) 3752 pci_enable_io_access(ioc->pcidev); 3753 3754 CHIPREG_PIO_WRITE32(&ioc->pio_chip->DiagRwAddress, pFwHeader->LoadStartAddress); 3755 ddlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "LoadStart addr written 0x%x \n", 3756 ioc->name, pFwHeader->LoadStartAddress)); 3757 3758 ddlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Write FW Image: 0x%x bytes @ %p\n", 3759 ioc->name, fwSize*4, ptrFw)); 3760 while (fwSize--) { 3761 CHIPREG_PIO_WRITE32(&ioc->pio_chip->DiagRwData, *ptrFw++); 3762 } 3763 3764 nextImage = pFwHeader->NextImageHeaderOffset; 3765 while (nextImage) { 3766 pExtImage = (MpiExtImageHeader_t *) ((char *)pFwHeader + nextImage); 3767 3768 load_addr = pExtImage->LoadStartAddress; 3769 3770 fwSize = (pExtImage->ImageSize + 3) >> 2; 3771 ptrFw = (u32 *)pExtImage; 3772 3773 ddlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Write Ext Image: 0x%x (%d) bytes @ %p load_addr=%x\n", 3774 ioc->name, 
fwSize*4, fwSize*4, ptrFw, load_addr)); 3775 CHIPREG_PIO_WRITE32(&ioc->pio_chip->DiagRwAddress, load_addr); 3776 3777 while (fwSize--) { 3778 CHIPREG_PIO_WRITE32(&ioc->pio_chip->DiagRwData, *ptrFw++); 3779 } 3780 nextImage = pExtImage->NextImageHeaderOffset; 3781 } 3782 3783 /* Write the IopResetVectorRegAddr */ 3784 ddlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Write IopResetVector Addr=%x! \n", ioc->name, pFwHeader->IopResetRegAddr)); 3785 CHIPREG_PIO_WRITE32(&ioc->pio_chip->DiagRwAddress, pFwHeader->IopResetRegAddr); 3786 3787 /* Write the IopResetVectorValue */ 3788 ddlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Write IopResetVector Value=%x! \n", ioc->name, pFwHeader->IopResetVectorValue)); 3789 CHIPREG_PIO_WRITE32(&ioc->pio_chip->DiagRwData, pFwHeader->IopResetVectorValue); 3790 3791 /* Clear the internal flash bad bit - autoincrementing register, 3792 * so must do two writes. 3793 */ 3794 if (ioc->bus_type == SPI) { 3795 /* 3796 * 1030 and 1035 H/W errata, workaround to access 3797 * the ClearFlashBadSignatureBit 3798 */ 3799 CHIPREG_PIO_WRITE32(&ioc->pio_chip->DiagRwAddress, 0x3F000000); 3800 diagRwData = CHIPREG_PIO_READ32(&ioc->pio_chip->DiagRwData); 3801 diagRwData |= 0x40000000; 3802 CHIPREG_PIO_WRITE32(&ioc->pio_chip->DiagRwAddress, 0x3F000000); 3803 CHIPREG_PIO_WRITE32(&ioc->pio_chip->DiagRwData, diagRwData); 3804 3805 } else /* if((ioc->bus_type == SAS) || (ioc->bus_type == FC)) */ { 3806 diag0val = CHIPREG_READ32(&ioc->chip->Diagnostic); 3807 CHIPREG_WRITE32(&ioc->chip->Diagnostic, diag0val | 3808 MPI_DIAG_CLEAR_FLASH_BAD_SIG); 3809 3810 /* wait 1 msec */ 3811 if (sleepFlag == CAN_SLEEP) { 3812 msleep (1); 3813 } else { 3814 mdelay (1); 3815 } 3816 } 3817 3818 if (ioc->errata_flag_1064) 3819 pci_disable_io_access(ioc->pcidev); 3820 3821 diag0val = CHIPREG_READ32(&ioc->chip->Diagnostic); 3822 ddlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "downloadboot diag0val=%x, " 3823 "turning off PREVENT_IOC_BOOT, DISABLE_ARM, RW_ENABLE\n", 3824 ioc->name, diag0val)); 3825 diag0val &= ~(MPI_DIAG_PREVENT_IOC_BOOT | MPI_DIAG_DISABLE_ARM | MPI_DIAG_RW_ENABLE); 3826 ddlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "downloadboot now diag0val=%x\n", 3827 ioc->name, diag0val)); 3828 CHIPREG_WRITE32(&ioc->chip->Diagnostic, diag0val); 3829 3830 /* Write 0xFF to reset the sequencer */ 3831 CHIPREG_WRITE32(&ioc->chip->WriteSequence, 0xFF); 3832 3833 if (ioc->bus_type == SAS) { 3834 ioc_state = mpt_GetIocState(ioc, 0); 3835 if ( (GetIocFacts(ioc, sleepFlag, 3836 MPT_HOSTEVENT_IOC_BRINGUP)) != 0 ) { 3837 ddlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "GetIocFacts failed: IocState=%x\n", 3838 ioc->name, ioc_state)); 3839 return -EFAULT; 3840 } 3841 } 3842 3843 for (count=0; count<HZ*20; count++) { 3844 if ((ioc_state = mpt_GetIocState(ioc, 0)) & MPI_IOC_STATE_READY) { 3845 ddlprintk(ioc, printk(MYIOC_s_DEBUG_FMT 3846 "downloadboot successful! (count=%d) IocState=%x\n", 3847 ioc->name, count, ioc_state)); 3848 if (ioc->bus_type == SAS) { 3849 return 0; 3850 } 3851 if ((SendIocInit(ioc, sleepFlag)) != 0) { 3852 ddlprintk(ioc, printk(MYIOC_s_DEBUG_FMT 3853 "downloadboot: SendIocInit failed\n", 3854 ioc->name)); 3855 return -EFAULT; 3856 } 3857 ddlprintk(ioc, printk(MYIOC_s_DEBUG_FMT 3858 "downloadboot: SendIocInit successful\n", 3859 ioc->name)); 3860 return 0; 3861 } 3862 if (sleepFlag == CAN_SLEEP) { 3863 msleep (10); 3864 } else { 3865 mdelay (10); 3866 } 3867 } 3868 ddlprintk(ioc, printk(MYIOC_s_DEBUG_FMT 3869 "downloadboot failed! 
IocState=%x\n",ioc->name, ioc_state)); 3870 return -EFAULT; 3871 } 3872 3873 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 3874 /** 3875 * KickStart - Perform hard reset of MPT adapter. 3876 * @ioc: Pointer to MPT_ADAPTER structure 3877 * @force: Force hard reset 3878 * @sleepFlag: Specifies whether the process can sleep 3879 * 3880 * This routine places MPT adapter in diagnostic mode via the 3881 * WriteSequence register, and then performs a hard reset of adapter 3882 * via the Diagnostic register. 3883 * 3884 * Inputs: sleepflag - CAN_SLEEP (non-interrupt thread) 3885 * or NO_SLEEP (interrupt thread, use mdelay) 3886 * force - 1 if doorbell active, board fault state 3887 * board operational, IOC_RECOVERY or 3888 * IOC_BRINGUP and there is an alt_ioc. 3889 * 0 else 3890 * 3891 * Returns: 3892 * 1 - hard reset, READY 3893 * 0 - no reset due to History bit, READY 3894 * -1 - no reset due to History bit but not READY 3895 * OR reset but failed to come READY 3896 * -2 - no reset, could not enter DIAG mode 3897 * -3 - reset but bad FW bit 3898 */ 3899 static int 3900 KickStart(MPT_ADAPTER *ioc, int force, int sleepFlag) 3901 { 3902 int hard_reset_done = 0; 3903 u32 ioc_state=0; 3904 int cnt,cntdn; 3905 3906 dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "KickStarting!\n", ioc->name)); 3907 if (ioc->bus_type == SPI) { 3908 /* Always issue a Msg Unit Reset first. This will clear some 3909 * SCSI bus hang conditions. 3910 */ 3911 SendIocReset(ioc, MPI_FUNCTION_IOC_MESSAGE_UNIT_RESET, sleepFlag); 3912 3913 if (sleepFlag == CAN_SLEEP) { 3914 msleep (1000); 3915 } else { 3916 mdelay (1000); 3917 } 3918 } 3919 3920 hard_reset_done = mpt_diag_reset(ioc, force, sleepFlag); 3921 if (hard_reset_done < 0) 3922 return hard_reset_done; 3923 3924 dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Diagnostic reset successful!\n", 3925 ioc->name)); 3926 3927 cntdn = ((sleepFlag == CAN_SLEEP) ? HZ : 1000) * 2; /* 2 seconds */ 3928 for (cnt=0; cnt<cntdn; cnt++) { 3929 ioc_state = mpt_GetIocState(ioc, 1); 3930 if ((ioc_state == MPI_IOC_STATE_READY) || (ioc_state == MPI_IOC_STATE_OPERATIONAL)) { 3931 dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "KickStart successful! (cnt=%d)\n", 3932 ioc->name, cnt)); 3933 return hard_reset_done; 3934 } 3935 if (sleepFlag == CAN_SLEEP) { 3936 msleep (10); 3937 } else { 3938 mdelay (10); 3939 } 3940 } 3941 3942 dinitprintk(ioc, printk(MYIOC_s_ERR_FMT "Failed to come READY after reset! IocState=%x\n", 3943 ioc->name, mpt_GetIocState(ioc, 0))); 3944 return -1; 3945 } 3946 3947 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 3948 /** 3949 * mpt_diag_reset - Perform hard reset of the adapter. 3950 * @ioc: Pointer to MPT_ADAPTER structure 3951 * @ignore: Set if to honor and clear to ignore 3952 * the reset history bit 3953 * @sleepFlag: CAN_SLEEP if called in a non-interrupt thread, 3954 * else set to NO_SLEEP (use mdelay instead) 3955 * 3956 * This routine places the adapter in diagnostic mode via the 3957 * WriteSequence register and then performs a hard reset of adapter 3958 * via the Diagnostic register. Adapter should be in ready state 3959 * upon successful completion. 
3960 * 3961 * Returns: 1 hard reset successful 3962 * 0 no reset performed because reset history bit set 3963 * -2 enabling diagnostic mode failed 3964 * -3 diagnostic reset failed 3965 */ 3966 static int 3967 mpt_diag_reset(MPT_ADAPTER *ioc, int ignore, int sleepFlag) 3968 { 3969 u32 diag0val; 3970 u32 doorbell; 3971 int hard_reset_done = 0; 3972 int count = 0; 3973 u32 diag1val = 0; 3974 MpiFwHeader_t *cached_fw; /* Pointer to FW */ 3975 u8 cb_idx; 3976 3977 /* Clear any existing interrupts */ 3978 CHIPREG_WRITE32(&ioc->chip->IntStatus, 0); 3979 3980 if (ioc->pcidev->device == MPI_MANUFACTPAGE_DEVID_SAS1078) { 3981 3982 if (!ignore) 3983 return 0; 3984 3985 drsprintk(ioc, printk(MYIOC_s_WARN_FMT "%s: Doorbell=%p; 1078 reset " 3986 "address=%p\n", ioc->name, __func__, 3987 &ioc->chip->Doorbell, &ioc->chip->Reset_1078)); 3988 CHIPREG_WRITE32(&ioc->chip->Reset_1078, 0x07); 3989 if (sleepFlag == CAN_SLEEP) 3990 msleep(1); 3991 else 3992 mdelay(1); 3993 3994 /* 3995 * Call each currently registered protocol IOC reset handler 3996 * with pre-reset indication. 3997 * NOTE: If we're doing _IOC_BRINGUP, there can be no 3998 * MptResetHandlers[] registered yet. 3999 */ 4000 for (cb_idx = MPT_MAX_PROTOCOL_DRIVERS-1; cb_idx; cb_idx--) { 4001 if (MptResetHandlers[cb_idx]) 4002 (*(MptResetHandlers[cb_idx]))(ioc, 4003 MPT_IOC_PRE_RESET); 4004 } 4005 4006 for (count = 0; count < 60; count ++) { 4007 doorbell = CHIPREG_READ32(&ioc->chip->Doorbell); 4008 doorbell &= MPI_IOC_STATE_MASK; 4009 4010 drsprintk(ioc, printk(MYIOC_s_DEBUG_FMT 4011 "looking for READY STATE: doorbell=%x" 4012 " count=%d\n", 4013 ioc->name, doorbell, count)); 4014 4015 if (doorbell == MPI_IOC_STATE_READY) { 4016 return 1; 4017 } 4018 4019 /* wait 1 sec */ 4020 if (sleepFlag == CAN_SLEEP) 4021 msleep(1000); 4022 else 4023 mdelay(1000); 4024 } 4025 return -1; 4026 } 4027 4028 /* Use "Diagnostic reset" method! (only thing available!) */ 4029 diag0val = CHIPREG_READ32(&ioc->chip->Diagnostic); 4030 4031 if (ioc->debug_level & MPT_DEBUG) { 4032 if (ioc->alt_ioc) 4033 diag1val = CHIPREG_READ32(&ioc->alt_ioc->chip->Diagnostic); 4034 dprintk(ioc, printk(MYIOC_s_DEBUG_FMT "DbG1: diag0=%08x, diag1=%08x\n", 4035 ioc->name, diag0val, diag1val)); 4036 } 4037 4038 /* Do the reset if we are told to ignore the reset history 4039 * or if the reset history is 0 4040 */ 4041 if (ignore || !(diag0val & MPI_DIAG_RESET_HISTORY)) { 4042 while ((diag0val & MPI_DIAG_DRWE) == 0) { 4043 /* Write magic sequence to WriteSequence register 4044 * Loop until in diagnostic mode 4045 */ 4046 CHIPREG_WRITE32(&ioc->chip->WriteSequence, 0xFF); 4047 CHIPREG_WRITE32(&ioc->chip->WriteSequence, MPI_WRSEQ_1ST_KEY_VALUE); 4048 CHIPREG_WRITE32(&ioc->chip->WriteSequence, MPI_WRSEQ_2ND_KEY_VALUE); 4049 CHIPREG_WRITE32(&ioc->chip->WriteSequence, MPI_WRSEQ_3RD_KEY_VALUE); 4050 CHIPREG_WRITE32(&ioc->chip->WriteSequence, MPI_WRSEQ_4TH_KEY_VALUE); 4051 CHIPREG_WRITE32(&ioc->chip->WriteSequence, MPI_WRSEQ_5TH_KEY_VALUE); 4052 4053 /* wait 100 msec */ 4054 if (sleepFlag == CAN_SLEEP) { 4055 msleep (100); 4056 } else { 4057 mdelay (100); 4058 } 4059 4060 count++; 4061 if (count > 20) { 4062 printk(MYIOC_s_ERR_FMT "Enable Diagnostic mode FAILED! 
(%02xh)\n", 4063 ioc->name, diag0val); 4064 return -2; 4065 4066 } 4067 4068 diag0val = CHIPREG_READ32(&ioc->chip->Diagnostic); 4069 4070 dprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Wrote magic DiagWriteEn sequence (%x)\n", 4071 ioc->name, diag0val)); 4072 } 4073 4074 if (ioc->debug_level & MPT_DEBUG) { 4075 if (ioc->alt_ioc) 4076 diag1val = CHIPREG_READ32(&ioc->alt_ioc->chip->Diagnostic); 4077 dprintk(ioc, printk(MYIOC_s_DEBUG_FMT "DbG2: diag0=%08x, diag1=%08x\n", 4078 ioc->name, diag0val, diag1val)); 4079 } 4080 /* 4081 * Disable the ARM (Bug fix) 4082 * 4083 */ 4084 CHIPREG_WRITE32(&ioc->chip->Diagnostic, diag0val | MPI_DIAG_DISABLE_ARM); 4085 mdelay(1); 4086 4087 /* 4088 * Now hit the reset bit in the Diagnostic register 4089 * (THE BIG HAMMER!) (Clears DRWE bit). 4090 */ 4091 CHIPREG_WRITE32(&ioc->chip->Diagnostic, diag0val | MPI_DIAG_RESET_ADAPTER); 4092 hard_reset_done = 1; 4093 dprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Diagnostic reset performed\n", 4094 ioc->name)); 4095 4096 /* 4097 * Call each currently registered protocol IOC reset handler 4098 * with pre-reset indication. 4099 * NOTE: If we're doing _IOC_BRINGUP, there can be no 4100 * MptResetHandlers[] registered yet. 4101 */ 4102 for (cb_idx = MPT_MAX_PROTOCOL_DRIVERS-1; cb_idx; cb_idx--) { 4103 if (MptResetHandlers[cb_idx]) { 4104 mpt_signal_reset(cb_idx, 4105 ioc, MPT_IOC_PRE_RESET); 4106 if (ioc->alt_ioc) { 4107 mpt_signal_reset(cb_idx, 4108 ioc->alt_ioc, MPT_IOC_PRE_RESET); 4109 } 4110 } 4111 } 4112 4113 if (ioc->cached_fw) 4114 cached_fw = (MpiFwHeader_t *)ioc->cached_fw; 4115 else if (ioc->alt_ioc && ioc->alt_ioc->cached_fw) 4116 cached_fw = (MpiFwHeader_t *)ioc->alt_ioc->cached_fw; 4117 else 4118 cached_fw = NULL; 4119 if (cached_fw) { 4120 /* If the DownloadBoot operation fails, the 4121 * IOC will be left unusable. This is a fatal error 4122 * case. _diag_reset will return < 0 4123 */ 4124 for (count = 0; count < 30; count ++) { 4125 diag0val = CHIPREG_READ32(&ioc->chip->Diagnostic); 4126 if (!(diag0val & MPI_DIAG_RESET_ADAPTER)) { 4127 break; 4128 } 4129 4130 dprintk(ioc, printk(MYIOC_s_DEBUG_FMT "cached_fw: diag0val=%x count=%d\n", 4131 ioc->name, diag0val, count)); 4132 /* wait 1 sec */ 4133 if (sleepFlag == CAN_SLEEP) { 4134 msleep (1000); 4135 } else { 4136 mdelay (1000); 4137 } 4138 } 4139 if ((count = mpt_downloadboot(ioc, cached_fw, sleepFlag)) < 0) { 4140 printk(MYIOC_s_WARN_FMT 4141 "firmware downloadboot failure (%d)!\n", ioc->name, count); 4142 } 4143 4144 } else { 4145 /* Wait for FW to reload and for board 4146 * to go to the READY state. 4147 * Maximum wait is 60 seconds. 4148 * If fail, no error will check again 4149 * with calling program. 4150 */ 4151 for (count = 0; count < 60; count ++) { 4152 doorbell = CHIPREG_READ32(&ioc->chip->Doorbell); 4153 doorbell &= MPI_IOC_STATE_MASK; 4154 4155 drsprintk(ioc, printk(MYIOC_s_DEBUG_FMT 4156 "looking for READY STATE: doorbell=%x" 4157 " count=%d\n", ioc->name, doorbell, count)); 4158 4159 if (doorbell == MPI_IOC_STATE_READY) { 4160 break; 4161 } 4162 4163 /* wait 1 sec */ 4164 if (sleepFlag == CAN_SLEEP) { 4165 msleep (1000); 4166 } else { 4167 mdelay (1000); 4168 } 4169 } 4170 4171 if (doorbell != MPI_IOC_STATE_READY) 4172 printk(MYIOC_s_ERR_FMT "Failed to come READY " 4173 "after reset! 
IocState=%x", ioc->name, 4174 doorbell); 4175 } 4176 } 4177 4178 diag0val = CHIPREG_READ32(&ioc->chip->Diagnostic); 4179 if (ioc->debug_level & MPT_DEBUG) { 4180 if (ioc->alt_ioc) 4181 diag1val = CHIPREG_READ32(&ioc->alt_ioc->chip->Diagnostic); 4182 dprintk(ioc, printk(MYIOC_s_DEBUG_FMT "DbG3: diag0=%08x, diag1=%08x\n", 4183 ioc->name, diag0val, diag1val)); 4184 } 4185 4186 /* Clear RESET_HISTORY bit! Place board in the 4187 * diagnostic mode to update the diag register. 4188 */ 4189 diag0val = CHIPREG_READ32(&ioc->chip->Diagnostic); 4190 count = 0; 4191 while ((diag0val & MPI_DIAG_DRWE) == 0) { 4192 /* Write magic sequence to WriteSequence register 4193 * Loop until in diagnostic mode 4194 */ 4195 CHIPREG_WRITE32(&ioc->chip->WriteSequence, 0xFF); 4196 CHIPREG_WRITE32(&ioc->chip->WriteSequence, MPI_WRSEQ_1ST_KEY_VALUE); 4197 CHIPREG_WRITE32(&ioc->chip->WriteSequence, MPI_WRSEQ_2ND_KEY_VALUE); 4198 CHIPREG_WRITE32(&ioc->chip->WriteSequence, MPI_WRSEQ_3RD_KEY_VALUE); 4199 CHIPREG_WRITE32(&ioc->chip->WriteSequence, MPI_WRSEQ_4TH_KEY_VALUE); 4200 CHIPREG_WRITE32(&ioc->chip->WriteSequence, MPI_WRSEQ_5TH_KEY_VALUE); 4201 4202 /* wait 100 msec */ 4203 if (sleepFlag == CAN_SLEEP) { 4204 msleep (100); 4205 } else { 4206 mdelay (100); 4207 } 4208 4209 count++; 4210 if (count > 20) { 4211 printk(MYIOC_s_ERR_FMT "Enable Diagnostic mode FAILED! (%02xh)\n", 4212 ioc->name, diag0val); 4213 break; 4214 } 4215 diag0val = CHIPREG_READ32(&ioc->chip->Diagnostic); 4216 } 4217 diag0val &= ~MPI_DIAG_RESET_HISTORY; 4218 CHIPREG_WRITE32(&ioc->chip->Diagnostic, diag0val); 4219 diag0val = CHIPREG_READ32(&ioc->chip->Diagnostic); 4220 if (diag0val & MPI_DIAG_RESET_HISTORY) { 4221 printk(MYIOC_s_WARN_FMT "ResetHistory bit failed to clear!\n", 4222 ioc->name); 4223 } 4224 4225 /* Disable Diagnostic Mode 4226 */ 4227 CHIPREG_WRITE32(&ioc->chip->WriteSequence, 0xFFFFFFFF); 4228 4229 /* Check FW reload status flags. 4230 */ 4231 diag0val = CHIPREG_READ32(&ioc->chip->Diagnostic); 4232 if (diag0val & (MPI_DIAG_FLASH_BAD_SIG | MPI_DIAG_RESET_ADAPTER | MPI_DIAG_DISABLE_ARM)) { 4233 printk(MYIOC_s_ERR_FMT "Diagnostic reset FAILED! (%02xh)\n", 4234 ioc->name, diag0val); 4235 return -3; 4236 } 4237 4238 if (ioc->debug_level & MPT_DEBUG) { 4239 if (ioc->alt_ioc) 4240 diag1val = CHIPREG_READ32(&ioc->alt_ioc->chip->Diagnostic); 4241 dprintk(ioc, printk(MYIOC_s_DEBUG_FMT "DbG4: diag0=%08x, diag1=%08x\n", 4242 ioc->name, diag0val, diag1val)); 4243 } 4244 4245 /* 4246 * Reset flag that says we've enabled event notification 4247 */ 4248 ioc->facts.EventState = 0; 4249 4250 if (ioc->alt_ioc) 4251 ioc->alt_ioc->facts.EventState = 0; 4252 4253 return hard_reset_done; 4254 } 4255 4256 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 4257 /** 4258 * SendIocReset - Send IOCReset request to MPT adapter. 4259 * @ioc: Pointer to MPT_ADAPTER structure 4260 * @reset_type: reset type, expected values are 4261 * %MPI_FUNCTION_IOC_MESSAGE_UNIT_RESET or %MPI_FUNCTION_IO_UNIT_RESET 4262 * @sleepFlag: Specifies whether the process can sleep 4263 * 4264 * Send IOCReset request to the MPT adapter. 4265 * 4266 * Returns 0 for success, non-zero for failure. 
4267 */ 4268 static int 4269 SendIocReset(MPT_ADAPTER *ioc, u8 reset_type, int sleepFlag) 4270 { 4271 int r; 4272 u32 state; 4273 int cntdn, count; 4274 4275 drsprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Sending IOC reset(0x%02x)!\n", 4276 ioc->name, reset_type)); 4277 CHIPREG_WRITE32(&ioc->chip->Doorbell, reset_type<<MPI_DOORBELL_FUNCTION_SHIFT); 4278 if ((r = WaitForDoorbellAck(ioc, 5, sleepFlag)) < 0) 4279 return r; 4280 4281 /* FW ACK'd request, wait for READY state 4282 */ 4283 count = 0; 4284 cntdn = ((sleepFlag == CAN_SLEEP) ? HZ : 1000) * 15; /* 15 seconds */ 4285 4286 while ((state = mpt_GetIocState(ioc, 1)) != MPI_IOC_STATE_READY) { 4287 cntdn--; 4288 count++; 4289 if (!cntdn) { 4290 if (sleepFlag != CAN_SLEEP) 4291 count *= 10; 4292 4293 printk(MYIOC_s_ERR_FMT 4294 "Wait IOC_READY state (0x%x) timeout(%d)!\n", 4295 ioc->name, state, (int)((count+5)/HZ)); 4296 return -ETIME; 4297 } 4298 4299 if (sleepFlag == CAN_SLEEP) { 4300 msleep(1); 4301 } else { 4302 mdelay (1); /* 1 msec delay */ 4303 } 4304 } 4305 4306 /* TODO! 4307 * Cleanup all event stuff for this IOC; re-issue EventNotification 4308 * request if needed. 4309 */ 4310 if (ioc->facts.Function) 4311 ioc->facts.EventState = 0; 4312 4313 return 0; 4314 } 4315 4316 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 4317 /** 4318 * initChainBuffers - Allocate memory for and initialize chain buffers 4319 * @ioc: Pointer to MPT_ADAPTER structure 4320 * 4321 * Allocates memory for and initializes chain buffers, 4322 * chain buffer control arrays and spinlock. 4323 */ 4324 static int 4325 initChainBuffers(MPT_ADAPTER *ioc) 4326 { 4327 u8 *mem; 4328 int sz, ii, num_chain; 4329 int scale, num_sge, numSGE; 4330 4331 /* ReqToChain size must equal the req_depth 4332 * index = req_idx 4333 */ 4334 if (ioc->ReqToChain == NULL) { 4335 sz = ioc->req_depth * sizeof(int); 4336 mem = kmalloc(sz, GFP_ATOMIC); 4337 if (mem == NULL) 4338 return -1; 4339 4340 ioc->ReqToChain = (int *) mem; 4341 dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "ReqToChain alloc @ %p, sz=%d bytes\n", 4342 ioc->name, mem, sz)); 4343 mem = kmalloc(sz, GFP_ATOMIC); 4344 if (mem == NULL) 4345 return -1; 4346 4347 ioc->RequestNB = (int *) mem; 4348 dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "RequestNB alloc @ %p, sz=%d bytes\n", 4349 ioc->name, mem, sz)); 4350 } 4351 for (ii = 0; ii < ioc->req_depth; ii++) { 4352 ioc->ReqToChain[ii] = MPT_HOST_NO_CHAIN; 4353 } 4354 4355 /* ChainToChain size must equal the total number 4356 * of chain buffers to be allocated. 
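 * (Worked example with made-up numbers, using the sizing formulas
 *  spelled out just below: assume req_sz = 96, 64-bit SGEs with
 *  SGE_size = 12 and MaxChainDepth = 8.  Then scale = 96/12 = 8,
 *  num_sge = 8 + (96-60)/12 = 11, and numSGE = 7*7 + 8 + 3 = 60
 *  before it is capped to the SG depth limit for the bus type.)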
4357 * index = chain_idx 4358 * 4359 * Calculate the number of chain buffers needed(plus 1) per I/O 4360 * then multiply the maximum number of simultaneous cmds 4361 * 4362 * num_sge = num sge in request frame + last chain buffer 4363 * scale = num sge per chain buffer if no chain element 4364 */ 4365 scale = ioc->req_sz / ioc->SGE_size; 4366 if (ioc->sg_addr_size == sizeof(u64)) 4367 num_sge = scale + (ioc->req_sz - 60) / ioc->SGE_size; 4368 else 4369 num_sge = 1 + scale + (ioc->req_sz - 64) / ioc->SGE_size; 4370 4371 if (ioc->sg_addr_size == sizeof(u64)) { 4372 numSGE = (scale - 1) * (ioc->facts.MaxChainDepth-1) + scale + 4373 (ioc->req_sz - 60) / ioc->SGE_size; 4374 } else { 4375 numSGE = 1 + (scale - 1) * (ioc->facts.MaxChainDepth-1) + 4376 scale + (ioc->req_sz - 64) / ioc->SGE_size; 4377 } 4378 dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "num_sge=%d numSGE=%d\n", 4379 ioc->name, num_sge, numSGE)); 4380 4381 if (ioc->bus_type == FC) { 4382 if (numSGE > MPT_SCSI_FC_SG_DEPTH) 4383 numSGE = MPT_SCSI_FC_SG_DEPTH; 4384 } else { 4385 if (numSGE > MPT_SCSI_SG_DEPTH) 4386 numSGE = MPT_SCSI_SG_DEPTH; 4387 } 4388 4389 num_chain = 1; 4390 while (numSGE - num_sge > 0) { 4391 num_chain++; 4392 num_sge += (scale - 1); 4393 } 4394 num_chain++; 4395 4396 dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Now numSGE=%d num_sge=%d num_chain=%d\n", 4397 ioc->name, numSGE, num_sge, num_chain)); 4398 4399 if (ioc->bus_type == SPI) 4400 num_chain *= MPT_SCSI_CAN_QUEUE; 4401 else if (ioc->bus_type == SAS) 4402 num_chain *= MPT_SAS_CAN_QUEUE; 4403 else 4404 num_chain *= MPT_FC_CAN_QUEUE; 4405 4406 ioc->num_chain = num_chain; 4407 4408 sz = num_chain * sizeof(int); 4409 if (ioc->ChainToChain == NULL) { 4410 mem = kmalloc(sz, GFP_ATOMIC); 4411 if (mem == NULL) 4412 return -1; 4413 4414 ioc->ChainToChain = (int *) mem; 4415 dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "ChainToChain alloc @ %p, sz=%d bytes\n", 4416 ioc->name, mem, sz)); 4417 } else { 4418 mem = (u8 *) ioc->ChainToChain; 4419 } 4420 memset(mem, 0xFF, sz); 4421 return num_chain; 4422 } 4423 4424 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 4425 /** 4426 * PrimeIocFifos - Initialize IOC request and reply FIFOs. 4427 * @ioc: Pointer to MPT_ADAPTER structure 4428 * 4429 * This routine allocates memory for the MPT reply and request frame 4430 * pools (if necessary), and primes the IOC reply FIFO with 4431 * reply frames. 4432 * 4433 * Returns 0 for success, non-zero for failure. 4434 */ 4435 static int 4436 PrimeIocFifos(MPT_ADAPTER *ioc) 4437 { 4438 MPT_FRAME_HDR *mf; 4439 unsigned long flags; 4440 dma_addr_t alloc_dma; 4441 u8 *mem; 4442 int i, reply_sz, sz, total_size, num_chain; 4443 u64 dma_mask; 4444 4445 dma_mask = 0; 4446 4447 /* Prime reply FIFO... 
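 * Priming means handing the IOC the bus address of every reply frame
 * up front, so it always has a buffer to DMA a reply into.  The loop
 * near the end of this routine does, per reply frame (sketch of the
 * code below):
 *
 *	CHIPREG_WRITE32(&ioc->chip->ReplyFifo, alloc_dma);
 *	alloc_dma += ioc->reply_sz;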
*/ 4448 4449 if (ioc->reply_frames == NULL) { 4450 if ( (num_chain = initChainBuffers(ioc)) < 0) 4451 return -1; 4452 /* 4453 * 1078 errata workaround for the 36GB limitation 4454 */ 4455 if (ioc->pcidev->device == MPI_MANUFACTPAGE_DEVID_SAS1078 && 4456 ioc->dma_mask > DMA_BIT_MASK(35)) { 4457 if (!pci_set_dma_mask(ioc->pcidev, DMA_BIT_MASK(32)) 4458 && !pci_set_consistent_dma_mask(ioc->pcidev, 4459 DMA_BIT_MASK(32))) { 4460 dma_mask = DMA_BIT_MASK(35); 4461 d36memprintk(ioc, printk(MYIOC_s_DEBUG_FMT 4462 "setting 35 bit addressing for " 4463 "Request/Reply/Chain and Sense Buffers\n", 4464 ioc->name)); 4465 } else { 4466 /*Reseting DMA mask to 64 bit*/ 4467 pci_set_dma_mask(ioc->pcidev, 4468 DMA_BIT_MASK(64)); 4469 pci_set_consistent_dma_mask(ioc->pcidev, 4470 DMA_BIT_MASK(64)); 4471 4472 printk(MYIOC_s_ERR_FMT 4473 "failed setting 35 bit addressing for " 4474 "Request/Reply/Chain and Sense Buffers\n", 4475 ioc->name); 4476 return -1; 4477 } 4478 } 4479 4480 total_size = reply_sz = (ioc->reply_sz * ioc->reply_depth); 4481 dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "ReplyBuffer sz=%d bytes, ReplyDepth=%d\n", 4482 ioc->name, ioc->reply_sz, ioc->reply_depth)); 4483 dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "ReplyBuffer sz=%d[%x] bytes\n", 4484 ioc->name, reply_sz, reply_sz)); 4485 4486 sz = (ioc->req_sz * ioc->req_depth); 4487 dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "RequestBuffer sz=%d bytes, RequestDepth=%d\n", 4488 ioc->name, ioc->req_sz, ioc->req_depth)); 4489 dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "RequestBuffer sz=%d[%x] bytes\n", 4490 ioc->name, sz, sz)); 4491 total_size += sz; 4492 4493 sz = num_chain * ioc->req_sz; /* chain buffer pool size */ 4494 dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "ChainBuffer sz=%d bytes, ChainDepth=%d\n", 4495 ioc->name, ioc->req_sz, num_chain)); 4496 dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "ChainBuffer sz=%d[%x] bytes num_chain=%d\n", 4497 ioc->name, sz, sz, num_chain)); 4498 4499 total_size += sz; 4500 mem = dma_alloc_coherent(&ioc->pcidev->dev, total_size, 4501 &alloc_dma, GFP_KERNEL); 4502 if (mem == NULL) { 4503 printk(MYIOC_s_ERR_FMT "Unable to allocate Reply, Request, Chain Buffers!\n", 4504 ioc->name); 4505 goto out_fail; 4506 } 4507 4508 dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Total alloc @ %p[%p], sz=%d[%x] bytes\n", 4509 ioc->name, mem, (void *)(ulong)alloc_dma, total_size, total_size)); 4510 4511 memset(mem, 0, total_size); 4512 ioc->alloc_total += total_size; 4513 ioc->alloc = mem; 4514 ioc->alloc_dma = alloc_dma; 4515 ioc->alloc_sz = total_size; 4516 ioc->reply_frames = (MPT_FRAME_HDR *) mem; 4517 ioc->reply_frames_low_dma = (u32) (alloc_dma & 0xFFFFFFFF); 4518 4519 dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "ReplyBuffers @ %p[%p]\n", 4520 ioc->name, ioc->reply_frames, (void *)(ulong)alloc_dma)); 4521 4522 alloc_dma += reply_sz; 4523 mem += reply_sz; 4524 4525 /* Request FIFO - WE manage this! 
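 * The request frames sit inside the single coherent allocation made
 * above; rough layout of that allocation (illustrative only):
 *
 *	ioc->alloc / ioc->alloc_dma
 *	+--------------------------------------------+
 *	| reply frames   (reply_sz * reply_depth)    |
 *	+--------------------------------------------+
 *	| request frames (req_sz   * req_depth)      |
 *	+--------------------------------------------+
 *	| chain buffers  (req_sz   * num_chain)      |
 *	+--------------------------------------------+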
*/ 4526 4527 ioc->req_frames = (MPT_FRAME_HDR *) mem; 4528 ioc->req_frames_dma = alloc_dma; 4529 4530 dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "RequestBuffers @ %p[%p]\n", 4531 ioc->name, mem, (void *)(ulong)alloc_dma)); 4532 4533 ioc->req_frames_low_dma = (u32) (alloc_dma & 0xFFFFFFFF); 4534 4535 for (i = 0; i < ioc->req_depth; i++) { 4536 alloc_dma += ioc->req_sz; 4537 mem += ioc->req_sz; 4538 } 4539 4540 ioc->ChainBuffer = mem; 4541 ioc->ChainBufferDMA = alloc_dma; 4542 4543 dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "ChainBuffers @ %p(%p)\n", 4544 ioc->name, ioc->ChainBuffer, (void *)(ulong)ioc->ChainBufferDMA)); 4545 4546 /* Initialize the free chain Q. 4547 */ 4548 4549 INIT_LIST_HEAD(&ioc->FreeChainQ); 4550 4551 /* Post the chain buffers to the FreeChainQ. 4552 */ 4553 mem = (u8 *)ioc->ChainBuffer; 4554 for (i=0; i < num_chain; i++) { 4555 mf = (MPT_FRAME_HDR *) mem; 4556 list_add_tail(&mf->u.frame.linkage.list, &ioc->FreeChainQ); 4557 mem += ioc->req_sz; 4558 } 4559 4560 /* Initialize Request frames linked list 4561 */ 4562 alloc_dma = ioc->req_frames_dma; 4563 mem = (u8 *) ioc->req_frames; 4564 4565 spin_lock_irqsave(&ioc->FreeQlock, flags); 4566 INIT_LIST_HEAD(&ioc->FreeQ); 4567 for (i = 0; i < ioc->req_depth; i++) { 4568 mf = (MPT_FRAME_HDR *) mem; 4569 4570 /* Queue REQUESTs *internally*! */ 4571 list_add_tail(&mf->u.frame.linkage.list, &ioc->FreeQ); 4572 4573 mem += ioc->req_sz; 4574 } 4575 spin_unlock_irqrestore(&ioc->FreeQlock, flags); 4576 4577 sz = (ioc->req_depth * MPT_SENSE_BUFFER_ALLOC); 4578 ioc->sense_buf_pool = dma_alloc_coherent(&ioc->pcidev->dev, sz, 4579 &ioc->sense_buf_pool_dma, GFP_KERNEL); 4580 if (ioc->sense_buf_pool == NULL) { 4581 printk(MYIOC_s_ERR_FMT "Unable to allocate Sense Buffers!\n", 4582 ioc->name); 4583 goto out_fail; 4584 } 4585 4586 ioc->sense_buf_low_dma = (u32) (ioc->sense_buf_pool_dma & 0xFFFFFFFF); 4587 ioc->alloc_total += sz; 4588 dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "SenseBuffers @ %p[%p]\n", 4589 ioc->name, ioc->sense_buf_pool, (void *)(ulong)ioc->sense_buf_pool_dma)); 4590 4591 } 4592 4593 /* Post Reply frames to FIFO 4594 */ 4595 alloc_dma = ioc->alloc_dma; 4596 dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "ReplyBuffers @ %p[%p]\n", 4597 ioc->name, ioc->reply_frames, (void *)(ulong)alloc_dma)); 4598 4599 for (i = 0; i < ioc->reply_depth; i++) { 4600 /* Write each address to the IOC! 
*/ 4601 CHIPREG_WRITE32(&ioc->chip->ReplyFifo, alloc_dma); 4602 alloc_dma += ioc->reply_sz; 4603 } 4604 4605 if (dma_mask == DMA_BIT_MASK(35) && !pci_set_dma_mask(ioc->pcidev, 4606 ioc->dma_mask) && !pci_set_consistent_dma_mask(ioc->pcidev, 4607 ioc->dma_mask)) 4608 d36memprintk(ioc, printk(MYIOC_s_DEBUG_FMT 4609 "restoring 64 bit addressing\n", ioc->name)); 4610 4611 return 0; 4612 4613 out_fail: 4614 4615 if (ioc->alloc != NULL) { 4616 sz = ioc->alloc_sz; 4617 dma_free_coherent(&ioc->pcidev->dev, sz, ioc->alloc, 4618 ioc->alloc_dma); 4619 ioc->reply_frames = NULL; 4620 ioc->req_frames = NULL; 4621 ioc->alloc_total -= sz; 4622 } 4623 if (ioc->sense_buf_pool != NULL) { 4624 sz = (ioc->req_depth * MPT_SENSE_BUFFER_ALLOC); 4625 dma_free_coherent(&ioc->pcidev->dev, sz, ioc->sense_buf_pool, 4626 ioc->sense_buf_pool_dma); 4627 ioc->sense_buf_pool = NULL; 4628 } 4629 4630 if (dma_mask == DMA_BIT_MASK(35) && !pci_set_dma_mask(ioc->pcidev, 4631 DMA_BIT_MASK(64)) && !pci_set_consistent_dma_mask(ioc->pcidev, 4632 DMA_BIT_MASK(64))) 4633 d36memprintk(ioc, printk(MYIOC_s_DEBUG_FMT 4634 "restoring 64 bit addressing\n", ioc->name)); 4635 4636 return -1; 4637 } 4638 4639 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 4640 /** 4641 * mpt_handshake_req_reply_wait - Send MPT request to and receive reply 4642 * from IOC via doorbell handshake method. 4643 * @ioc: Pointer to MPT_ADAPTER structure 4644 * @reqBytes: Size of the request in bytes 4645 * @req: Pointer to MPT request frame 4646 * @replyBytes: Expected size of the reply in bytes 4647 * @u16reply: Pointer to area where reply should be written 4648 * @maxwait: Max wait time for a reply (in seconds) 4649 * @sleepFlag: Specifies whether the process can sleep 4650 * 4651 * NOTES: It is the callers responsibility to byte-swap fields in the 4652 * request which are greater than 1 byte in size. It is also the 4653 * callers responsibility to byte-swap response fields which are 4654 * greater than 1 byte in size. 4655 * 4656 * Returns 0 for success, non-zero for failure. 4657 */ 4658 static int 4659 mpt_handshake_req_reply_wait(MPT_ADAPTER *ioc, int reqBytes, u32 *req, 4660 int replyBytes, u16 *u16reply, int maxwait, int sleepFlag) 4661 { 4662 MPIDefaultReply_t *mptReply; 4663 int failcnt = 0; 4664 int t; 4665 4666 /* 4667 * Get ready to cache a handshake reply 4668 */ 4669 ioc->hs_reply_idx = 0; 4670 mptReply = (MPIDefaultReply_t *) ioc->hs_reply; 4671 mptReply->MsgLength = 0; 4672 4673 /* 4674 * Make sure there are no doorbells (WRITE 0 to IntStatus reg), 4675 * then tell IOC that we want to handshake a request of N words. 4676 * (WRITE u32val to Doorbell reg). 4677 */ 4678 CHIPREG_WRITE32(&ioc->chip->IntStatus, 0); 4679 CHIPREG_WRITE32(&ioc->chip->Doorbell, 4680 ((MPI_FUNCTION_HANDSHAKE<<MPI_DOORBELL_FUNCTION_SHIFT) | 4681 ((reqBytes/4)<<MPI_DOORBELL_ADD_DWORDS_SHIFT))); 4682 4683 /* 4684 * Wait for IOC's doorbell handshake int 4685 */ 4686 if ((t = WaitForDoorbellInt(ioc, 5, sleepFlag)) < 0) 4687 failcnt++; 4688 4689 dhsprintk(ioc, printk(MYIOC_s_DEBUG_FMT "HandShake request start reqBytes=%d, WaitCnt=%d%s\n", 4690 ioc->name, reqBytes, t, failcnt ? " - MISSING DOORBELL HANDSHAKE!" : "")); 4691 4692 /* Read doorbell and check for active bit */ 4693 if (!(CHIPREG_READ32(&ioc->chip->Doorbell) & MPI_DOORBELL_ACTIVE)) 4694 return -1; 4695 4696 /* 4697 * Clear doorbell int (WRITE 0 to IntStatus reg), 4698 * then wait for IOC to ACKnowledge that it's ready for 4699 * our handshake request. 
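 *
 * (For orientation, the whole doorbell handshake performed by this
 *  routine looks roughly like this -- a summary of the code, not a
 *  protocol spec:
 *
 *	1. Clear IntStatus, then write
 *	   (MPI_FUNCTION_HANDSHAKE << MPI_DOORBELL_FUNCTION_SHIFT) |
 *	   ((reqBytes/4) << MPI_DOORBELL_ADD_DWORDS_SHIFT) to the Doorbell.
 *	2. Wait for the doorbell interrupt and check MPI_DOORBELL_ACTIVE.
 *	3. Clear the interrupt and wait for the IOC's ACK (this step).
 *	4. Write the request one dword at a time to the Doorbell, waiting
 *	   for an ACK after each word.
 *	5. Read the reply back 16 bits at a time via WaitForDoorbellReply().
 * )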
4700 */ 4701 CHIPREG_WRITE32(&ioc->chip->IntStatus, 0); 4702 if (!failcnt && (t = WaitForDoorbellAck(ioc, 5, sleepFlag)) < 0) 4703 failcnt++; 4704 4705 if (!failcnt) { 4706 int ii; 4707 u8 *req_as_bytes = (u8 *) req; 4708 4709 /* 4710 * Stuff request words via doorbell handshake, 4711 * with ACK from IOC for each. 4712 */ 4713 for (ii = 0; !failcnt && ii < reqBytes/4; ii++) { 4714 u32 word = ((req_as_bytes[(ii*4) + 0] << 0) | 4715 (req_as_bytes[(ii*4) + 1] << 8) | 4716 (req_as_bytes[(ii*4) + 2] << 16) | 4717 (req_as_bytes[(ii*4) + 3] << 24)); 4718 4719 CHIPREG_WRITE32(&ioc->chip->Doorbell, word); 4720 if ((t = WaitForDoorbellAck(ioc, 5, sleepFlag)) < 0) 4721 failcnt++; 4722 } 4723 4724 dhsprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Handshake request frame (@%p) header\n", ioc->name, req)); 4725 DBG_DUMP_REQUEST_FRAME_HDR(ioc, (u32 *)req); 4726 4727 dhsprintk(ioc, printk(MYIOC_s_DEBUG_FMT "HandShake request post done, WaitCnt=%d%s\n", 4728 ioc->name, t, failcnt ? " - MISSING DOORBELL ACK!" : "")); 4729 4730 /* 4731 * Wait for completion of doorbell handshake reply from the IOC 4732 */ 4733 if (!failcnt && (t = WaitForDoorbellReply(ioc, maxwait, sleepFlag)) < 0) 4734 failcnt++; 4735 4736 dhsprintk(ioc, printk(MYIOC_s_DEBUG_FMT "HandShake reply count=%d%s\n", 4737 ioc->name, t, failcnt ? " - MISSING DOORBELL REPLY!" : "")); 4738 4739 /* 4740 * Copy out the cached reply... 4741 */ 4742 for (ii=0; ii < min(replyBytes/2,mptReply->MsgLength*2); ii++) 4743 u16reply[ii] = ioc->hs_reply[ii]; 4744 } else { 4745 return -99; 4746 } 4747 4748 return -failcnt; 4749 } 4750 4751 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 4752 /** 4753 * WaitForDoorbellAck - Wait for IOC doorbell handshake acknowledge 4754 * @ioc: Pointer to MPT_ADAPTER structure 4755 * @howlong: How long to wait (in seconds) 4756 * @sleepFlag: Specifies whether the process can sleep 4757 * 4758 * This routine waits (up to ~2 seconds max) for IOC doorbell 4759 * handshake ACKnowledge, indicated by the IOP_DOORBELL_STATUS 4760 * bit in its IntStatus register being clear. 4761 * 4762 * Returns a negative value on failure, else wait loop count. 4763 */ 4764 static int 4765 WaitForDoorbellAck(MPT_ADAPTER *ioc, int howlong, int sleepFlag) 4766 { 4767 int cntdn; 4768 int count = 0; 4769 u32 intstat=0; 4770 4771 cntdn = 1000 * howlong; 4772 4773 if (sleepFlag == CAN_SLEEP) { 4774 while (--cntdn) { 4775 msleep (1); 4776 intstat = CHIPREG_READ32(&ioc->chip->IntStatus); 4777 if (! (intstat & MPI_HIS_IOP_DOORBELL_STATUS)) 4778 break; 4779 count++; 4780 } 4781 } else { 4782 while (--cntdn) { 4783 udelay (1000); 4784 intstat = CHIPREG_READ32(&ioc->chip->IntStatus); 4785 if (! (intstat & MPI_HIS_IOP_DOORBELL_STATUS)) 4786 break; 4787 count++; 4788 } 4789 } 4790 4791 if (cntdn) { 4792 dprintk(ioc, printk(MYIOC_s_DEBUG_FMT "WaitForDoorbell ACK (count=%d)\n", 4793 ioc->name, count)); 4794 return count; 4795 } 4796 4797 printk(MYIOC_s_ERR_FMT "Doorbell ACK timeout (count=%d), IntStatus=%x!\n", 4798 ioc->name, count, intstat); 4799 return -1; 4800 } 4801 4802 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 4803 /** 4804 * WaitForDoorbellInt - Wait for IOC to set its doorbell interrupt bit 4805 * @ioc: Pointer to MPT_ADAPTER structure 4806 * @howlong: How long to wait (in seconds) 4807 * @sleepFlag: Specifies whether the process can sleep 4808 * 4809 * This routine waits (up to ~2 seconds max) for IOC doorbell interrupt 4810 * (MPI_HIS_DOORBELL_INTERRUPT) to be set in the IntStatus register. 
4811 * 4812 * Returns a negative value on failure, else wait loop count. 4813 */ 4814 static int 4815 WaitForDoorbellInt(MPT_ADAPTER *ioc, int howlong, int sleepFlag) 4816 { 4817 int cntdn; 4818 int count = 0; 4819 u32 intstat=0; 4820 4821 cntdn = 1000 * howlong; 4822 if (sleepFlag == CAN_SLEEP) { 4823 while (--cntdn) { 4824 intstat = CHIPREG_READ32(&ioc->chip->IntStatus); 4825 if (intstat & MPI_HIS_DOORBELL_INTERRUPT) 4826 break; 4827 msleep(1); 4828 count++; 4829 } 4830 } else { 4831 while (--cntdn) { 4832 intstat = CHIPREG_READ32(&ioc->chip->IntStatus); 4833 if (intstat & MPI_HIS_DOORBELL_INTERRUPT) 4834 break; 4835 udelay (1000); 4836 count++; 4837 } 4838 } 4839 4840 if (cntdn) { 4841 dprintk(ioc, printk(MYIOC_s_DEBUG_FMT "WaitForDoorbell INT (cnt=%d) howlong=%d\n", 4842 ioc->name, count, howlong)); 4843 return count; 4844 } 4845 4846 printk(MYIOC_s_ERR_FMT "Doorbell INT timeout (count=%d), IntStatus=%x!\n", 4847 ioc->name, count, intstat); 4848 return -1; 4849 } 4850 4851 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 4852 /** 4853 * WaitForDoorbellReply - Wait for and capture an IOC handshake reply. 4854 * @ioc: Pointer to MPT_ADAPTER structure 4855 * @howlong: How long to wait (in seconds) 4856 * @sleepFlag: Specifies whether the process can sleep 4857 * 4858 * This routine polls the IOC for a handshake reply, 16 bits at a time. 4859 * Reply is cached to IOC private area large enough to hold a maximum 4860 * of 128 bytes of reply data. 4861 * 4862 * Returns a negative value on failure, else size of reply in WORDS. 4863 */ 4864 static int 4865 WaitForDoorbellReply(MPT_ADAPTER *ioc, int howlong, int sleepFlag) 4866 { 4867 int u16cnt = 0; 4868 int failcnt = 0; 4869 int t; 4870 u16 *hs_reply = ioc->hs_reply; 4871 volatile MPIDefaultReply_t *mptReply = (MPIDefaultReply_t *) ioc->hs_reply; 4872 u16 hword; 4873 4874 hs_reply[0] = hs_reply[1] = hs_reply[7] = 0; 4875 4876 /* 4877 * Get first two u16's so we can look at IOC's intended reply MsgLength 4878 */ 4879 u16cnt=0; 4880 if ((t = WaitForDoorbellInt(ioc, howlong, sleepFlag)) < 0) { 4881 failcnt++; 4882 } else { 4883 hs_reply[u16cnt++] = le16_to_cpu(CHIPREG_READ32(&ioc->chip->Doorbell) & 0x0000FFFF); 4884 CHIPREG_WRITE32(&ioc->chip->IntStatus, 0); 4885 if ((t = WaitForDoorbellInt(ioc, 5, sleepFlag)) < 0) 4886 failcnt++; 4887 else { 4888 hs_reply[u16cnt++] = le16_to_cpu(CHIPREG_READ32(&ioc->chip->Doorbell) & 0x0000FFFF); 4889 CHIPREG_WRITE32(&ioc->chip->IntStatus, 0); 4890 } 4891 } 4892 4893 dhsprintk(ioc, printk(MYIOC_s_DEBUG_FMT "WaitCnt=%d First handshake reply word=%08x%s\n", 4894 ioc->name, t, le32_to_cpu(*(u32 *)hs_reply), 4895 failcnt ? " - MISSING DOORBELL HANDSHAKE!" : "")); 4896 4897 /* 4898 * If no error (and IOC said MsgLength is > 0), piece together 4899 * reply 16 bits at a time. 4900 */ 4901 for (u16cnt=2; !failcnt && u16cnt < (2 * mptReply->MsgLength); u16cnt++) { 4902 if ((t = WaitForDoorbellInt(ioc, 5, sleepFlag)) < 0) 4903 failcnt++; 4904 hword = le16_to_cpu(CHIPREG_READ32(&ioc->chip->Doorbell) & 0x0000FFFF); 4905 /* don't overflow our IOC hs_reply[] buffer! 
*/ 4906 if (u16cnt < ARRAY_SIZE(ioc->hs_reply)) 4907 hs_reply[u16cnt] = hword; 4908 CHIPREG_WRITE32(&ioc->chip->IntStatus, 0); 4909 } 4910 4911 if (!failcnt && (t = WaitForDoorbellInt(ioc, 5, sleepFlag)) < 0) 4912 failcnt++; 4913 CHIPREG_WRITE32(&ioc->chip->IntStatus, 0); 4914 4915 if (failcnt) { 4916 printk(MYIOC_s_ERR_FMT "Handshake reply failure!\n", 4917 ioc->name); 4918 return -failcnt; 4919 } 4920 #if 0 4921 else if (u16cnt != (2 * mptReply->MsgLength)) { 4922 return -101; 4923 } 4924 else if ((mptReply->IOCStatus & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) { 4925 return -102; 4926 } 4927 #endif 4928 4929 dhsprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Got Handshake reply:\n", ioc->name)); 4930 DBG_DUMP_REPLY_FRAME(ioc, (u32 *)mptReply); 4931 4932 dhsprintk(ioc, printk(MYIOC_s_DEBUG_FMT "WaitForDoorbell REPLY WaitCnt=%d (sz=%d)\n", 4933 ioc->name, t, u16cnt/2)); 4934 return u16cnt/2; 4935 } 4936 4937 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 4938 /** 4939 * GetLanConfigPages - Fetch LANConfig pages. 4940 * @ioc: Pointer to MPT_ADAPTER structure 4941 * 4942 * Return: 0 for success 4943 * -ENOMEM if no memory available 4944 * -EPERM if not allowed due to ISR context 4945 * -EAGAIN if no msg frames currently available 4946 * -EFAULT for non-successful reply or no reply (timeout) 4947 */ 4948 static int 4949 GetLanConfigPages(MPT_ADAPTER *ioc) 4950 { 4951 ConfigPageHeader_t hdr; 4952 CONFIGPARMS cfg; 4953 LANPage0_t *ppage0_alloc; 4954 dma_addr_t page0_dma; 4955 LANPage1_t *ppage1_alloc; 4956 dma_addr_t page1_dma; 4957 int rc = 0; 4958 int data_sz; 4959 int copy_sz; 4960 4961 /* Get LAN Page 0 header */ 4962 hdr.PageVersion = 0; 4963 hdr.PageLength = 0; 4964 hdr.PageNumber = 0; 4965 hdr.PageType = MPI_CONFIG_PAGETYPE_LAN; 4966 cfg.cfghdr.hdr = &hdr; 4967 cfg.physAddr = -1; 4968 cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER; 4969 cfg.dir = 0; 4970 cfg.pageAddr = 0; 4971 cfg.timeout = 0; 4972 4973 if ((rc = mpt_config(ioc, &cfg)) != 0) 4974 return rc; 4975 4976 if (hdr.PageLength > 0) { 4977 data_sz = hdr.PageLength * 4; 4978 ppage0_alloc = (LANPage0_t *) pci_alloc_consistent(ioc->pcidev, data_sz, &page0_dma); 4979 rc = -ENOMEM; 4980 if (ppage0_alloc) { 4981 memset((u8 *)ppage0_alloc, 0, data_sz); 4982 cfg.physAddr = page0_dma; 4983 cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT; 4984 4985 if ((rc = mpt_config(ioc, &cfg)) == 0) { 4986 /* save the data */ 4987 copy_sz = min_t(int, sizeof(LANPage0_t), data_sz); 4988 memcpy(&ioc->lan_cnfg_page0, ppage0_alloc, copy_sz); 4989 4990 } 4991 4992 pci_free_consistent(ioc->pcidev, data_sz, (u8 *) ppage0_alloc, page0_dma); 4993 4994 /* FIXME! 4995 * Normalize endianness of structure data, 4996 * by byte-swapping all > 1 byte fields! 
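 * (If this were ever implemented it would presumably look something
 *  like the sketch below -- the field name here is hypothetical, only
 *  the le32_to_cpu()/le16_to_cpu() helpers are real:
 *
 *	ioc->lan_cnfg_page0.SomeU32Field =
 *			le32_to_cpu(ioc->lan_cnfg_page0.SomeU32Field);
 * )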
4997 */ 4998 4999 } 5000 5001 if (rc) 5002 return rc; 5003 } 5004 5005 /* Get LAN Page 1 header */ 5006 hdr.PageVersion = 0; 5007 hdr.PageLength = 0; 5008 hdr.PageNumber = 1; 5009 hdr.PageType = MPI_CONFIG_PAGETYPE_LAN; 5010 cfg.cfghdr.hdr = &hdr; 5011 cfg.physAddr = -1; 5012 cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER; 5013 cfg.dir = 0; 5014 cfg.pageAddr = 0; 5015 5016 if ((rc = mpt_config(ioc, &cfg)) != 0) 5017 return rc; 5018 5019 if (hdr.PageLength == 0) 5020 return 0; 5021 5022 data_sz = hdr.PageLength * 4; 5023 rc = -ENOMEM; 5024 ppage1_alloc = (LANPage1_t *) pci_alloc_consistent(ioc->pcidev, data_sz, &page1_dma); 5025 if (ppage1_alloc) { 5026 memset((u8 *)ppage1_alloc, 0, data_sz); 5027 cfg.physAddr = page1_dma; 5028 cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT; 5029 5030 if ((rc = mpt_config(ioc, &cfg)) == 0) { 5031 /* save the data */ 5032 copy_sz = min_t(int, sizeof(LANPage1_t), data_sz); 5033 memcpy(&ioc->lan_cnfg_page1, ppage1_alloc, copy_sz); 5034 } 5035 5036 pci_free_consistent(ioc->pcidev, data_sz, (u8 *) ppage1_alloc, page1_dma); 5037 5038 /* FIXME! 5039 * Normalize endianness of structure data, 5040 * by byte-swapping all > 1 byte fields! 5041 */ 5042 5043 } 5044 5045 return rc; 5046 } 5047 5048 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 5049 /** 5050 * mptbase_sas_persist_operation - Perform operation on SAS Persistent Table 5051 * @ioc: Pointer to MPT_ADAPTER structure 5052 * @persist_opcode: see below 5053 * 5054 * =============================== ====================================== 5055 * MPI_SAS_OP_CLEAR_NOT_PRESENT Free all persist TargetID mappings for 5056 * devices not currently present. 5057 * MPI_SAS_OP_CLEAR_ALL_PERSISTENT Clear all persist TargetID mappings 5058 * =============================== ====================================== 5059 * 5060 * NOTE: Do not use this function during interrupt time. 5061 * 5062 * Returns 0 for success, non-zero on error 5063 */ 5064 5065 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 5066 int 5067 mptbase_sas_persist_operation(MPT_ADAPTER *ioc, u8 persist_opcode) 5068 { 5069 SasIoUnitControlRequest_t *sasIoUnitCntrReq; 5070 SasIoUnitControlReply_t *sasIoUnitCntrReply; 5071 MPT_FRAME_HDR *mf = NULL; 5072 MPIHeader_t *mpi_hdr; 5073 int ret = 0; 5074 unsigned long timeleft; 5075 5076 mutex_lock(&ioc->mptbase_cmds.mutex); 5077 5078 /* init the internal cmd struct */ 5079 memset(ioc->mptbase_cmds.reply, 0 , MPT_DEFAULT_FRAME_SIZE); 5080 INITIALIZE_MGMT_STATUS(ioc->mptbase_cmds.status) 5081 5082 /* ensure garbage is not sent to fw */ 5083 switch(persist_opcode) { 5084 5085 case MPI_SAS_OP_CLEAR_NOT_PRESENT: 5086 case MPI_SAS_OP_CLEAR_ALL_PERSISTENT: 5087 break; 5088 5089 default: 5090 ret = -1; 5091 goto out; 5092 } 5093 5094 printk(KERN_DEBUG "%s: persist_opcode=%x\n", 5095 __func__, persist_opcode); 5096 5097 /* Get a MF for this command.
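 * (How this exported helper is typically driven by a caller, e.g. the
 *  SAS protocol driver or the ioctl path -- illustrative sketch only:
 *
 *	rc = mptbase_sas_persist_operation(ioc,
 *					MPI_SAS_OP_CLEAR_NOT_PRESENT);
 *	if (rc)
 *		printk(MYIOC_s_WARN_FMT "persist clear failed\n",
 *							ioc->name);
 * )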
5098 */ 5099 if ((mf = mpt_get_msg_frame(mpt_base_index, ioc)) == NULL) { 5100 printk(KERN_DEBUG "%s: no msg frames!\n", __func__); 5101 ret = -1; 5102 goto out; 5103 } 5104 5105 mpi_hdr = (MPIHeader_t *) mf; 5106 sasIoUnitCntrReq = (SasIoUnitControlRequest_t *)mf; 5107 memset(sasIoUnitCntrReq,0,sizeof(SasIoUnitControlRequest_t)); 5108 sasIoUnitCntrReq->Function = MPI_FUNCTION_SAS_IO_UNIT_CONTROL; 5109 sasIoUnitCntrReq->MsgContext = mpi_hdr->MsgContext; 5110 sasIoUnitCntrReq->Operation = persist_opcode; 5111 5112 mpt_put_msg_frame(mpt_base_index, ioc, mf); 5113 timeleft = wait_for_completion_timeout(&ioc->mptbase_cmds.done, 10*HZ); 5114 if (!(ioc->mptbase_cmds.status & MPT_MGMT_STATUS_COMMAND_GOOD)) { 5115 ret = -ETIME; 5116 printk(KERN_DEBUG "%s: failed\n", __func__); 5117 if (ioc->mptbase_cmds.status & MPT_MGMT_STATUS_DID_IOCRESET) 5118 goto out; 5119 if (!timeleft) { 5120 printk(MYIOC_s_WARN_FMT 5121 "Issuing Reset from %s!!, doorbell=0x%08x\n", 5122 ioc->name, __func__, mpt_GetIocState(ioc, 0)); 5123 mpt_Soft_Hard_ResetHandler(ioc, CAN_SLEEP); 5124 mpt_free_msg_frame(ioc, mf); 5125 } 5126 goto out; 5127 } 5128 5129 if (!(ioc->mptbase_cmds.status & MPT_MGMT_STATUS_RF_VALID)) { 5130 ret = -1; 5131 goto out; 5132 } 5133 5134 sasIoUnitCntrReply = 5135 (SasIoUnitControlReply_t *)ioc->mptbase_cmds.reply; 5136 if (le16_to_cpu(sasIoUnitCntrReply->IOCStatus) != MPI_IOCSTATUS_SUCCESS) { 5137 printk(KERN_DEBUG "%s: IOCStatus=0x%X IOCLogInfo=0x%X\n", 5138 __func__, sasIoUnitCntrReply->IOCStatus, 5139 sasIoUnitCntrReply->IOCLogInfo); 5140 printk(KERN_DEBUG "%s: failed\n", __func__); 5141 ret = -1; 5142 } else 5143 printk(KERN_DEBUG "%s: success\n", __func__); 5144 out: 5145 5146 CLEAR_MGMT_STATUS(ioc->mptbase_cmds.status) 5147 mutex_unlock(&ioc->mptbase_cmds.mutex); 5148 return ret; 5149 } 5150 5151 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 5152 5153 static void 5154 mptbase_raid_process_event_data(MPT_ADAPTER *ioc, 5155 MpiEventDataRaid_t * pRaidEventData) 5156 { 5157 int volume; 5158 int reason; 5159 int disk; 5160 int status; 5161 int flags; 5162 int state; 5163 5164 volume = pRaidEventData->VolumeID; 5165 reason = pRaidEventData->ReasonCode; 5166 disk = pRaidEventData->PhysDiskNum; 5167 status = le32_to_cpu(pRaidEventData->SettingsStatus); 5168 flags = (status >> 0) & 0xff; 5169 state = (status >> 8) & 0xff; 5170 5171 if (reason == MPI_EVENT_RAID_RC_DOMAIN_VAL_NEEDED) { 5172 return; 5173 } 5174 5175 if ((reason >= MPI_EVENT_RAID_RC_PHYSDISK_CREATED && 5176 reason <= MPI_EVENT_RAID_RC_PHYSDISK_STATUS_CHANGED) || 5177 (reason == MPI_EVENT_RAID_RC_SMART_DATA)) { 5178 printk(MYIOC_s_INFO_FMT "RAID STATUS CHANGE for PhysDisk %d id=%d\n", 5179 ioc->name, disk, volume); 5180 } else { 5181 printk(MYIOC_s_INFO_FMT "RAID STATUS CHANGE for VolumeID %d\n", 5182 ioc->name, volume); 5183 } 5184 5185 switch(reason) { 5186 case MPI_EVENT_RAID_RC_VOLUME_CREATED: 5187 printk(MYIOC_s_INFO_FMT " volume has been created\n", 5188 ioc->name); 5189 break; 5190 5191 case MPI_EVENT_RAID_RC_VOLUME_DELETED: 5192 5193 printk(MYIOC_s_INFO_FMT " volume has been deleted\n", 5194 ioc->name); 5195 break; 5196 5197 case MPI_EVENT_RAID_RC_VOLUME_SETTINGS_CHANGED: 5198 printk(MYIOC_s_INFO_FMT " volume settings have been changed\n", 5199 ioc->name); 5200 break; 5201 5202 case MPI_EVENT_RAID_RC_VOLUME_STATUS_CHANGED: 5203 printk(MYIOC_s_INFO_FMT " volume is now %s%s%s%s\n", 5204 ioc->name, 5205 state == MPI_RAIDVOL0_STATUS_STATE_OPTIMAL 5206 ? 
"optimal" 5207 : state == MPI_RAIDVOL0_STATUS_STATE_DEGRADED 5208 ? "degraded" 5209 : state == MPI_RAIDVOL0_STATUS_STATE_FAILED 5210 ? "failed" 5211 : "state unknown", 5212 flags & MPI_RAIDVOL0_STATUS_FLAG_ENABLED 5213 ? ", enabled" : "", 5214 flags & MPI_RAIDVOL0_STATUS_FLAG_QUIESCED 5215 ? ", quiesced" : "", 5216 flags & MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS 5217 ? ", resync in progress" : "" ); 5218 break; 5219 5220 case MPI_EVENT_RAID_RC_VOLUME_PHYSDISK_CHANGED: 5221 printk(MYIOC_s_INFO_FMT " volume membership of PhysDisk %d has changed\n", 5222 ioc->name, disk); 5223 break; 5224 5225 case MPI_EVENT_RAID_RC_PHYSDISK_CREATED: 5226 printk(MYIOC_s_INFO_FMT " PhysDisk has been created\n", 5227 ioc->name); 5228 break; 5229 5230 case MPI_EVENT_RAID_RC_PHYSDISK_DELETED: 5231 printk(MYIOC_s_INFO_FMT " PhysDisk has been deleted\n", 5232 ioc->name); 5233 break; 5234 5235 case MPI_EVENT_RAID_RC_PHYSDISK_SETTINGS_CHANGED: 5236 printk(MYIOC_s_INFO_FMT " PhysDisk settings have been changed\n", 5237 ioc->name); 5238 break; 5239 5240 case MPI_EVENT_RAID_RC_PHYSDISK_STATUS_CHANGED: 5241 printk(MYIOC_s_INFO_FMT " PhysDisk is now %s%s%s\n", 5242 ioc->name, 5243 state == MPI_PHYSDISK0_STATUS_ONLINE 5244 ? "online" 5245 : state == MPI_PHYSDISK0_STATUS_MISSING 5246 ? "missing" 5247 : state == MPI_PHYSDISK0_STATUS_NOT_COMPATIBLE 5248 ? "not compatible" 5249 : state == MPI_PHYSDISK0_STATUS_FAILED 5250 ? "failed" 5251 : state == MPI_PHYSDISK0_STATUS_INITIALIZING 5252 ? "initializing" 5253 : state == MPI_PHYSDISK0_STATUS_OFFLINE_REQUESTED 5254 ? "offline requested" 5255 : state == MPI_PHYSDISK0_STATUS_FAILED_REQUESTED 5256 ? "failed requested" 5257 : state == MPI_PHYSDISK0_STATUS_OTHER_OFFLINE 5258 ? "offline" 5259 : "state unknown", 5260 flags & MPI_PHYSDISK0_STATUS_FLAG_OUT_OF_SYNC 5261 ? ", out of sync" : "", 5262 flags & MPI_PHYSDISK0_STATUS_FLAG_QUIESCED 5263 ? ", quiesced" : "" ); 5264 break; 5265 5266 case MPI_EVENT_RAID_RC_DOMAIN_VAL_NEEDED: 5267 printk(MYIOC_s_INFO_FMT " Domain Validation needed for PhysDisk %d\n", 5268 ioc->name, disk); 5269 break; 5270 5271 case MPI_EVENT_RAID_RC_SMART_DATA: 5272 printk(MYIOC_s_INFO_FMT " SMART data received, ASC/ASCQ = %02xh/%02xh\n", 5273 ioc->name, pRaidEventData->ASC, pRaidEventData->ASCQ); 5274 break; 5275 5276 case MPI_EVENT_RAID_RC_REPLACE_ACTION_STARTED: 5277 printk(MYIOC_s_INFO_FMT " replacement of PhysDisk %d has started\n", 5278 ioc->name, disk); 5279 break; 5280 } 5281 } 5282 5283 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 5284 /** 5285 * GetIoUnitPage2 - Retrieve BIOS version and boot order information. 
5286 * @ioc: Pointer to MPT_ADAPTER structure 5287 * 5288 * Returns: 0 for success 5289 * -ENOMEM if no memory available 5290 * -EPERM if not allowed due to ISR context 5291 * -EAGAIN if no msg frames currently available 5292 * -EFAULT for non-successful reply or no reply (timeout) 5293 */ 5294 static int 5295 GetIoUnitPage2(MPT_ADAPTER *ioc) 5296 { 5297 ConfigPageHeader_t hdr; 5298 CONFIGPARMS cfg; 5299 IOUnitPage2_t *ppage_alloc; 5300 dma_addr_t page_dma; 5301 int data_sz; 5302 int rc; 5303 5304 /* Get the page header */ 5305 hdr.PageVersion = 0; 5306 hdr.PageLength = 0; 5307 hdr.PageNumber = 2; 5308 hdr.PageType = MPI_CONFIG_PAGETYPE_IO_UNIT; 5309 cfg.cfghdr.hdr = &hdr; 5310 cfg.physAddr = -1; 5311 cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER; 5312 cfg.dir = 0; 5313 cfg.pageAddr = 0; 5314 cfg.timeout = 0; 5315 5316 if ((rc = mpt_config(ioc, &cfg)) != 0) 5317 return rc; 5318 5319 if (hdr.PageLength == 0) 5320 return 0; 5321 5322 /* Read the config page */ 5323 data_sz = hdr.PageLength * 4; 5324 rc = -ENOMEM; 5325 ppage_alloc = (IOUnitPage2_t *) pci_alloc_consistent(ioc->pcidev, data_sz, &page_dma); 5326 if (ppage_alloc) { 5327 memset((u8 *)ppage_alloc, 0, data_sz); 5328 cfg.physAddr = page_dma; 5329 cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT; 5330 5331 /* If Good, save data */ 5332 if ((rc = mpt_config(ioc, &cfg)) == 0) 5333 ioc->biosVersion = le32_to_cpu(ppage_alloc->BiosVersion); 5334 5335 pci_free_consistent(ioc->pcidev, data_sz, (u8 *) ppage_alloc, page_dma); 5336 } 5337 5338 return rc; 5339 } 5340 5341 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 5342 /** 5343 * mpt_GetScsiPortSettings - read SCSI Port Page 0 and 2 5344 * @ioc: Pointer to a Adapter Strucutre 5345 * @portnum: IOC port number 5346 * 5347 * Return: -EFAULT if read of config page header fails 5348 * or if no nvram 5349 * If read of SCSI Port Page 0 fails, 5350 * NVRAM = MPT_HOST_NVRAM_INVALID (0xFFFFFFFF) 5351 * Adapter settings: async, narrow 5352 * Return 1 5353 * If read of SCSI Port Page 2 fails, 5354 * Adapter settings valid 5355 * NVRAM = MPT_HOST_NVRAM_INVALID (0xFFFFFFFF) 5356 * Return 1 5357 * Else 5358 * Both valid 5359 * Return 0 5360 * CHECK - what type of locking mechanisms should be used???? 5361 */ 5362 static int 5363 mpt_GetScsiPortSettings(MPT_ADAPTER *ioc, int portnum) 5364 { 5365 u8 *pbuf; 5366 dma_addr_t buf_dma; 5367 CONFIGPARMS cfg; 5368 ConfigPageHeader_t header; 5369 int ii; 5370 int data, rc = 0; 5371 5372 /* Allocate memory 5373 */ 5374 if (!ioc->spi_data.nvram) { 5375 int sz; 5376 u8 *mem; 5377 sz = MPT_MAX_SCSI_DEVICES * sizeof(int); 5378 mem = kmalloc(sz, GFP_ATOMIC); 5379 if (mem == NULL) 5380 return -EFAULT; 5381 5382 ioc->spi_data.nvram = (int *) mem; 5383 5384 dprintk(ioc, printk(MYIOC_s_DEBUG_FMT "SCSI device NVRAM settings @ %p, sz=%d\n", 5385 ioc->name, ioc->spi_data.nvram, sz)); 5386 } 5387 5388 /* Invalidate NVRAM information 5389 */ 5390 for (ii=0; ii < MPT_MAX_SCSI_DEVICES; ii++) { 5391 ioc->spi_data.nvram[ii] = MPT_HOST_NVRAM_INVALID; 5392 } 5393 5394 /* Read SPP0 header, allocate memory, then read page. 
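 * This is the usual two-step config page access used throughout this
 * file (sketch of the pattern, mirroring the code below):
 *
 *	cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
 *	mpt_config(ioc, &cfg);			(learn PageLength)
 *	pbuf = pci_alloc_consistent(ioc->pcidev,
 *			header.PageLength * 4, &buf_dma);
 *	cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
 *	cfg.physAddr = buf_dma;
 *	mpt_config(ioc, &cfg);			(DMA the page into pbuf)
 *	pci_free_consistent(ioc->pcidev, header.PageLength * 4,
 *			pbuf, buf_dma);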
5395 */ 5396 header.PageVersion = 0; 5397 header.PageLength = 0; 5398 header.PageNumber = 0; 5399 header.PageType = MPI_CONFIG_PAGETYPE_SCSI_PORT; 5400 cfg.cfghdr.hdr = &header; 5401 cfg.physAddr = -1; 5402 cfg.pageAddr = portnum; 5403 cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER; 5404 cfg.dir = 0; 5405 cfg.timeout = 0; /* use default */ 5406 if (mpt_config(ioc, &cfg) != 0) 5407 return -EFAULT; 5408 5409 if (header.PageLength > 0) { 5410 pbuf = pci_alloc_consistent(ioc->pcidev, header.PageLength * 4, &buf_dma); 5411 if (pbuf) { 5412 cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT; 5413 cfg.physAddr = buf_dma; 5414 if (mpt_config(ioc, &cfg) != 0) { 5415 ioc->spi_data.maxBusWidth = MPT_NARROW; 5416 ioc->spi_data.maxSyncOffset = 0; 5417 ioc->spi_data.minSyncFactor = MPT_ASYNC; 5418 ioc->spi_data.busType = MPT_HOST_BUS_UNKNOWN; 5419 rc = 1; 5420 ddvprintk(ioc, printk(MYIOC_s_DEBUG_FMT 5421 "Unable to read PortPage0 minSyncFactor=%x\n", 5422 ioc->name, ioc->spi_data.minSyncFactor)); 5423 } else { 5424 /* Save the Port Page 0 data 5425 */ 5426 SCSIPortPage0_t *pPP0 = (SCSIPortPage0_t *) pbuf; 5427 pPP0->Capabilities = le32_to_cpu(pPP0->Capabilities); 5428 pPP0->PhysicalInterface = le32_to_cpu(pPP0->PhysicalInterface); 5429 5430 if ( (pPP0->Capabilities & MPI_SCSIPORTPAGE0_CAP_QAS) == 0 ) { 5431 ioc->spi_data.noQas |= MPT_TARGET_NO_NEGO_QAS; 5432 ddvprintk(ioc, printk(MYIOC_s_DEBUG_FMT 5433 "noQas due to Capabilities=%x\n", 5434 ioc->name, pPP0->Capabilities)); 5435 } 5436 ioc->spi_data.maxBusWidth = pPP0->Capabilities & MPI_SCSIPORTPAGE0_CAP_WIDE ? 1 : 0; 5437 data = pPP0->Capabilities & MPI_SCSIPORTPAGE0_CAP_MAX_SYNC_OFFSET_MASK; 5438 if (data) { 5439 ioc->spi_data.maxSyncOffset = (u8) (data >> 16); 5440 data = pPP0->Capabilities & MPI_SCSIPORTPAGE0_CAP_MIN_SYNC_PERIOD_MASK; 5441 ioc->spi_data.minSyncFactor = (u8) (data >> 8); 5442 ddvprintk(ioc, printk(MYIOC_s_DEBUG_FMT 5443 "PortPage0 minSyncFactor=%x\n", 5444 ioc->name, ioc->spi_data.minSyncFactor)); 5445 } else { 5446 ioc->spi_data.maxSyncOffset = 0; 5447 ioc->spi_data.minSyncFactor = MPT_ASYNC; 5448 } 5449 5450 ioc->spi_data.busType = pPP0->PhysicalInterface & MPI_SCSIPORTPAGE0_PHY_SIGNAL_TYPE_MASK; 5451 5452 /* Update the minSyncFactor based on bus type. 5453 */ 5454 if ((ioc->spi_data.busType == MPI_SCSIPORTPAGE0_PHY_SIGNAL_HVD) || 5455 (ioc->spi_data.busType == MPI_SCSIPORTPAGE0_PHY_SIGNAL_SE)) { 5456 5457 if (ioc->spi_data.minSyncFactor < MPT_ULTRA) { 5458 ioc->spi_data.minSyncFactor = MPT_ULTRA; 5459 ddvprintk(ioc, printk(MYIOC_s_DEBUG_FMT 5460 "HVD or SE detected, minSyncFactor=%x\n", 5461 ioc->name, ioc->spi_data.minSyncFactor)); 5462 } 5463 } 5464 } 5465 if (pbuf) { 5466 pci_free_consistent(ioc->pcidev, header.PageLength * 4, pbuf, buf_dma); 5467 } 5468 } 5469 } 5470 5471 /* SCSI Port Page 2 - Read the header then the page. 
5472 */ 5473 header.PageVersion = 0; 5474 header.PageLength = 0; 5475 header.PageNumber = 2; 5476 header.PageType = MPI_CONFIG_PAGETYPE_SCSI_PORT; 5477 cfg.cfghdr.hdr = &header; 5478 cfg.physAddr = -1; 5479 cfg.pageAddr = portnum; 5480 cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER; 5481 cfg.dir = 0; 5482 if (mpt_config(ioc, &cfg) != 0) 5483 return -EFAULT; 5484 5485 if (header.PageLength > 0) { 5486 /* Allocate memory and read SCSI Port Page 2 5487 */ 5488 pbuf = pci_alloc_consistent(ioc->pcidev, header.PageLength * 4, &buf_dma); 5489 if (pbuf) { 5490 cfg.action = MPI_CONFIG_ACTION_PAGE_READ_NVRAM; 5491 cfg.physAddr = buf_dma; 5492 if (mpt_config(ioc, &cfg) != 0) { 5493 /* Nvram data is left with INVALID mark 5494 */ 5495 rc = 1; 5496 } else if (ioc->pcidev->vendor == PCI_VENDOR_ID_ATTO) { 5497 5498 /* This is an ATTO adapter, read Page2 accordingly 5499 */ 5500 ATTO_SCSIPortPage2_t *pPP2 = (ATTO_SCSIPortPage2_t *) pbuf; 5501 ATTODeviceInfo_t *pdevice = NULL; 5502 u16 ATTOFlags; 5503 5504 /* Save the Port Page 2 data 5505 * (reformat into a 32bit quantity) 5506 */ 5507 for (ii=0; ii < MPT_MAX_SCSI_DEVICES; ii++) { 5508 pdevice = &pPP2->DeviceSettings[ii]; 5509 ATTOFlags = le16_to_cpu(pdevice->ATTOFlags); 5510 data = 0; 5511 5512 /* Translate ATTO device flags to LSI format 5513 */ 5514 if (ATTOFlags & ATTOFLAG_DISC) 5515 data |= (MPI_SCSIPORTPAGE2_DEVICE_DISCONNECT_ENABLE); 5516 if (ATTOFlags & ATTOFLAG_ID_ENB) 5517 data |= (MPI_SCSIPORTPAGE2_DEVICE_ID_SCAN_ENABLE); 5518 if (ATTOFlags & ATTOFLAG_LUN_ENB) 5519 data |= (MPI_SCSIPORTPAGE2_DEVICE_LUN_SCAN_ENABLE); 5520 if (ATTOFlags & ATTOFLAG_TAGGED) 5521 data |= (MPI_SCSIPORTPAGE2_DEVICE_TAG_QUEUE_ENABLE); 5522 if (!(ATTOFlags & ATTOFLAG_WIDE_ENB)) 5523 data |= (MPI_SCSIPORTPAGE2_DEVICE_WIDE_DISABLE); 5524 5525 data = (data << 16) | (pdevice->Period << 8) | 10; 5526 ioc->spi_data.nvram[ii] = data; 5527 } 5528 } else { 5529 SCSIPortPage2_t *pPP2 = (SCSIPortPage2_t *) pbuf; 5530 MpiDeviceInfo_t *pdevice = NULL; 5531 5532 /* 5533 * Save "Set to Avoid SCSI Bus Resets" flag 5534 */ 5535 ioc->spi_data.bus_reset = 5536 (le32_to_cpu(pPP2->PortFlags) & 5537 MPI_SCSIPORTPAGE2_PORT_FLAGS_AVOID_SCSI_RESET) ? 5538 0 : 1 ; 5539 5540 /* Save the Port Page 2 data 5541 * (reformat into a 32bit quantity) 5542 */ 5543 data = le32_to_cpu(pPP2->PortFlags) & MPI_SCSIPORTPAGE2_PORT_FLAGS_DV_MASK; 5544 ioc->spi_data.PortFlags = data; 5545 for (ii=0; ii < MPT_MAX_SCSI_DEVICES; ii++) { 5546 pdevice = &pPP2->DeviceSettings[ii]; 5547 data = (le16_to_cpu(pdevice->DeviceFlags) << 16) | 5548 (pdevice->SyncFactor << 8) | pdevice->Timeout; 5549 ioc->spi_data.nvram[ii] = data; 5550 } 5551 } 5552 5553 pci_free_consistent(ioc->pcidev, header.PageLength * 4, pbuf, buf_dma); 5554 } 5555 } 5556 5557 /* Update Adapter limits with those from NVRAM 5558 * Comment: Don't need to do this. Target performance 5559 * parameters will never exceed the adapters limits. 5560 */ 5561 5562 return rc; 5563 } 5564 5565 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 5566 /** 5567 * mpt_readScsiDevicePageHeaders - save version and length of SDP1 5568 * @ioc: Pointer to a Adapter Strucutre 5569 * @portnum: IOC port number 5570 * 5571 * Return: -EFAULT if read of config page header fails 5572 * or 0 if success. 
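 *
 * Only the headers are needed here, so the routine stops after the
 * MPI_CONFIG_ACTION_PAGE_HEADER step of the usual pattern and just
 * caches the results, e.g. (from the code below):
 *
 *	ioc->spi_data.sdp1version = cfg.cfghdr.hdr->PageVersion;
 *	ioc->spi_data.sdp1length  = cfg.cfghdr.hdr->PageLength;
 *
 * Presumably these cached values are reused later when SCSI Device
 * Page 1 is written with per-target negotiation settings.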
5573 */ 5574 static int 5575 mpt_readScsiDevicePageHeaders(MPT_ADAPTER *ioc, int portnum) 5576 { 5577 CONFIGPARMS cfg; 5578 ConfigPageHeader_t header; 5579 5580 /* Read the SCSI Device Page 1 header 5581 */ 5582 header.PageVersion = 0; 5583 header.PageLength = 0; 5584 header.PageNumber = 1; 5585 header.PageType = MPI_CONFIG_PAGETYPE_SCSI_DEVICE; 5586 cfg.cfghdr.hdr = &header; 5587 cfg.physAddr = -1; 5588 cfg.pageAddr = portnum; 5589 cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER; 5590 cfg.dir = 0; 5591 cfg.timeout = 0; 5592 if (mpt_config(ioc, &cfg) != 0) 5593 return -EFAULT; 5594 5595 ioc->spi_data.sdp1version = cfg.cfghdr.hdr->PageVersion; 5596 ioc->spi_data.sdp1length = cfg.cfghdr.hdr->PageLength; 5597 5598 header.PageVersion = 0; 5599 header.PageLength = 0; 5600 header.PageNumber = 0; 5601 header.PageType = MPI_CONFIG_PAGETYPE_SCSI_DEVICE; 5602 if (mpt_config(ioc, &cfg) != 0) 5603 return -EFAULT; 5604 5605 ioc->spi_data.sdp0version = cfg.cfghdr.hdr->PageVersion; 5606 ioc->spi_data.sdp0length = cfg.cfghdr.hdr->PageLength; 5607 5608 dcprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Headers: 0: version %d length %d\n", 5609 ioc->name, ioc->spi_data.sdp0version, ioc->spi_data.sdp0length)); 5610 5611 dcprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Headers: 1: version %d length %d\n", 5612 ioc->name, ioc->spi_data.sdp1version, ioc->spi_data.sdp1length)); 5613 return 0; 5614 } 5615 5616 /** 5617 * mpt_inactive_raid_list_free - This clears this link list. 5618 * @ioc : pointer to per adapter structure 5619 **/ 5620 static void 5621 mpt_inactive_raid_list_free(MPT_ADAPTER *ioc) 5622 { 5623 struct inactive_raid_component_info *component_info, *pNext; 5624 5625 if (list_empty(&ioc->raid_data.inactive_list)) 5626 return; 5627 5628 mutex_lock(&ioc->raid_data.inactive_list_mutex); 5629 list_for_each_entry_safe(component_info, pNext, 5630 &ioc->raid_data.inactive_list, list) { 5631 list_del(&component_info->list); 5632 kfree(component_info); 5633 } 5634 mutex_unlock(&ioc->raid_data.inactive_list_mutex); 5635 } 5636 5637 /** 5638 * mpt_inactive_raid_volumes - sets up link list of phy_disk_nums for devices belonging in an inactive volume 5639 * 5640 * @ioc : pointer to per adapter structure 5641 * @channel : volume channel 5642 * @id : volume target id 5643 **/ 5644 static void 5645 mpt_inactive_raid_volumes(MPT_ADAPTER *ioc, u8 channel, u8 id) 5646 { 5647 CONFIGPARMS cfg; 5648 ConfigPageHeader_t hdr; 5649 dma_addr_t dma_handle; 5650 pRaidVolumePage0_t buffer = NULL; 5651 int i; 5652 RaidPhysDiskPage0_t phys_disk; 5653 struct inactive_raid_component_info *component_info; 5654 int handle_inactive_volumes; 5655 5656 memset(&cfg, 0 , sizeof(CONFIGPARMS)); 5657 memset(&hdr, 0 , sizeof(ConfigPageHeader_t)); 5658 hdr.PageType = MPI_CONFIG_PAGETYPE_RAID_VOLUME; 5659 cfg.pageAddr = (channel << 8) + id; 5660 cfg.cfghdr.hdr = &hdr; 5661 cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER; 5662 5663 if (mpt_config(ioc, &cfg) != 0) 5664 goto out; 5665 5666 if (!hdr.PageLength) 5667 goto out; 5668 5669 buffer = pci_alloc_consistent(ioc->pcidev, hdr.PageLength * 4, 5670 &dma_handle); 5671 5672 if (!buffer) 5673 goto out; 5674 5675 cfg.physAddr = dma_handle; 5676 cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT; 5677 5678 if (mpt_config(ioc, &cfg) != 0) 5679 goto out; 5680 5681 if (!buffer->NumPhysDisks) 5682 goto out; 5683 5684 handle_inactive_volumes = 5685 (buffer->VolumeStatus.Flags & MPI_RAIDVOL0_STATUS_FLAG_VOLUME_INACTIVE || 5686 (buffer->VolumeStatus.Flags & MPI_RAIDVOL0_STATUS_FLAG_ENABLED) == 0 || 5687 buffer->VolumeStatus.State == 
MPI_RAIDVOL0_STATUS_STATE_FAILED || 5688 buffer->VolumeStatus.State == MPI_RAIDVOL0_STATUS_STATE_MISSING) ? 1 : 0; 5689 5690 if (!handle_inactive_volumes) 5691 goto out; 5692 5693 mutex_lock(&ioc->raid_data.inactive_list_mutex); 5694 for (i = 0; i < buffer->NumPhysDisks; i++) { 5695 if(mpt_raid_phys_disk_pg0(ioc, 5696 buffer->PhysDisk[i].PhysDiskNum, &phys_disk) != 0) 5697 continue; 5698 5699 if ((component_info = kmalloc(sizeof (*component_info), 5700 GFP_KERNEL)) == NULL) 5701 continue; 5702 5703 component_info->volumeID = id; 5704 component_info->volumeBus = channel; 5705 component_info->d.PhysDiskNum = phys_disk.PhysDiskNum; 5706 component_info->d.PhysDiskBus = phys_disk.PhysDiskBus; 5707 component_info->d.PhysDiskID = phys_disk.PhysDiskID; 5708 component_info->d.PhysDiskIOC = phys_disk.PhysDiskIOC; 5709 5710 list_add_tail(&component_info->list, 5711 &ioc->raid_data.inactive_list); 5712 } 5713 mutex_unlock(&ioc->raid_data.inactive_list_mutex); 5714 5715 out: 5716 if (buffer) 5717 pci_free_consistent(ioc->pcidev, hdr.PageLength * 4, buffer, 5718 dma_handle); 5719 } 5720 5721 /** 5722 * mpt_raid_phys_disk_pg0 - returns phys disk page zero 5723 * @ioc: Pointer to a Adapter Structure 5724 * @phys_disk_num: io unit unique phys disk num generated by the ioc 5725 * @phys_disk: requested payload data returned 5726 * 5727 * Return: 5728 * 0 on success 5729 * -EFAULT if read of config page header fails or data pointer not NULL 5730 * -ENOMEM if pci_alloc failed 5731 **/ 5732 int 5733 mpt_raid_phys_disk_pg0(MPT_ADAPTER *ioc, u8 phys_disk_num, 5734 RaidPhysDiskPage0_t *phys_disk) 5735 { 5736 CONFIGPARMS cfg; 5737 ConfigPageHeader_t hdr; 5738 dma_addr_t dma_handle; 5739 pRaidPhysDiskPage0_t buffer = NULL; 5740 int rc; 5741 5742 memset(&cfg, 0 , sizeof(CONFIGPARMS)); 5743 memset(&hdr, 0 , sizeof(ConfigPageHeader_t)); 5744 memset(phys_disk, 0, sizeof(RaidPhysDiskPage0_t)); 5745 5746 hdr.PageVersion = MPI_RAIDPHYSDISKPAGE0_PAGEVERSION; 5747 hdr.PageType = MPI_CONFIG_PAGETYPE_RAID_PHYSDISK; 5748 cfg.cfghdr.hdr = &hdr; 5749 cfg.physAddr = -1; 5750 cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER; 5751 5752 if (mpt_config(ioc, &cfg) != 0) { 5753 rc = -EFAULT; 5754 goto out; 5755 } 5756 5757 if (!hdr.PageLength) { 5758 rc = -EFAULT; 5759 goto out; 5760 } 5761 5762 buffer = pci_alloc_consistent(ioc->pcidev, hdr.PageLength * 4, 5763 &dma_handle); 5764 5765 if (!buffer) { 5766 rc = -ENOMEM; 5767 goto out; 5768 } 5769 5770 cfg.physAddr = dma_handle; 5771 cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT; 5772 cfg.pageAddr = phys_disk_num; 5773 5774 if (mpt_config(ioc, &cfg) != 0) { 5775 rc = -EFAULT; 5776 goto out; 5777 } 5778 5779 rc = 0; 5780 memcpy(phys_disk, buffer, sizeof(*buffer)); 5781 phys_disk->MaxLBA = le32_to_cpu(buffer->MaxLBA); 5782 5783 out: 5784 5785 if (buffer) 5786 pci_free_consistent(ioc->pcidev, hdr.PageLength * 4, buffer, 5787 dma_handle); 5788 5789 return rc; 5790 } 5791 5792 /** 5793 * mpt_raid_phys_disk_get_num_paths - returns number paths associated to this phys_num 5794 * @ioc: Pointer to a Adapter Structure 5795 * @phys_disk_num: io unit unique phys disk num generated by the ioc 5796 * 5797 * Return: 5798 * returns number paths 5799 **/ 5800 int 5801 mpt_raid_phys_disk_get_num_paths(MPT_ADAPTER *ioc, u8 phys_disk_num) 5802 { 5803 CONFIGPARMS cfg; 5804 ConfigPageHeader_t hdr; 5805 dma_addr_t dma_handle; 5806 pRaidPhysDiskPage1_t buffer = NULL; 5807 int rc; 5808 5809 memset(&cfg, 0 , sizeof(CONFIGPARMS)); 5810 memset(&hdr, 0 , sizeof(ConfigPageHeader_t)); 5811 5812 hdr.PageVersion = 
MPI_RAIDPHYSDISKPAGE1_PAGEVERSION; 5813 hdr.PageType = MPI_CONFIG_PAGETYPE_RAID_PHYSDISK; 5814 hdr.PageNumber = 1; 5815 cfg.cfghdr.hdr = &hdr; 5816 cfg.physAddr = -1; 5817 cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER; 5818 5819 if (mpt_config(ioc, &cfg) != 0) { 5820 rc = 0; 5821 goto out; 5822 } 5823 5824 if (!hdr.PageLength) { 5825 rc = 0; 5826 goto out; 5827 } 5828 5829 buffer = pci_alloc_consistent(ioc->pcidev, hdr.PageLength * 4, 5830 &dma_handle); 5831 5832 if (!buffer) { 5833 rc = 0; 5834 goto out; 5835 } 5836 5837 cfg.physAddr = dma_handle; 5838 cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT; 5839 cfg.pageAddr = phys_disk_num; 5840 5841 if (mpt_config(ioc, &cfg) != 0) { 5842 rc = 0; 5843 goto out; 5844 } 5845 5846 rc = buffer->NumPhysDiskPaths; 5847 out: 5848 5849 if (buffer) 5850 pci_free_consistent(ioc->pcidev, hdr.PageLength * 4, buffer, 5851 dma_handle); 5852 5853 return rc; 5854 } 5855 EXPORT_SYMBOL(mpt_raid_phys_disk_get_num_paths); 5856 5857 /** 5858 * mpt_raid_phys_disk_pg1 - returns phys disk page 1 5859 * @ioc: Pointer to a Adapter Structure 5860 * @phys_disk_num: io unit unique phys disk num generated by the ioc 5861 * @phys_disk: requested payload data returned 5862 * 5863 * Return: 5864 * 0 on success 5865 * -EFAULT if read of config page header fails or data pointer not NULL 5866 * -ENOMEM if pci_alloc failed 5867 **/ 5868 int 5869 mpt_raid_phys_disk_pg1(MPT_ADAPTER *ioc, u8 phys_disk_num, 5870 RaidPhysDiskPage1_t *phys_disk) 5871 { 5872 CONFIGPARMS cfg; 5873 ConfigPageHeader_t hdr; 5874 dma_addr_t dma_handle; 5875 pRaidPhysDiskPage1_t buffer = NULL; 5876 int rc; 5877 int i; 5878 __le64 sas_address; 5879 5880 memset(&cfg, 0 , sizeof(CONFIGPARMS)); 5881 memset(&hdr, 0 , sizeof(ConfigPageHeader_t)); 5882 rc = 0; 5883 5884 hdr.PageVersion = MPI_RAIDPHYSDISKPAGE1_PAGEVERSION; 5885 hdr.PageType = MPI_CONFIG_PAGETYPE_RAID_PHYSDISK; 5886 hdr.PageNumber = 1; 5887 cfg.cfghdr.hdr = &hdr; 5888 cfg.physAddr = -1; 5889 cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER; 5890 5891 if (mpt_config(ioc, &cfg) != 0) { 5892 rc = -EFAULT; 5893 goto out; 5894 } 5895 5896 if (!hdr.PageLength) { 5897 rc = -EFAULT; 5898 goto out; 5899 } 5900 5901 buffer = pci_alloc_consistent(ioc->pcidev, hdr.PageLength * 4, 5902 &dma_handle); 5903 5904 if (!buffer) { 5905 rc = -ENOMEM; 5906 goto out; 5907 } 5908 5909 cfg.physAddr = dma_handle; 5910 cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT; 5911 cfg.pageAddr = phys_disk_num; 5912 5913 if (mpt_config(ioc, &cfg) != 0) { 5914 rc = -EFAULT; 5915 goto out; 5916 } 5917 5918 phys_disk->NumPhysDiskPaths = buffer->NumPhysDiskPaths; 5919 phys_disk->PhysDiskNum = phys_disk_num; 5920 for (i = 0; i < phys_disk->NumPhysDiskPaths; i++) { 5921 phys_disk->Path[i].PhysDiskID = buffer->Path[i].PhysDiskID; 5922 phys_disk->Path[i].PhysDiskBus = buffer->Path[i].PhysDiskBus; 5923 phys_disk->Path[i].OwnerIdentifier = 5924 buffer->Path[i].OwnerIdentifier; 5925 phys_disk->Path[i].Flags = le16_to_cpu(buffer->Path[i].Flags); 5926 memcpy(&sas_address, &buffer->Path[i].WWID, sizeof(__le64)); 5927 sas_address = le64_to_cpu(sas_address); 5928 memcpy(&phys_disk->Path[i].WWID, &sas_address, sizeof(__le64)); 5929 memcpy(&sas_address, 5930 &buffer->Path[i].OwnerWWID, sizeof(__le64)); 5931 sas_address = le64_to_cpu(sas_address); 5932 memcpy(&phys_disk->Path[i].OwnerWWID, 5933 &sas_address, sizeof(__le64)); 5934 } 5935 5936 out: 5937 5938 if (buffer) 5939 pci_free_consistent(ioc->pcidev, hdr.PageLength * 4, buffer, 5940 dma_handle); 5941 5942 return rc; 5943 } 5944 
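/*
 * Illustrative sketch only (kept under #if 0, not part of the driver):
 * a typical caller sizes RAID Physical Disk Page 1 with
 * mpt_raid_phys_disk_get_num_paths() before asking
 * mpt_raid_phys_disk_pg1() to fill it in.  The helper name below is
 * hypothetical.
 */
#if 0
static int example_show_phys_disk_paths(MPT_ADAPTER *ioc, u8 phys_disk_num)
{
	RaidPhysDiskPage1_t *pg1;
	int num_paths, sz, i;

	num_paths = mpt_raid_phys_disk_get_num_paths(ioc, phys_disk_num);
	if (!num_paths)
		return -ENODEV;

	/* Page 1 ends with a variable-length Path[] array */
	sz = offsetof(RaidPhysDiskPage1_t, Path) +
	    num_paths * sizeof(pg1->Path[0]);
	pg1 = kzalloc(sz, GFP_KERNEL);
	if (!pg1)
		return -ENOMEM;

	if (mpt_raid_phys_disk_pg1(ioc, phys_disk_num, pg1) == 0) {
		for (i = 0; i < pg1->NumPhysDiskPaths; i++)
			printk(MYIOC_s_DEBUG_FMT
			    "phys disk %d path %d: id=%d bus=%d flags=0x%x\n",
			    ioc->name, phys_disk_num, i,
			    pg1->Path[i].PhysDiskID,
			    pg1->Path[i].PhysDiskBus,
			    pg1->Path[i].Flags);
	}

	kfree(pg1);
	return 0;
}
#endif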
EXPORT_SYMBOL(mpt_raid_phys_disk_pg1); 5945 5946 5947 /** 5948 * mpt_findImVolumes - Identify IDs of hidden disks and RAID Volumes 5949 * @ioc: Pointer to a Adapter Strucutre 5950 * 5951 * Return: 5952 * 0 on success 5953 * -EFAULT if read of config page header fails or data pointer not NULL 5954 * -ENOMEM if pci_alloc failed 5955 **/ 5956 int 5957 mpt_findImVolumes(MPT_ADAPTER *ioc) 5958 { 5959 IOCPage2_t *pIoc2; 5960 u8 *mem; 5961 dma_addr_t ioc2_dma; 5962 CONFIGPARMS cfg; 5963 ConfigPageHeader_t header; 5964 int rc = 0; 5965 int iocpage2sz; 5966 int i; 5967 5968 if (!ioc->ir_firmware) 5969 return 0; 5970 5971 /* Free the old page 5972 */ 5973 kfree(ioc->raid_data.pIocPg2); 5974 ioc->raid_data.pIocPg2 = NULL; 5975 mpt_inactive_raid_list_free(ioc); 5976 5977 /* Read IOCP2 header then the page. 5978 */ 5979 header.PageVersion = 0; 5980 header.PageLength = 0; 5981 header.PageNumber = 2; 5982 header.PageType = MPI_CONFIG_PAGETYPE_IOC; 5983 cfg.cfghdr.hdr = &header; 5984 cfg.physAddr = -1; 5985 cfg.pageAddr = 0; 5986 cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER; 5987 cfg.dir = 0; 5988 cfg.timeout = 0; 5989 if (mpt_config(ioc, &cfg) != 0) 5990 return -EFAULT; 5991 5992 if (header.PageLength == 0) 5993 return -EFAULT; 5994 5995 iocpage2sz = header.PageLength * 4; 5996 pIoc2 = pci_alloc_consistent(ioc->pcidev, iocpage2sz, &ioc2_dma); 5997 if (!pIoc2) 5998 return -ENOMEM; 5999 6000 cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT; 6001 cfg.physAddr = ioc2_dma; 6002 if (mpt_config(ioc, &cfg) != 0) 6003 goto out; 6004 6005 mem = kmemdup(pIoc2, iocpage2sz, GFP_KERNEL); 6006 if (!mem) { 6007 rc = -ENOMEM; 6008 goto out; 6009 } 6010 6011 ioc->raid_data.pIocPg2 = (IOCPage2_t *) mem; 6012 6013 mpt_read_ioc_pg_3(ioc); 6014 6015 for (i = 0; i < pIoc2->NumActiveVolumes ; i++) 6016 mpt_inactive_raid_volumes(ioc, 6017 pIoc2->RaidVolume[i].VolumeBus, 6018 pIoc2->RaidVolume[i].VolumeID); 6019 6020 out: 6021 pci_free_consistent(ioc->pcidev, iocpage2sz, pIoc2, ioc2_dma); 6022 6023 return rc; 6024 } 6025 6026 static int 6027 mpt_read_ioc_pg_3(MPT_ADAPTER *ioc) 6028 { 6029 IOCPage3_t *pIoc3; 6030 u8 *mem; 6031 CONFIGPARMS cfg; 6032 ConfigPageHeader_t header; 6033 dma_addr_t ioc3_dma; 6034 int iocpage3sz = 0; 6035 6036 /* Free the old page 6037 */ 6038 kfree(ioc->raid_data.pIocPg3); 6039 ioc->raid_data.pIocPg3 = NULL; 6040 6041 /* There is at least one physical disk. 6042 * Read and save IOC Page 3 6043 */ 6044 header.PageVersion = 0; 6045 header.PageLength = 0; 6046 header.PageNumber = 3; 6047 header.PageType = MPI_CONFIG_PAGETYPE_IOC; 6048 cfg.cfghdr.hdr = &header; 6049 cfg.physAddr = -1; 6050 cfg.pageAddr = 0; 6051 cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER; 6052 cfg.dir = 0; 6053 cfg.timeout = 0; 6054 if (mpt_config(ioc, &cfg) != 0) 6055 return 0; 6056 6057 if (header.PageLength == 0) 6058 return 0; 6059 6060 /* Read Header good, alloc memory 6061 */ 6062 iocpage3sz = header.PageLength * 4; 6063 pIoc3 = pci_alloc_consistent(ioc->pcidev, iocpage3sz, &ioc3_dma); 6064 if (!pIoc3) 6065 return 0; 6066 6067 /* Read the Page and save the data 6068 * into malloc'd memory. 
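 * (The page is DMAed into the consistent buffer and then duplicated into
 * a regular kernel allocation, so ioc->raid_data.pIocPg3 stays valid
 * after the DMA buffer is freed below.)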
6069 */ 6070 cfg.physAddr = ioc3_dma; 6071 cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT; 6072 if (mpt_config(ioc, &cfg) == 0) { 6073 mem = kmalloc(iocpage3sz, GFP_KERNEL); 6074 if (mem) { 6075 memcpy(mem, (u8 *)pIoc3, iocpage3sz); 6076 ioc->raid_data.pIocPg3 = (IOCPage3_t *) mem; 6077 } 6078 } 6079 6080 pci_free_consistent(ioc->pcidev, iocpage3sz, pIoc3, ioc3_dma); 6081 6082 return 0; 6083 } 6084 6085 static void 6086 mpt_read_ioc_pg_4(MPT_ADAPTER *ioc) 6087 { 6088 IOCPage4_t *pIoc4; 6089 CONFIGPARMS cfg; 6090 ConfigPageHeader_t header; 6091 dma_addr_t ioc4_dma; 6092 int iocpage4sz; 6093 6094 /* Read and save IOC Page 4 6095 */ 6096 header.PageVersion = 0; 6097 header.PageLength = 0; 6098 header.PageNumber = 4; 6099 header.PageType = MPI_CONFIG_PAGETYPE_IOC; 6100 cfg.cfghdr.hdr = &header; 6101 cfg.physAddr = -1; 6102 cfg.pageAddr = 0; 6103 cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER; 6104 cfg.dir = 0; 6105 cfg.timeout = 0; 6106 if (mpt_config(ioc, &cfg) != 0) 6107 return; 6108 6109 if (header.PageLength == 0) 6110 return; 6111 6112 if ( (pIoc4 = ioc->spi_data.pIocPg4) == NULL ) { 6113 iocpage4sz = (header.PageLength + 4) * 4; /* Allow 4 additional SEP's */ 6114 pIoc4 = pci_alloc_consistent(ioc->pcidev, iocpage4sz, &ioc4_dma); 6115 if (!pIoc4) 6116 return; 6117 ioc->alloc_total += iocpage4sz; 6118 } else { 6119 ioc4_dma = ioc->spi_data.IocPg4_dma; 6120 iocpage4sz = ioc->spi_data.IocPg4Sz; 6121 } 6122 6123 /* Read the Page into dma memory. 6124 */ 6125 cfg.physAddr = ioc4_dma; 6126 cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT; 6127 if (mpt_config(ioc, &cfg) == 0) { 6128 ioc->spi_data.pIocPg4 = (IOCPage4_t *) pIoc4; 6129 ioc->spi_data.IocPg4_dma = ioc4_dma; 6130 ioc->spi_data.IocPg4Sz = iocpage4sz; 6131 } else { 6132 pci_free_consistent(ioc->pcidev, iocpage4sz, pIoc4, ioc4_dma); 6133 ioc->spi_data.pIocPg4 = NULL; 6134 ioc->alloc_total -= iocpage4sz; 6135 } 6136 } 6137 6138 static void 6139 mpt_read_ioc_pg_1(MPT_ADAPTER *ioc) 6140 { 6141 IOCPage1_t *pIoc1; 6142 CONFIGPARMS cfg; 6143 ConfigPageHeader_t header; 6144 dma_addr_t ioc1_dma; 6145 int iocpage1sz = 0; 6146 u32 tmp; 6147 6148 /* Check the Coalescing Timeout in IOC Page 1 6149 */ 6150 header.PageVersion = 0; 6151 header.PageLength = 0; 6152 header.PageNumber = 1; 6153 header.PageType = MPI_CONFIG_PAGETYPE_IOC; 6154 cfg.cfghdr.hdr = &header; 6155 cfg.physAddr = -1; 6156 cfg.pageAddr = 0; 6157 cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER; 6158 cfg.dir = 0; 6159 cfg.timeout = 0; 6160 if (mpt_config(ioc, &cfg) != 0) 6161 return; 6162 6163 if (header.PageLength == 0) 6164 return; 6165 6166 /* Read Header good, alloc memory 6167 */ 6168 iocpage1sz = header.PageLength * 4; 6169 pIoc1 = pci_alloc_consistent(ioc->pcidev, iocpage1sz, &ioc1_dma); 6170 if (!pIoc1) 6171 return; 6172 6173 /* Read the Page and check coalescing timeout 6174 */ 6175 cfg.physAddr = ioc1_dma; 6176 cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT; 6177 if (mpt_config(ioc, &cfg) == 0) { 6178 6179 tmp = le32_to_cpu(pIoc1->Flags) & MPI_IOCPAGE1_REPLY_COALESCING; 6180 if (tmp == MPI_IOCPAGE1_REPLY_COALESCING) { 6181 tmp = le32_to_cpu(pIoc1->CoalescingTimeout); 6182 6183 dprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Coalescing Enabled Timeout = %d\n", 6184 ioc->name, tmp)); 6185 6186 if (tmp > MPT_COALESCING_TIMEOUT) { 6187 pIoc1->CoalescingTimeout = cpu_to_le32(MPT_COALESCING_TIMEOUT); 6188 6189 /* Write NVRAM and current 6190 */ 6191 cfg.dir = 1; 6192 cfg.action = MPI_CONFIG_ACTION_PAGE_WRITE_CURRENT; 6193 if (mpt_config(ioc, &cfg) == 0) { 6194 dprintk(ioc, 
printk(MYIOC_s_DEBUG_FMT "Reset Current Coalescing Timeout to = %d\n", 6195 ioc->name, MPT_COALESCING_TIMEOUT)); 6196 6197 cfg.action = MPI_CONFIG_ACTION_PAGE_WRITE_NVRAM; 6198 if (mpt_config(ioc, &cfg) == 0) { 6199 dprintk(ioc, printk(MYIOC_s_DEBUG_FMT 6200 "Reset NVRAM Coalescing Timeout to = %d\n", 6201 ioc->name, MPT_COALESCING_TIMEOUT)); 6202 } else { 6203 dprintk(ioc, printk(MYIOC_s_DEBUG_FMT 6204 "Reset NVRAM Coalescing Timeout Failed\n", 6205 ioc->name)); 6206 } 6207 6208 } else { 6209 dprintk(ioc, printk(MYIOC_s_WARN_FMT 6210 "Reset of Current Coalescing Timeout Failed!\n", 6211 ioc->name)); 6212 } 6213 } 6214 6215 } else { 6216 dprintk(ioc, printk(MYIOC_s_WARN_FMT "Coalescing Disabled\n", ioc->name)); 6217 } 6218 } 6219 6220 pci_free_consistent(ioc->pcidev, iocpage1sz, pIoc1, ioc1_dma); 6221 6222 return; 6223 } 6224 6225 static void 6226 mpt_get_manufacturing_pg_0(MPT_ADAPTER *ioc) 6227 { 6228 CONFIGPARMS cfg; 6229 ConfigPageHeader_t hdr; 6230 dma_addr_t buf_dma; 6231 ManufacturingPage0_t *pbuf = NULL; 6232 6233 memset(&cfg, 0 , sizeof(CONFIGPARMS)); 6234 memset(&hdr, 0 , sizeof(ConfigPageHeader_t)); 6235 6236 hdr.PageType = MPI_CONFIG_PAGETYPE_MANUFACTURING; 6237 cfg.cfghdr.hdr = &hdr; 6238 cfg.physAddr = -1; 6239 cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER; 6240 cfg.timeout = 10; 6241 6242 if (mpt_config(ioc, &cfg) != 0) 6243 goto out; 6244 6245 if (!cfg.cfghdr.hdr->PageLength) 6246 goto out; 6247 6248 cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT; 6249 pbuf = pci_alloc_consistent(ioc->pcidev, hdr.PageLength * 4, &buf_dma); 6250 if (!pbuf) 6251 goto out; 6252 6253 cfg.physAddr = buf_dma; 6254 6255 if (mpt_config(ioc, &cfg) != 0) 6256 goto out; 6257 6258 memcpy(ioc->board_name, pbuf->BoardName, sizeof(ioc->board_name)); 6259 memcpy(ioc->board_assembly, pbuf->BoardAssembly, sizeof(ioc->board_assembly)); 6260 memcpy(ioc->board_tracer, pbuf->BoardTracerNumber, sizeof(ioc->board_tracer)); 6261 6262 out: 6263 6264 if (pbuf) 6265 pci_free_consistent(ioc->pcidev, hdr.PageLength * 4, pbuf, buf_dma); 6266 } 6267 6268 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 6269 /** 6270 * SendEventNotification - Send EventNotification (on or off) request to adapter 6271 * @ioc: Pointer to MPT_ADAPTER structure 6272 * @EvSwitch: Event switch flags 6273 * @sleepFlag: Specifies whether the process can sleep 6274 */ 6275 static int 6276 SendEventNotification(MPT_ADAPTER *ioc, u8 EvSwitch, int sleepFlag) 6277 { 6278 EventNotification_t evn; 6279 MPIDefaultReply_t reply_buf; 6280 6281 memset(&evn, 0, sizeof(EventNotification_t)); 6282 memset(&reply_buf, 0, sizeof(MPIDefaultReply_t)); 6283 6284 evn.Function = MPI_FUNCTION_EVENT_NOTIFICATION; 6285 evn.Switch = EvSwitch; 6286 evn.MsgContext = cpu_to_le32(mpt_base_index << 16); 6287 6288 devtverboseprintk(ioc, printk(MYIOC_s_DEBUG_FMT 6289 "Sending EventNotification (%d) request %p\n", 6290 ioc->name, EvSwitch, &evn)); 6291 6292 return mpt_handshake_req_reply_wait(ioc, sizeof(EventNotification_t), 6293 (u32 *)&evn, sizeof(MPIDefaultReply_t), (u16 *)&reply_buf, 30, 6294 sleepFlag); 6295 } 6296 6297 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 6298 /** 6299 * SendEventAck - Send EventAck request to MPT adapter. 
6300 * @ioc: Pointer to MPT_ADAPTER structure 6301 * @evnp: Pointer to original EventNotification request 6302 */ 6303 static int 6304 SendEventAck(MPT_ADAPTER *ioc, EventNotificationReply_t *evnp) 6305 { 6306 EventAck_t *pAck; 6307 6308 if ((pAck = (EventAck_t *) mpt_get_msg_frame(mpt_base_index, ioc)) == NULL) { 6309 dfailprintk(ioc, printk(MYIOC_s_WARN_FMT "%s, no msg frames!!\n", 6310 ioc->name, __func__)); 6311 return -1; 6312 } 6313 6314 devtverboseprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Sending EventAck\n", ioc->name)); 6315 6316 pAck->Function = MPI_FUNCTION_EVENT_ACK; 6317 pAck->ChainOffset = 0; 6318 pAck->Reserved[0] = pAck->Reserved[1] = 0; 6319 pAck->MsgFlags = 0; 6320 pAck->Reserved1[0] = pAck->Reserved1[1] = pAck->Reserved1[2] = 0; 6321 pAck->Event = evnp->Event; 6322 pAck->EventContext = evnp->EventContext; 6323 6324 mpt_put_msg_frame(mpt_base_index, ioc, (MPT_FRAME_HDR *)pAck); 6325 6326 return 0; 6327 } 6328 6329 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 6330 /** 6331 * mpt_config - Generic function to issue config message 6332 * @ioc: Pointer to an adapter structure 6333 * @pCfg: Pointer to a configuration structure. Struct contains 6334 * action, page address, direction, physical address 6335 * and pointer to a configuration page header 6336 * Page header is updated. 6337 * 6338 * Returns 0 for success 6339 * -EPERM if not allowed due to ISR context 6340 * -EAGAIN if no msg frames currently available 6341 * -EFAULT for non-successful reply or no reply (timeout) 6342 */ 6343 int 6344 mpt_config(MPT_ADAPTER *ioc, CONFIGPARMS *pCfg) 6345 { 6346 Config_t *pReq; 6347 ConfigReply_t *pReply; 6348 ConfigExtendedPageHeader_t *pExtHdr = NULL; 6349 MPT_FRAME_HDR *mf; 6350 int ii; 6351 int flagsLength; 6352 long timeout; 6353 int ret; 6354 u8 page_type = 0, extend_page; 6355 unsigned long timeleft; 6356 unsigned long flags; 6357 int in_isr; 6358 u8 issue_hard_reset = 0; 6359 u8 retry_count = 0; 6360 6361 /* Prevent calling wait_event() (below), if caller happens 6362 * to be in ISR context, because that is fatal! 
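 * (mpt_config sleeps in wait_for_completion_timeout() below, so it may
 * only be used from process context.)
 *
 * Extended config pages (MPI_CONFIG_PAGETYPE_EXTENDED) are described by
 * pCfg->cfghdr.ehdr rather than cfghdr.hdr; for those the regular header
 * PageLength is treated as reserved and the SGE length is taken from
 * ExtPageLength instead (handled further below).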
6363 */ 6364 in_isr = in_interrupt(); 6365 if (in_isr) { 6366 dcprintk(ioc, printk(MYIOC_s_WARN_FMT "Config request not allowed in ISR context!\n", 6367 ioc->name)); 6368 return -EPERM; 6369 } 6370 6371 /* don't send a config page during diag reset */ 6372 spin_lock_irqsave(&ioc->taskmgmt_lock, flags); 6373 if (ioc->ioc_reset_in_progress) { 6374 dfailprintk(ioc, printk(MYIOC_s_DEBUG_FMT 6375 "%s: busy with host reset\n", ioc->name, __func__)); 6376 spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags); 6377 return -EBUSY; 6378 } 6379 spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags); 6380 6381 /* don't send if no chance of success */ 6382 if (!ioc->active || 6383 mpt_GetIocState(ioc, 1) != MPI_IOC_STATE_OPERATIONAL) { 6384 dfailprintk(ioc, printk(MYIOC_s_DEBUG_FMT 6385 "%s: ioc not operational, %d, %xh\n", 6386 ioc->name, __func__, ioc->active, 6387 mpt_GetIocState(ioc, 0))); 6388 return -EFAULT; 6389 } 6390 6391 retry_config: 6392 mutex_lock(&ioc->mptbase_cmds.mutex); 6393 /* init the internal cmd struct */ 6394 memset(ioc->mptbase_cmds.reply, 0 , MPT_DEFAULT_FRAME_SIZE); 6395 INITIALIZE_MGMT_STATUS(ioc->mptbase_cmds.status) 6396 6397 /* Get and Populate a free Frame 6398 */ 6399 if ((mf = mpt_get_msg_frame(mpt_base_index, ioc)) == NULL) { 6400 dcprintk(ioc, printk(MYIOC_s_WARN_FMT 6401 "mpt_config: no msg frames!\n", ioc->name)); 6402 ret = -EAGAIN; 6403 goto out; 6404 } 6405 6406 pReq = (Config_t *)mf; 6407 pReq->Action = pCfg->action; 6408 pReq->Reserved = 0; 6409 pReq->ChainOffset = 0; 6410 pReq->Function = MPI_FUNCTION_CONFIG; 6411 6412 /* Assume page type is not extended and clear "reserved" fields. */ 6413 pReq->ExtPageLength = 0; 6414 pReq->ExtPageType = 0; 6415 pReq->MsgFlags = 0; 6416 6417 for (ii=0; ii < 8; ii++) 6418 pReq->Reserved2[ii] = 0; 6419 6420 pReq->Header.PageVersion = pCfg->cfghdr.hdr->PageVersion; 6421 pReq->Header.PageLength = pCfg->cfghdr.hdr->PageLength; 6422 pReq->Header.PageNumber = pCfg->cfghdr.hdr->PageNumber; 6423 pReq->Header.PageType = (pCfg->cfghdr.hdr->PageType & MPI_CONFIG_PAGETYPE_MASK); 6424 6425 if ((pCfg->cfghdr.hdr->PageType & MPI_CONFIG_PAGETYPE_MASK) == MPI_CONFIG_PAGETYPE_EXTENDED) { 6426 pExtHdr = (ConfigExtendedPageHeader_t *)pCfg->cfghdr.ehdr; 6427 pReq->ExtPageLength = cpu_to_le16(pExtHdr->ExtPageLength); 6428 pReq->ExtPageType = pExtHdr->ExtPageType; 6429 pReq->Header.PageType = MPI_CONFIG_PAGETYPE_EXTENDED; 6430 6431 /* Page Length must be treated as a reserved field for the 6432 * extended header. 6433 */ 6434 pReq->Header.PageLength = 0; 6435 } 6436 6437 pReq->PageAddress = cpu_to_le32(pCfg->pageAddr); 6438 6439 /* Add a SGE to the config request. 6440 */ 6441 if (pCfg->dir) 6442 flagsLength = MPT_SGE_FLAGS_SSIMPLE_WRITE; 6443 else 6444 flagsLength = MPT_SGE_FLAGS_SSIMPLE_READ; 6445 6446 if ((pCfg->cfghdr.hdr->PageType & MPI_CONFIG_PAGETYPE_MASK) == 6447 MPI_CONFIG_PAGETYPE_EXTENDED) { 6448 flagsLength |= pExtHdr->ExtPageLength * 4; 6449 page_type = pReq->ExtPageType; 6450 extend_page = 1; 6451 } else { 6452 flagsLength |= pCfg->cfghdr.hdr->PageLength * 4; 6453 page_type = pReq->Header.PageType; 6454 extend_page = 0; 6455 } 6456 6457 dcprintk(ioc, printk(MYIOC_s_DEBUG_FMT 6458 "Sending Config request type 0x%x, page 0x%x and action %d\n", 6459 ioc->name, page_type, pReq->Header.PageNumber, pReq->Action)); 6460 6461 ioc->add_sge((char *)&pReq->PageBufferSGE, flagsLength, pCfg->physAddr); 6462 timeout = (pCfg->timeout < 15) ? 
HZ*15 : HZ*pCfg->timeout; 6463 mpt_put_msg_frame(mpt_base_index, ioc, mf); 6464 timeleft = wait_for_completion_timeout(&ioc->mptbase_cmds.done, 6465 timeout); 6466 if (!(ioc->mptbase_cmds.status & MPT_MGMT_STATUS_COMMAND_GOOD)) { 6467 ret = -ETIME; 6468 dfailprintk(ioc, printk(MYIOC_s_DEBUG_FMT 6469 "Failed Sending Config request type 0x%x, page 0x%x," 6470 " action %d, status %xh, time left %ld\n\n", 6471 ioc->name, page_type, pReq->Header.PageNumber, 6472 pReq->Action, ioc->mptbase_cmds.status, timeleft)); 6473 if (ioc->mptbase_cmds.status & MPT_MGMT_STATUS_DID_IOCRESET) 6474 goto out; 6475 if (!timeleft) { 6476 spin_lock_irqsave(&ioc->taskmgmt_lock, flags); 6477 if (ioc->ioc_reset_in_progress) { 6478 spin_unlock_irqrestore(&ioc->taskmgmt_lock, 6479 flags); 6480 printk(MYIOC_s_INFO_FMT "%s: host reset in" 6481 " progress mpt_config timed out.!!\n", 6482 __func__, ioc->name); 6483 mutex_unlock(&ioc->mptbase_cmds.mutex); 6484 return -EFAULT; 6485 } 6486 spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags); 6487 issue_hard_reset = 1; 6488 } 6489 goto out; 6490 } 6491 6492 if (!(ioc->mptbase_cmds.status & MPT_MGMT_STATUS_RF_VALID)) { 6493 ret = -1; 6494 goto out; 6495 } 6496 pReply = (ConfigReply_t *)ioc->mptbase_cmds.reply; 6497 ret = le16_to_cpu(pReply->IOCStatus) & MPI_IOCSTATUS_MASK; 6498 if (ret == MPI_IOCSTATUS_SUCCESS) { 6499 if (extend_page) { 6500 pCfg->cfghdr.ehdr->ExtPageLength = 6501 le16_to_cpu(pReply->ExtPageLength); 6502 pCfg->cfghdr.ehdr->ExtPageType = 6503 pReply->ExtPageType; 6504 } 6505 pCfg->cfghdr.hdr->PageVersion = pReply->Header.PageVersion; 6506 pCfg->cfghdr.hdr->PageLength = pReply->Header.PageLength; 6507 pCfg->cfghdr.hdr->PageNumber = pReply->Header.PageNumber; 6508 pCfg->cfghdr.hdr->PageType = pReply->Header.PageType; 6509 6510 } 6511 6512 if (retry_count) 6513 printk(MYIOC_s_INFO_FMT "Retry completed " 6514 "ret=0x%x timeleft=%ld\n", 6515 ioc->name, ret, timeleft); 6516 6517 dcprintk(ioc, printk(KERN_DEBUG "IOCStatus=%04xh, IOCLogInfo=%08xh\n", 6518 ret, le32_to_cpu(pReply->IOCLogInfo))); 6519 6520 out: 6521 6522 CLEAR_MGMT_STATUS(ioc->mptbase_cmds.status) 6523 mutex_unlock(&ioc->mptbase_cmds.mutex); 6524 if (issue_hard_reset) { 6525 issue_hard_reset = 0; 6526 printk(MYIOC_s_WARN_FMT 6527 "Issuing Reset from %s!!, doorbell=0x%08x\n", 6528 ioc->name, __func__, mpt_GetIocState(ioc, 0)); 6529 if (retry_count == 0) { 6530 if (mpt_Soft_Hard_ResetHandler(ioc, CAN_SLEEP) != 0) 6531 retry_count++; 6532 } else 6533 mpt_HardResetHandler(ioc, CAN_SLEEP); 6534 6535 mpt_free_msg_frame(ioc, mf); 6536 /* attempt one retry for a timed out command */ 6537 if (retry_count < 2) { 6538 printk(MYIOC_s_INFO_FMT 6539 "Attempting Retry Config request" 6540 " type 0x%x, page 0x%x," 6541 " action %d\n", ioc->name, page_type, 6542 pCfg->cfghdr.hdr->PageNumber, pCfg->action); 6543 retry_count++; 6544 goto retry_config; 6545 } 6546 } 6547 return ret; 6548 6549 } 6550 6551 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 6552 /** 6553 * mpt_ioc_reset - Base cleanup for hard reset 6554 * @ioc: Pointer to the adapter structure 6555 * @reset_phase: Indicates pre- or post-reset functionality 6556 * 6557 * Remark: Frees resources with internally generated commands. 
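 *
 * Called once per reset phase: MPT_IOC_SETUP_RESET quiesces I/O,
 * MPT_IOC_PRE_RESET is only logged here, and MPT_IOC_POST_RESET completes
 * any internal mptbase/taskmgmt commands that were pending when the reset
 * hit (see the switch below).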
6558 */ 6559 static int 6560 mpt_ioc_reset(MPT_ADAPTER *ioc, int reset_phase) 6561 { 6562 switch (reset_phase) { 6563 case MPT_IOC_SETUP_RESET: 6564 ioc->taskmgmt_quiesce_io = 1; 6565 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT 6566 "%s: MPT_IOC_SETUP_RESET\n", ioc->name, __func__)); 6567 break; 6568 case MPT_IOC_PRE_RESET: 6569 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT 6570 "%s: MPT_IOC_PRE_RESET\n", ioc->name, __func__)); 6571 break; 6572 case MPT_IOC_POST_RESET: 6573 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT 6574 "%s: MPT_IOC_POST_RESET\n", ioc->name, __func__)); 6575 /* wake up mptbase_cmds */ 6576 if (ioc->mptbase_cmds.status & MPT_MGMT_STATUS_PENDING) { 6577 ioc->mptbase_cmds.status |= 6578 MPT_MGMT_STATUS_DID_IOCRESET; 6579 complete(&ioc->mptbase_cmds.done); 6580 } 6581 /* wake up taskmgmt_cmds */ 6582 if (ioc->taskmgmt_cmds.status & MPT_MGMT_STATUS_PENDING) { 6583 ioc->taskmgmt_cmds.status |= 6584 MPT_MGMT_STATUS_DID_IOCRESET; 6585 complete(&ioc->taskmgmt_cmds.done); 6586 } 6587 break; 6588 default: 6589 break; 6590 } 6591 6592 return 1; /* currently means nothing really */ 6593 } 6594 6595 6596 #ifdef CONFIG_PROC_FS /* { */ 6597 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 6598 /* 6599 * procfs (%MPT_PROCFS_MPTBASEDIR/...) support stuff... 6600 */ 6601 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 6602 /** 6603 * procmpt_create - Create %MPT_PROCFS_MPTBASEDIR entries. 6604 * 6605 * Returns 0 for success, non-zero for failure. 6606 */ 6607 static int 6608 procmpt_create(void) 6609 { 6610 mpt_proc_root_dir = proc_mkdir(MPT_PROCFS_MPTBASEDIR, NULL); 6611 if (mpt_proc_root_dir == NULL) 6612 return -ENOTDIR; 6613 6614 proc_create_single("summary", S_IRUGO, mpt_proc_root_dir, 6615 mpt_summary_proc_show); 6616 proc_create_single("version", S_IRUGO, mpt_proc_root_dir, 6617 mpt_version_proc_show); 6618 return 0; 6619 } 6620 6621 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 6622 /** 6623 * procmpt_destroy - Tear down %MPT_PROCFS_MPTBASEDIR entries. 6624 * 6625 * Returns 0 for success, non-zero for failure. 6626 */ 6627 static void 6628 procmpt_destroy(void) 6629 { 6630 remove_proc_entry("version", mpt_proc_root_dir); 6631 remove_proc_entry("summary", mpt_proc_root_dir); 6632 remove_proc_entry(MPT_PROCFS_MPTBASEDIR, NULL); 6633 } 6634 6635 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 6636 /* 6637 * Handles read request from /proc/mpt/summary or /proc/mpt/iocN/summary. 
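 * (m->private carries the per-IOC pointer for the iocN entries and is
 * NULL for the top-level summary, hence the ioc_list walk below.)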
6638 */ 6639 static void seq_mpt_print_ioc_summary(MPT_ADAPTER *ioc, struct seq_file *m, int showlan); 6640 6641 static int mpt_summary_proc_show(struct seq_file *m, void *v) 6642 { 6643 MPT_ADAPTER *ioc = m->private; 6644 6645 if (ioc) { 6646 seq_mpt_print_ioc_summary(ioc, m, 1); 6647 } else { 6648 list_for_each_entry(ioc, &ioc_list, list) { 6649 seq_mpt_print_ioc_summary(ioc, m, 1); 6650 } 6651 } 6652 6653 return 0; 6654 } 6655 6656 static int mpt_version_proc_show(struct seq_file *m, void *v) 6657 { 6658 u8 cb_idx; 6659 int scsi, fc, sas, lan, ctl, targ, dmp; 6660 char *drvname; 6661 6662 seq_printf(m, "%s-%s\n", "mptlinux", MPT_LINUX_VERSION_COMMON); 6663 seq_printf(m, " Fusion MPT base driver\n"); 6664 6665 scsi = fc = sas = lan = ctl = targ = dmp = 0; 6666 for (cb_idx = MPT_MAX_PROTOCOL_DRIVERS-1; cb_idx; cb_idx--) { 6667 drvname = NULL; 6668 if (MptCallbacks[cb_idx]) { 6669 switch (MptDriverClass[cb_idx]) { 6670 case MPTSPI_DRIVER: 6671 if (!scsi++) drvname = "SPI host"; 6672 break; 6673 case MPTFC_DRIVER: 6674 if (!fc++) drvname = "FC host"; 6675 break; 6676 case MPTSAS_DRIVER: 6677 if (!sas++) drvname = "SAS host"; 6678 break; 6679 case MPTLAN_DRIVER: 6680 if (!lan++) drvname = "LAN"; 6681 break; 6682 case MPTSTM_DRIVER: 6683 if (!targ++) drvname = "SCSI target"; 6684 break; 6685 case MPTCTL_DRIVER: 6686 if (!ctl++) drvname = "ioctl"; 6687 break; 6688 } 6689 6690 if (drvname) 6691 seq_printf(m, " Fusion MPT %s driver\n", drvname); 6692 } 6693 } 6694 6695 return 0; 6696 } 6697 6698 static int mpt_iocinfo_proc_show(struct seq_file *m, void *v) 6699 { 6700 MPT_ADAPTER *ioc = m->private; 6701 char expVer[32]; 6702 int sz; 6703 int p; 6704 6705 mpt_get_fw_exp_ver(expVer, ioc); 6706 6707 seq_printf(m, "%s:", ioc->name); 6708 if (ioc->facts.Flags & MPI_IOCFACTS_FLAGS_FW_DOWNLOAD_BOOT) 6709 seq_printf(m, " (f/w download boot flag set)"); 6710 // if (ioc->facts.IOCExceptions & MPI_IOCFACTS_EXCEPT_CONFIG_CHECKSUM_FAIL) 6711 // seq_printf(m, " CONFIG_CHECKSUM_FAIL!"); 6712 6713 seq_printf(m, "\n ProductID = 0x%04x (%s)\n", 6714 ioc->facts.ProductID, 6715 ioc->prod_name); 6716 seq_printf(m, " FWVersion = 0x%08x%s", ioc->facts.FWVersion.Word, expVer); 6717 if (ioc->facts.FWImageSize) 6718 seq_printf(m, " (fw_size=%d)", ioc->facts.FWImageSize); 6719 seq_printf(m, "\n MsgVersion = 0x%04x\n", ioc->facts.MsgVersion); 6720 seq_printf(m, " FirstWhoInit = 0x%02x\n", ioc->FirstWhoInit); 6721 seq_printf(m, " EventState = 0x%02x\n", ioc->facts.EventState); 6722 6723 seq_printf(m, " CurrentHostMfaHighAddr = 0x%08x\n", 6724 ioc->facts.CurrentHostMfaHighAddr); 6725 seq_printf(m, " CurrentSenseBufferHighAddr = 0x%08x\n", 6726 ioc->facts.CurrentSenseBufferHighAddr); 6727 6728 seq_printf(m, " MaxChainDepth = 0x%02x frames\n", ioc->facts.MaxChainDepth); 6729 seq_printf(m, " MinBlockSize = 0x%02x bytes\n", 4*ioc->facts.BlockSize); 6730 6731 seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n", 6732 (void *)ioc->req_frames, (void *)(ulong)ioc->req_frames_dma); 6733 /* 6734 * Rounding UP to nearest 4-kB boundary here... 
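 * e.g. with req_sz=128 and req_depth=512 (illustrative values),
 * sz = 128*512 + 128 = 0x10080, which rounds up to 0x11000.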
6735 */ 6736 sz = (ioc->req_sz * ioc->req_depth) + 128; 6737 sz = ((sz + 0x1000UL - 1UL) / 0x1000) * 0x1000; 6738 seq_printf(m, " {CurReqSz=%d} x {CurReqDepth=%d} = %d bytes ^= 0x%x\n", 6739 ioc->req_sz, ioc->req_depth, ioc->req_sz*ioc->req_depth, sz); 6740 seq_printf(m, " {MaxReqSz=%d} {MaxReqDepth=%d}\n", 6741 4*ioc->facts.RequestFrameSize, 6742 ioc->facts.GlobalCredits); 6743 6744 seq_printf(m, " Frames @ 0x%p (Dma @ 0x%p)\n", 6745 (void *)ioc->alloc, (void *)(ulong)ioc->alloc_dma); 6746 sz = (ioc->reply_sz * ioc->reply_depth) + 128; 6747 seq_printf(m, " {CurRepSz=%d} x {CurRepDepth=%d} = %d bytes ^= 0x%x\n", 6748 ioc->reply_sz, ioc->reply_depth, ioc->reply_sz*ioc->reply_depth, sz); 6749 seq_printf(m, " {MaxRepSz=%d} {MaxRepDepth=%d}\n", 6750 ioc->facts.CurReplyFrameSize, 6751 ioc->facts.ReplyQueueDepth); 6752 6753 seq_printf(m, " MaxDevices = %d\n", 6754 (ioc->facts.MaxDevices==0) ? 255 : ioc->facts.MaxDevices); 6755 seq_printf(m, " MaxBuses = %d\n", ioc->facts.MaxBuses); 6756 6757 /* per-port info */ 6758 for (p=0; p < ioc->facts.NumberOfPorts; p++) { 6759 seq_printf(m, " PortNumber = %d (of %d)\n", 6760 p+1, 6761 ioc->facts.NumberOfPorts); 6762 if (ioc->bus_type == FC) { 6763 if (ioc->pfacts[p].ProtocolFlags & MPI_PORTFACTS_PROTOCOL_LAN) { 6764 u8 *a = (u8*)&ioc->lan_cnfg_page1.HardwareAddressLow; 6765 seq_printf(m, " LanAddr = %pMR\n", a); 6766 } 6767 seq_printf(m, " WWN = %08X%08X:%08X%08X\n", 6768 ioc->fc_port_page0[p].WWNN.High, 6769 ioc->fc_port_page0[p].WWNN.Low, 6770 ioc->fc_port_page0[p].WWPN.High, 6771 ioc->fc_port_page0[p].WWPN.Low); 6772 } 6773 } 6774 6775 return 0; 6776 } 6777 #endif /* CONFIG_PROC_FS } */ 6778 6779 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 6780 static void 6781 mpt_get_fw_exp_ver(char *buf, MPT_ADAPTER *ioc) 6782 { 6783 buf[0] ='\0'; 6784 if ((ioc->facts.FWVersion.Word >> 24) == 0x0E) { 6785 sprintf(buf, " (Exp %02d%02d)", 6786 (ioc->facts.FWVersion.Word >> 16) & 0x00FF, /* Month */ 6787 (ioc->facts.FWVersion.Word >> 8) & 0x1F); /* Day */ 6788 6789 /* insider hack! */ 6790 if ((ioc->facts.FWVersion.Word >> 8) & 0x80) 6791 strcat(buf, " [MDBG]"); 6792 } 6793 } 6794 6795 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 6796 /** 6797 * mpt_print_ioc_summary - Write ASCII summary of IOC to a buffer. 6798 * @ioc: Pointer to MPT_ADAPTER structure 6799 * @buffer: Pointer to buffer where IOC summary info should be written 6800 * @size: Pointer to number of bytes we wrote (set by this routine) 6801 * @len: Offset at which to start writing in buffer 6802 * @showlan: Display LAN stuff? 6803 * 6804 * This routine writes (english readable) ASCII text, which represents 6805 * a summary of IOC information, to a buffer. 6806 */ 6807 void 6808 mpt_print_ioc_summary(MPT_ADAPTER *ioc, char *buffer, int *size, int len, int showlan) 6809 { 6810 char expVer[32]; 6811 int y; 6812 6813 mpt_get_fw_exp_ver(expVer, ioc); 6814 6815 /* 6816 * Shorter summary of attached ioc's... 
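 * e.g. (illustrative values only):
 *	ioc0: LSI53C1030, FwRev=01032920h, Ports=1, MaxQ=222, IRQ=17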
6817 */ 6818 y = sprintf(buffer+len, "%s: %s, %s%08xh%s, Ports=%d, MaxQ=%d", 6819 ioc->name, 6820 ioc->prod_name, 6821 MPT_FW_REV_MAGIC_ID_STRING, /* "FwRev=" or somesuch */ 6822 ioc->facts.FWVersion.Word, 6823 expVer, 6824 ioc->facts.NumberOfPorts, 6825 ioc->req_depth); 6826 6827 if (showlan && (ioc->pfacts[0].ProtocolFlags & MPI_PORTFACTS_PROTOCOL_LAN)) { 6828 u8 *a = (u8*)&ioc->lan_cnfg_page1.HardwareAddressLow; 6829 y += sprintf(buffer+len+y, ", LanAddr=%pMR", a); 6830 } 6831 6832 y += sprintf(buffer+len+y, ", IRQ=%d", ioc->pci_irq); 6833 6834 if (!ioc->active) 6835 y += sprintf(buffer+len+y, " (disabled)"); 6836 6837 y += sprintf(buffer+len+y, "\n"); 6838 6839 *size = y; 6840 } 6841 6842 #ifdef CONFIG_PROC_FS 6843 static void seq_mpt_print_ioc_summary(MPT_ADAPTER *ioc, struct seq_file *m, int showlan) 6844 { 6845 char expVer[32]; 6846 6847 mpt_get_fw_exp_ver(expVer, ioc); 6848 6849 /* 6850 * Shorter summary of attached ioc's... 6851 */ 6852 seq_printf(m, "%s: %s, %s%08xh%s, Ports=%d, MaxQ=%d", 6853 ioc->name, 6854 ioc->prod_name, 6855 MPT_FW_REV_MAGIC_ID_STRING, /* "FwRev=" or somesuch */ 6856 ioc->facts.FWVersion.Word, 6857 expVer, 6858 ioc->facts.NumberOfPorts, 6859 ioc->req_depth); 6860 6861 if (showlan && (ioc->pfacts[0].ProtocolFlags & MPI_PORTFACTS_PROTOCOL_LAN)) { 6862 u8 *a = (u8*)&ioc->lan_cnfg_page1.HardwareAddressLow; 6863 seq_printf(m, ", LanAddr=%pMR", a); 6864 } 6865 6866 seq_printf(m, ", IRQ=%d", ioc->pci_irq); 6867 6868 if (!ioc->active) 6869 seq_printf(m, " (disabled)"); 6870 6871 seq_putc(m, '\n'); 6872 } 6873 #endif 6874 6875 /** 6876 * mpt_set_taskmgmt_in_progress_flag - set flags associated with task management 6877 * @ioc: Pointer to MPT_ADAPTER structure 6878 * 6879 * Returns 0 for SUCCESS or -1 if FAILED. 6880 * 6881 * If -1 is return, then it was not possible to set the flags 6882 **/ 6883 int 6884 mpt_set_taskmgmt_in_progress_flag(MPT_ADAPTER *ioc) 6885 { 6886 unsigned long flags; 6887 int retval; 6888 6889 spin_lock_irqsave(&ioc->taskmgmt_lock, flags); 6890 if (ioc->ioc_reset_in_progress || ioc->taskmgmt_in_progress || 6891 (ioc->alt_ioc && ioc->alt_ioc->taskmgmt_in_progress)) { 6892 retval = -1; 6893 goto out; 6894 } 6895 retval = 0; 6896 ioc->taskmgmt_in_progress = 1; 6897 ioc->taskmgmt_quiesce_io = 1; 6898 if (ioc->alt_ioc) { 6899 ioc->alt_ioc->taskmgmt_in_progress = 1; 6900 ioc->alt_ioc->taskmgmt_quiesce_io = 1; 6901 } 6902 out: 6903 spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags); 6904 return retval; 6905 } 6906 EXPORT_SYMBOL(mpt_set_taskmgmt_in_progress_flag); 6907 6908 /** 6909 * mpt_clear_taskmgmt_in_progress_flag - clear flags associated with task management 6910 * @ioc: Pointer to MPT_ADAPTER structure 6911 * 6912 **/ 6913 void 6914 mpt_clear_taskmgmt_in_progress_flag(MPT_ADAPTER *ioc) 6915 { 6916 unsigned long flags; 6917 6918 spin_lock_irqsave(&ioc->taskmgmt_lock, flags); 6919 ioc->taskmgmt_in_progress = 0; 6920 ioc->taskmgmt_quiesce_io = 0; 6921 if (ioc->alt_ioc) { 6922 ioc->alt_ioc->taskmgmt_in_progress = 0; 6923 ioc->alt_ioc->taskmgmt_quiesce_io = 0; 6924 } 6925 spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags); 6926 } 6927 EXPORT_SYMBOL(mpt_clear_taskmgmt_in_progress_flag); 6928 6929 6930 /** 6931 * mpt_halt_firmware - Halts the firmware if it is operational and panic 6932 * the kernel 6933 * @ioc: Pointer to MPT_ADAPTER structure 6934 * 6935 **/ 6936 void 6937 mpt_halt_firmware(MPT_ADAPTER *ioc) 6938 { 6939 u32 ioc_raw_state; 6940 6941 ioc_raw_state = mpt_GetIocState(ioc, 0); 6942 6943 if ((ioc_raw_state & MPI_IOC_STATE_MASK) == 
MPI_IOC_STATE_FAULT) { 6944 printk(MYIOC_s_ERR_FMT "IOC is in FAULT state (%04xh)!!!\n", 6945 ioc->name, ioc_raw_state & MPI_DOORBELL_DATA_MASK); 6946 panic("%s: IOC Fault (%04xh)!!!\n", ioc->name, 6947 ioc_raw_state & MPI_DOORBELL_DATA_MASK); 6948 } else { 6949 CHIPREG_WRITE32(&ioc->chip->Doorbell, 0xC0FFEE00); 6950 panic("%s: Firmware is halted due to command timeout\n", 6951 ioc->name); 6952 } 6953 } 6954 EXPORT_SYMBOL(mpt_halt_firmware); 6955 6956 /** 6957 * mpt_SoftResetHandler - Issues a less expensive reset 6958 * @ioc: Pointer to MPT_ADAPTER structure 6959 * @sleepFlag: Indicates if sleep or schedule must be called. 6960 * 6961 * Returns 0 for SUCCESS or -1 if FAILED. 6962 * 6963 * Message Unit Reset - instructs the IOC to reset the Reply Post and 6964 * Free FIFO's. All the Message Frames on Reply Free FIFO are discarded. 6965 * All posted buffers are freed, and event notification is turned off. 6966 * IOC doesn't reply to any outstanding request. This will transfer IOC 6967 * to READY state. 6968 **/ 6969 static int 6970 mpt_SoftResetHandler(MPT_ADAPTER *ioc, int sleepFlag) 6971 { 6972 int rc; 6973 int ii; 6974 u8 cb_idx; 6975 unsigned long flags; 6976 u32 ioc_state; 6977 unsigned long time_count; 6978 6979 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "SoftResetHandler Entered!\n", 6980 ioc->name)); 6981 6982 ioc_state = mpt_GetIocState(ioc, 0) & MPI_IOC_STATE_MASK; 6983 6984 if (mpt_fwfault_debug) 6985 mpt_halt_firmware(ioc); 6986 6987 if (ioc_state == MPI_IOC_STATE_FAULT || 6988 ioc_state == MPI_IOC_STATE_RESET) { 6989 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT 6990 "skipping, either in FAULT or RESET state!\n", ioc->name)); 6991 return -1; 6992 } 6993 6994 if (ioc->bus_type == FC) { 6995 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT 6996 "skipping, because the bus type is FC!\n", ioc->name)); 6997 return -1; 6998 } 6999 7000 spin_lock_irqsave(&ioc->taskmgmt_lock, flags); 7001 if (ioc->ioc_reset_in_progress) { 7002 spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags); 7003 return -1; 7004 } 7005 ioc->ioc_reset_in_progress = 1; 7006 spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags); 7007 7008 rc = -1; 7009 7010 for (cb_idx = MPT_MAX_PROTOCOL_DRIVERS-1; cb_idx; cb_idx--) { 7011 if (MptResetHandlers[cb_idx]) 7012 mpt_signal_reset(cb_idx, ioc, MPT_IOC_SETUP_RESET); 7013 } 7014 7015 spin_lock_irqsave(&ioc->taskmgmt_lock, flags); 7016 if (ioc->taskmgmt_in_progress) { 7017 ioc->ioc_reset_in_progress = 0; 7018 spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags); 7019 return -1; 7020 } 7021 spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags); 7022 /* Disable reply interrupts (also blocks FreeQ) */ 7023 CHIPREG_WRITE32(&ioc->chip->IntMask, 0xFFFFFFFF); 7024 ioc->active = 0; 7025 time_count = jiffies; 7026 7027 rc = SendIocReset(ioc, MPI_FUNCTION_IOC_MESSAGE_UNIT_RESET, sleepFlag); 7028 7029 for (cb_idx = MPT_MAX_PROTOCOL_DRIVERS-1; cb_idx; cb_idx--) { 7030 if (MptResetHandlers[cb_idx]) 7031 mpt_signal_reset(cb_idx, ioc, MPT_IOC_PRE_RESET); 7032 } 7033 7034 if (rc) 7035 goto out; 7036 7037 ioc_state = mpt_GetIocState(ioc, 0) & MPI_IOC_STATE_MASK; 7038 if (ioc_state != MPI_IOC_STATE_READY) 7039 goto out; 7040 7041 for (ii = 0; ii < 5; ii++) { 7042 /* Get IOC facts! 
Allow 5 retries */ 7043 rc = GetIocFacts(ioc, sleepFlag, 7044 MPT_HOSTEVENT_IOC_RECOVER); 7045 if (rc == 0) 7046 break; 7047 if (sleepFlag == CAN_SLEEP) 7048 msleep(100); 7049 else 7050 mdelay(100); 7051 } 7052 if (ii == 5) 7053 goto out; 7054 7055 rc = PrimeIocFifos(ioc); 7056 if (rc != 0) 7057 goto out; 7058 7059 rc = SendIocInit(ioc, sleepFlag); 7060 if (rc != 0) 7061 goto out; 7062 7063 rc = SendEventNotification(ioc, 1, sleepFlag); 7064 if (rc != 0) 7065 goto out; 7066 7067 if (ioc->hard_resets < -1) 7068 ioc->hard_resets++; 7069 7070 /* 7071 * At this point, we know soft reset succeeded. 7072 */ 7073 7074 ioc->active = 1; 7075 CHIPREG_WRITE32(&ioc->chip->IntMask, MPI_HIM_DIM); 7076 7077 out: 7078 spin_lock_irqsave(&ioc->taskmgmt_lock, flags); 7079 ioc->ioc_reset_in_progress = 0; 7080 ioc->taskmgmt_quiesce_io = 0; 7081 ioc->taskmgmt_in_progress = 0; 7082 spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags); 7083 7084 if (ioc->active) { /* otherwise, hard reset coming */ 7085 for (cb_idx = MPT_MAX_PROTOCOL_DRIVERS-1; cb_idx; cb_idx--) { 7086 if (MptResetHandlers[cb_idx]) 7087 mpt_signal_reset(cb_idx, ioc, 7088 MPT_IOC_POST_RESET); 7089 } 7090 } 7091 7092 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT 7093 "SoftResetHandler: completed (%d seconds): %s\n", 7094 ioc->name, jiffies_to_msecs(jiffies - time_count)/1000, 7095 ((rc == 0) ? "SUCCESS" : "FAILED"))); 7096 7097 return rc; 7098 } 7099 7100 /** 7101 * mpt_Soft_Hard_ResetHandler - Try less expensive reset 7102 * @ioc: Pointer to MPT_ADAPTER structure 7103 * @sleepFlag: Indicates if sleep or schedule must be called. 7104 * 7105 * Returns 0 for SUCCESS or -1 if FAILED. 7106 * Try for softreset first, only if it fails go for expensive 7107 * HardReset. 7108 **/ 7109 int 7110 mpt_Soft_Hard_ResetHandler(MPT_ADAPTER *ioc, int sleepFlag) { 7111 int ret = -1; 7112 7113 ret = mpt_SoftResetHandler(ioc, sleepFlag); 7114 if (ret == 0) 7115 return ret; 7116 ret = mpt_HardResetHandler(ioc, sleepFlag); 7117 return ret; 7118 } 7119 EXPORT_SYMBOL(mpt_Soft_Hard_ResetHandler); 7120 7121 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 7122 /* 7123 * Reset Handling 7124 */ 7125 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 7126 /** 7127 * mpt_HardResetHandler - Generic reset handler 7128 * @ioc: Pointer to MPT_ADAPTER structure 7129 * @sleepFlag: Indicates if sleep or schedule must be called. 7130 * 7131 * Issues SCSI Task Management call based on input arg values. 7132 * If TaskMgmt fails, returns associated SCSI request. 7133 * 7134 * Remark: _HardResetHandler can be invoked from an interrupt thread (timer) 7135 * or a non-interrupt thread. In the former, must not call schedule(). 7136 * 7137 * Note: A return of -1 is a FATAL error case, as it means a 7138 * FW reload/initialization failed. 7139 * 7140 * Returns 0 for SUCCESS or -1 if FAILED. 7141 */ 7142 int 7143 mpt_HardResetHandler(MPT_ADAPTER *ioc, int sleepFlag) 7144 { 7145 int rc; 7146 u8 cb_idx; 7147 unsigned long flags; 7148 unsigned long time_count; 7149 7150 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "HardResetHandler Entered!\n", ioc->name)); 7151 #ifdef MFCNT 7152 printk(MYIOC_s_INFO_FMT "HardResetHandler Entered!\n", ioc->name); 7153 printk("MF count 0x%x !\n", ioc->mfcnt); 7154 #endif 7155 if (mpt_fwfault_debug) 7156 mpt_halt_firmware(ioc); 7157 7158 /* Reset the adapter. Prevent more than 1 call to 7159 * mpt_do_ioc_recovery at any instant in time. 
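 * If a reset is already in progress, the code below just sleeps until
 * that reset completes and returns its status rather than starting a
 * second recovery.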
7160 */ 7161 spin_lock_irqsave(&ioc->taskmgmt_lock, flags); 7162 if (ioc->ioc_reset_in_progress) { 7163 spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags); 7164 ioc->wait_on_reset_completion = 1; 7165 do { 7166 ssleep(1); 7167 } while (ioc->ioc_reset_in_progress == 1); 7168 ioc->wait_on_reset_completion = 0; 7169 return ioc->reset_status; 7170 } 7171 if (ioc->wait_on_reset_completion) { 7172 spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags); 7173 rc = 0; 7174 time_count = jiffies; 7175 goto exit; 7176 } 7177 ioc->ioc_reset_in_progress = 1; 7178 if (ioc->alt_ioc) 7179 ioc->alt_ioc->ioc_reset_in_progress = 1; 7180 spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags); 7181 7182 7183 /* The SCSI driver needs to adjust timeouts on all current 7184 * commands prior to the diagnostic reset being issued. 7185 * Prevents timeouts occurring during a diagnostic reset...very bad. 7186 * For all other protocol drivers, this is a no-op. 7187 */ 7188 for (cb_idx = MPT_MAX_PROTOCOL_DRIVERS-1; cb_idx; cb_idx--) { 7189 if (MptResetHandlers[cb_idx]) { 7190 mpt_signal_reset(cb_idx, ioc, MPT_IOC_SETUP_RESET); 7191 if (ioc->alt_ioc) 7192 mpt_signal_reset(cb_idx, ioc->alt_ioc, 7193 MPT_IOC_SETUP_RESET); 7194 } 7195 } 7196 7197 time_count = jiffies; 7198 rc = mpt_do_ioc_recovery(ioc, MPT_HOSTEVENT_IOC_RECOVER, sleepFlag); 7199 if (rc != 0) { 7200 printk(KERN_WARNING MYNAM 7201 ": WARNING - (%d) Cannot recover %s, doorbell=0x%08x\n", 7202 rc, ioc->name, mpt_GetIocState(ioc, 0)); 7203 } else { 7204 if (ioc->hard_resets < -1) 7205 ioc->hard_resets++; 7206 } 7207 7208 spin_lock_irqsave(&ioc->taskmgmt_lock, flags); 7209 ioc->ioc_reset_in_progress = 0; 7210 ioc->taskmgmt_quiesce_io = 0; 7211 ioc->taskmgmt_in_progress = 0; 7212 ioc->reset_status = rc; 7213 if (ioc->alt_ioc) { 7214 ioc->alt_ioc->ioc_reset_in_progress = 0; 7215 ioc->alt_ioc->taskmgmt_quiesce_io = 0; 7216 ioc->alt_ioc->taskmgmt_in_progress = 0; 7217 } 7218 spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags); 7219 7220 for (cb_idx = MPT_MAX_PROTOCOL_DRIVERS-1; cb_idx; cb_idx--) { 7221 if (MptResetHandlers[cb_idx]) { 7222 mpt_signal_reset(cb_idx, ioc, MPT_IOC_POST_RESET); 7223 if (ioc->alt_ioc) 7224 mpt_signal_reset(cb_idx, 7225 ioc->alt_ioc, MPT_IOC_POST_RESET); 7226 } 7227 } 7228 exit: 7229 dtmprintk(ioc, 7230 printk(MYIOC_s_DEBUG_FMT 7231 "HardResetHandler: completed (%d seconds): %s\n", ioc->name, 7232 jiffies_to_msecs(jiffies - time_count)/1000, ((rc == 0) ? 
7233 "SUCCESS" : "FAILED"))); 7234 7235 return rc; 7236 } 7237 7238 #ifdef CONFIG_FUSION_LOGGING 7239 static void 7240 mpt_display_event_info(MPT_ADAPTER *ioc, EventNotificationReply_t *pEventReply) 7241 { 7242 char *ds = NULL; 7243 u32 evData0; 7244 int ii; 7245 u8 event; 7246 char *evStr = ioc->evStr; 7247 7248 event = le32_to_cpu(pEventReply->Event) & 0xFF; 7249 evData0 = le32_to_cpu(pEventReply->Data[0]); 7250 7251 switch(event) { 7252 case MPI_EVENT_NONE: 7253 ds = "None"; 7254 break; 7255 case MPI_EVENT_LOG_DATA: 7256 ds = "Log Data"; 7257 break; 7258 case MPI_EVENT_STATE_CHANGE: 7259 ds = "State Change"; 7260 break; 7261 case MPI_EVENT_UNIT_ATTENTION: 7262 ds = "Unit Attention"; 7263 break; 7264 case MPI_EVENT_IOC_BUS_RESET: 7265 ds = "IOC Bus Reset"; 7266 break; 7267 case MPI_EVENT_EXT_BUS_RESET: 7268 ds = "External Bus Reset"; 7269 break; 7270 case MPI_EVENT_RESCAN: 7271 ds = "Bus Rescan Event"; 7272 break; 7273 case MPI_EVENT_LINK_STATUS_CHANGE: 7274 if (evData0 == MPI_EVENT_LINK_STATUS_FAILURE) 7275 ds = "Link Status(FAILURE) Change"; 7276 else 7277 ds = "Link Status(ACTIVE) Change"; 7278 break; 7279 case MPI_EVENT_LOOP_STATE_CHANGE: 7280 if (evData0 == MPI_EVENT_LOOP_STATE_CHANGE_LIP) 7281 ds = "Loop State(LIP) Change"; 7282 else if (evData0 == MPI_EVENT_LOOP_STATE_CHANGE_LPE) 7283 ds = "Loop State(LPE) Change"; 7284 else 7285 ds = "Loop State(LPB) Change"; 7286 break; 7287 case MPI_EVENT_LOGOUT: 7288 ds = "Logout"; 7289 break; 7290 case MPI_EVENT_EVENT_CHANGE: 7291 if (evData0) 7292 ds = "Events ON"; 7293 else 7294 ds = "Events OFF"; 7295 break; 7296 case MPI_EVENT_INTEGRATED_RAID: 7297 { 7298 u8 ReasonCode = (u8)(evData0 >> 16); 7299 switch (ReasonCode) { 7300 case MPI_EVENT_RAID_RC_VOLUME_CREATED : 7301 ds = "Integrated Raid: Volume Created"; 7302 break; 7303 case MPI_EVENT_RAID_RC_VOLUME_DELETED : 7304 ds = "Integrated Raid: Volume Deleted"; 7305 break; 7306 case MPI_EVENT_RAID_RC_VOLUME_SETTINGS_CHANGED : 7307 ds = "Integrated Raid: Volume Settings Changed"; 7308 break; 7309 case MPI_EVENT_RAID_RC_VOLUME_STATUS_CHANGED : 7310 ds = "Integrated Raid: Volume Status Changed"; 7311 break; 7312 case MPI_EVENT_RAID_RC_VOLUME_PHYSDISK_CHANGED : 7313 ds = "Integrated Raid: Volume Physdisk Changed"; 7314 break; 7315 case MPI_EVENT_RAID_RC_PHYSDISK_CREATED : 7316 ds = "Integrated Raid: Physdisk Created"; 7317 break; 7318 case MPI_EVENT_RAID_RC_PHYSDISK_DELETED : 7319 ds = "Integrated Raid: Physdisk Deleted"; 7320 break; 7321 case MPI_EVENT_RAID_RC_PHYSDISK_SETTINGS_CHANGED : 7322 ds = "Integrated Raid: Physdisk Settings Changed"; 7323 break; 7324 case MPI_EVENT_RAID_RC_PHYSDISK_STATUS_CHANGED : 7325 ds = "Integrated Raid: Physdisk Status Changed"; 7326 break; 7327 case MPI_EVENT_RAID_RC_DOMAIN_VAL_NEEDED : 7328 ds = "Integrated Raid: Domain Validation Needed"; 7329 break; 7330 case MPI_EVENT_RAID_RC_SMART_DATA : 7331 ds = "Integrated Raid; Smart Data"; 7332 break; 7333 case MPI_EVENT_RAID_RC_REPLACE_ACTION_STARTED : 7334 ds = "Integrated Raid: Replace Action Started"; 7335 break; 7336 default: 7337 ds = "Integrated Raid"; 7338 break; 7339 } 7340 break; 7341 } 7342 case MPI_EVENT_SCSI_DEVICE_STATUS_CHANGE: 7343 ds = "SCSI Device Status Change"; 7344 break; 7345 case MPI_EVENT_SAS_DEVICE_STATUS_CHANGE: 7346 { 7347 u8 id = (u8)(evData0); 7348 u8 channel = (u8)(evData0 >> 8); 7349 u8 ReasonCode = (u8)(evData0 >> 16); 7350 switch (ReasonCode) { 7351 case MPI_EVENT_SAS_DEV_STAT_RC_ADDED: 7352 snprintf(evStr, EVENT_DESCR_STR_SZ, 7353 "SAS Device Status Change: Added: " 7354 "id=%d 
channel=%d", id, channel); 7355 break; 7356 case MPI_EVENT_SAS_DEV_STAT_RC_NOT_RESPONDING: 7357 snprintf(evStr, EVENT_DESCR_STR_SZ, 7358 "SAS Device Status Change: Deleted: " 7359 "id=%d channel=%d", id, channel); 7360 break; 7361 case MPI_EVENT_SAS_DEV_STAT_RC_SMART_DATA: 7362 snprintf(evStr, EVENT_DESCR_STR_SZ, 7363 "SAS Device Status Change: SMART Data: " 7364 "id=%d channel=%d", id, channel); 7365 break; 7366 case MPI_EVENT_SAS_DEV_STAT_RC_NO_PERSIST_ADDED: 7367 snprintf(evStr, EVENT_DESCR_STR_SZ, 7368 "SAS Device Status Change: No Persistency: " 7369 "id=%d channel=%d", id, channel); 7370 break; 7371 case MPI_EVENT_SAS_DEV_STAT_RC_UNSUPPORTED: 7372 snprintf(evStr, EVENT_DESCR_STR_SZ, 7373 "SAS Device Status Change: Unsupported Device " 7374 "Discovered : id=%d channel=%d", id, channel); 7375 break; 7376 case MPI_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET: 7377 snprintf(evStr, EVENT_DESCR_STR_SZ, 7378 "SAS Device Status Change: Internal Device " 7379 "Reset : id=%d channel=%d", id, channel); 7380 break; 7381 case MPI_EVENT_SAS_DEV_STAT_RC_TASK_ABORT_INTERNAL: 7382 snprintf(evStr, EVENT_DESCR_STR_SZ, 7383 "SAS Device Status Change: Internal Task " 7384 "Abort : id=%d channel=%d", id, channel); 7385 break; 7386 case MPI_EVENT_SAS_DEV_STAT_RC_ABORT_TASK_SET_INTERNAL: 7387 snprintf(evStr, EVENT_DESCR_STR_SZ, 7388 "SAS Device Status Change: Internal Abort " 7389 "Task Set : id=%d channel=%d", id, channel); 7390 break; 7391 case MPI_EVENT_SAS_DEV_STAT_RC_CLEAR_TASK_SET_INTERNAL: 7392 snprintf(evStr, EVENT_DESCR_STR_SZ, 7393 "SAS Device Status Change: Internal Clear " 7394 "Task Set : id=%d channel=%d", id, channel); 7395 break; 7396 case MPI_EVENT_SAS_DEV_STAT_RC_QUERY_TASK_INTERNAL: 7397 snprintf(evStr, EVENT_DESCR_STR_SZ, 7398 "SAS Device Status Change: Internal Query " 7399 "Task : id=%d channel=%d", id, channel); 7400 break; 7401 default: 7402 snprintf(evStr, EVENT_DESCR_STR_SZ, 7403 "SAS Device Status Change: Unknown: " 7404 "id=%d channel=%d", id, channel); 7405 break; 7406 } 7407 break; 7408 } 7409 case MPI_EVENT_ON_BUS_TIMER_EXPIRED: 7410 ds = "Bus Timer Expired"; 7411 break; 7412 case MPI_EVENT_QUEUE_FULL: 7413 { 7414 u16 curr_depth = (u16)(evData0 >> 16); 7415 u8 channel = (u8)(evData0 >> 8); 7416 u8 id = (u8)(evData0); 7417 7418 snprintf(evStr, EVENT_DESCR_STR_SZ, 7419 "Queue Full: channel=%d id=%d depth=%d", 7420 channel, id, curr_depth); 7421 break; 7422 } 7423 case MPI_EVENT_SAS_SES: 7424 ds = "SAS SES Event"; 7425 break; 7426 case MPI_EVENT_PERSISTENT_TABLE_FULL: 7427 ds = "Persistent Table Full"; 7428 break; 7429 case MPI_EVENT_SAS_PHY_LINK_STATUS: 7430 { 7431 u8 LinkRates = (u8)(evData0 >> 8); 7432 u8 PhyNumber = (u8)(evData0); 7433 LinkRates = (LinkRates & MPI_EVENT_SAS_PLS_LR_CURRENT_MASK) >> 7434 MPI_EVENT_SAS_PLS_LR_CURRENT_SHIFT; 7435 switch (LinkRates) { 7436 case MPI_EVENT_SAS_PLS_LR_RATE_UNKNOWN: 7437 snprintf(evStr, EVENT_DESCR_STR_SZ, 7438 "SAS PHY Link Status: Phy=%d:" 7439 " Rate Unknown",PhyNumber); 7440 break; 7441 case MPI_EVENT_SAS_PLS_LR_RATE_PHY_DISABLED: 7442 snprintf(evStr, EVENT_DESCR_STR_SZ, 7443 "SAS PHY Link Status: Phy=%d:" 7444 " Phy Disabled",PhyNumber); 7445 break; 7446 case MPI_EVENT_SAS_PLS_LR_RATE_FAILED_SPEED_NEGOTIATION: 7447 snprintf(evStr, EVENT_DESCR_STR_SZ, 7448 "SAS PHY Link Status: Phy=%d:" 7449 " Failed Speed Nego",PhyNumber); 7450 break; 7451 case MPI_EVENT_SAS_PLS_LR_RATE_SATA_OOB_COMPLETE: 7452 snprintf(evStr, EVENT_DESCR_STR_SZ, 7453 "SAS PHY Link Status: Phy=%d:" 7454 " Sata OOB Completed",PhyNumber); 7455 break; 7456 case 
MPI_EVENT_SAS_PLS_LR_RATE_1_5: 7457 snprintf(evStr, EVENT_DESCR_STR_SZ, 7458 "SAS PHY Link Status: Phy=%d:" 7459 " Rate 1.5 Gbps",PhyNumber); 7460 break; 7461 case MPI_EVENT_SAS_PLS_LR_RATE_3_0: 7462 snprintf(evStr, EVENT_DESCR_STR_SZ, 7463 "SAS PHY Link Status: Phy=%d:" 7464 " Rate 3.0 Gbps", PhyNumber); 7465 break; 7466 case MPI_EVENT_SAS_PLS_LR_RATE_6_0: 7467 snprintf(evStr, EVENT_DESCR_STR_SZ, 7468 "SAS PHY Link Status: Phy=%d:" 7469 " Rate 6.0 Gbps", PhyNumber); 7470 break; 7471 default: 7472 snprintf(evStr, EVENT_DESCR_STR_SZ, 7473 "SAS PHY Link Status: Phy=%d", PhyNumber); 7474 break; 7475 } 7476 break; 7477 } 7478 case MPI_EVENT_SAS_DISCOVERY_ERROR: 7479 ds = "SAS Discovery Error"; 7480 break; 7481 case MPI_EVENT_IR_RESYNC_UPDATE: 7482 { 7483 u8 resync_complete = (u8)(evData0 >> 16); 7484 snprintf(evStr, EVENT_DESCR_STR_SZ, 7485 "IR Resync Update: Complete = %d:",resync_complete); 7486 break; 7487 } 7488 case MPI_EVENT_IR2: 7489 { 7490 u8 id = (u8)(evData0); 7491 u8 channel = (u8)(evData0 >> 8); 7492 u8 phys_num = (u8)(evData0 >> 24); 7493 u8 ReasonCode = (u8)(evData0 >> 16); 7494 7495 switch (ReasonCode) { 7496 case MPI_EVENT_IR2_RC_LD_STATE_CHANGED: 7497 snprintf(evStr, EVENT_DESCR_STR_SZ, 7498 "IR2: LD State Changed: " 7499 "id=%d channel=%d phys_num=%d", 7500 id, channel, phys_num); 7501 break; 7502 case MPI_EVENT_IR2_RC_PD_STATE_CHANGED: 7503 snprintf(evStr, EVENT_DESCR_STR_SZ, 7504 "IR2: PD State Changed " 7505 "id=%d channel=%d phys_num=%d", 7506 id, channel, phys_num); 7507 break; 7508 case MPI_EVENT_IR2_RC_BAD_BLOCK_TABLE_FULL: 7509 snprintf(evStr, EVENT_DESCR_STR_SZ, 7510 "IR2: Bad Block Table Full: " 7511 "id=%d channel=%d phys_num=%d", 7512 id, channel, phys_num); 7513 break; 7514 case MPI_EVENT_IR2_RC_PD_INSERTED: 7515 snprintf(evStr, EVENT_DESCR_STR_SZ, 7516 "IR2: PD Inserted: " 7517 "id=%d channel=%d phys_num=%d", 7518 id, channel, phys_num); 7519 break; 7520 case MPI_EVENT_IR2_RC_PD_REMOVED: 7521 snprintf(evStr, EVENT_DESCR_STR_SZ, 7522 "IR2: PD Removed: " 7523 "id=%d channel=%d phys_num=%d", 7524 id, channel, phys_num); 7525 break; 7526 case MPI_EVENT_IR2_RC_FOREIGN_CFG_DETECTED: 7527 snprintf(evStr, EVENT_DESCR_STR_SZ, 7528 "IR2: Foreign CFG Detected: " 7529 "id=%d channel=%d phys_num=%d", 7530 id, channel, phys_num); 7531 break; 7532 case MPI_EVENT_IR2_RC_REBUILD_MEDIUM_ERROR: 7533 snprintf(evStr, EVENT_DESCR_STR_SZ, 7534 "IR2: Rebuild Medium Error: " 7535 "id=%d channel=%d phys_num=%d", 7536 id, channel, phys_num); 7537 break; 7538 case MPI_EVENT_IR2_RC_DUAL_PORT_ADDED: 7539 snprintf(evStr, EVENT_DESCR_STR_SZ, 7540 "IR2: Dual Port Added: " 7541 "id=%d channel=%d phys_num=%d", 7542 id, channel, phys_num); 7543 break; 7544 case MPI_EVENT_IR2_RC_DUAL_PORT_REMOVED: 7545 snprintf(evStr, EVENT_DESCR_STR_SZ, 7546 "IR2: Dual Port Removed: " 7547 "id=%d channel=%d phys_num=%d", 7548 id, channel, phys_num); 7549 break; 7550 default: 7551 ds = "IR2"; 7552 break; 7553 } 7554 break; 7555 } 7556 case MPI_EVENT_SAS_DISCOVERY: 7557 { 7558 if (evData0) 7559 ds = "SAS Discovery: Start"; 7560 else 7561 ds = "SAS Discovery: Stop"; 7562 break; 7563 } 7564 case MPI_EVENT_LOG_ENTRY_ADDED: 7565 ds = "SAS Log Entry Added"; 7566 break; 7567 7568 case MPI_EVENT_SAS_BROADCAST_PRIMITIVE: 7569 { 7570 u8 phy_num = (u8)(evData0); 7571 u8 port_num = (u8)(evData0 >> 8); 7572 u8 port_width = (u8)(evData0 >> 16); 7573 u8 primitive = (u8)(evData0 >> 24); 7574 snprintf(evStr, EVENT_DESCR_STR_SZ, 7575 "SAS Broadcast Primitive: phy=%d port=%d " 7576 "width=%d primitive=0x%02x", 7577 phy_num, port_num, 
port_width, primitive); 7578 break; 7579 } 7580 7581 case MPI_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE: 7582 { 7583 u8 reason = (u8)(evData0); 7584 7585 switch (reason) { 7586 case MPI_EVENT_SAS_INIT_RC_ADDED: 7587 ds = "SAS Initiator Status Change: Added"; 7588 break; 7589 case MPI_EVENT_SAS_INIT_RC_REMOVED: 7590 ds = "SAS Initiator Status Change: Deleted"; 7591 break; 7592 default: 7593 ds = "SAS Initiator Status Change"; 7594 break; 7595 } 7596 break; 7597 } 7598 7599 case MPI_EVENT_SAS_INIT_TABLE_OVERFLOW: 7600 { 7601 u8 max_init = (u8)(evData0); 7602 u8 current_init = (u8)(evData0 >> 8); 7603 7604 snprintf(evStr, EVENT_DESCR_STR_SZ, 7605 "SAS Initiator Device Table Overflow: max initiators=%02d " 7606 "current initiators=%02d", 7607 max_init, current_init); 7608 break; 7609 } 7610 case MPI_EVENT_SAS_SMP_ERROR: 7611 { 7612 u8 status = (u8)(evData0); 7613 u8 port_num = (u8)(evData0 >> 8); 7614 u8 result = (u8)(evData0 >> 16); 7615 7616 if (status == MPI_EVENT_SAS_SMP_FUNCTION_RESULT_VALID) 7617 snprintf(evStr, EVENT_DESCR_STR_SZ, 7618 "SAS SMP Error: port=%d result=0x%02x", 7619 port_num, result); 7620 else if (status == MPI_EVENT_SAS_SMP_CRC_ERROR) 7621 snprintf(evStr, EVENT_DESCR_STR_SZ, 7622 "SAS SMP Error: port=%d : CRC Error", 7623 port_num); 7624 else if (status == MPI_EVENT_SAS_SMP_TIMEOUT) 7625 snprintf(evStr, EVENT_DESCR_STR_SZ, 7626 "SAS SMP Error: port=%d : Timeout", 7627 port_num); 7628 else if (status == MPI_EVENT_SAS_SMP_NO_DESTINATION) 7629 snprintf(evStr, EVENT_DESCR_STR_SZ, 7630 "SAS SMP Error: port=%d : No Destination", 7631 port_num); 7632 else if (status == MPI_EVENT_SAS_SMP_BAD_DESTINATION) 7633 snprintf(evStr, EVENT_DESCR_STR_SZ, 7634 "SAS SMP Error: port=%d : Bad Destination", 7635 port_num); 7636 else 7637 snprintf(evStr, EVENT_DESCR_STR_SZ, 7638 "SAS SMP Error: port=%d : status=0x%02x", 7639 port_num, status); 7640 break; 7641 } 7642 7643 case MPI_EVENT_SAS_EXPANDER_STATUS_CHANGE: 7644 { 7645 u8 reason = (u8)(evData0); 7646 7647 switch (reason) { 7648 case MPI_EVENT_SAS_EXP_RC_ADDED: 7649 ds = "Expander Status Change: Added"; 7650 break; 7651 case MPI_EVENT_SAS_EXP_RC_NOT_RESPONDING: 7652 ds = "Expander Status Change: Deleted"; 7653 break; 7654 default: 7655 ds = "Expander Status Change"; 7656 break; 7657 } 7658 break; 7659 } 7660 7661 /* 7662 * MPT base "custom" events may be added here... 7663 */ 7664 default: 7665 ds = "Unknown"; 7666 break; 7667 } 7668 if (ds) 7669 strlcpy(evStr, ds, EVENT_DESCR_STR_SZ); 7670 7671 7672 devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT 7673 "MPT event:(%02Xh) : %s\n", 7674 ioc->name, event, evStr)); 7675 7676 devtverboseprintk(ioc, printk(KERN_DEBUG MYNAM 7677 ": Event data:\n")); 7678 for (ii = 0; ii < le16_to_cpu(pEventReply->EventDataLength); ii++) 7679 devtverboseprintk(ioc, printk(" %08x", 7680 le32_to_cpu(pEventReply->Data[ii]))); 7681 devtverboseprintk(ioc, printk(KERN_DEBUG "\n")); 7682 } 7683 #endif 7684 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 7685 /** 7686 * ProcessEventNotification - Route EventNotificationReply to all event handlers 7687 * @ioc: Pointer to MPT_ADAPTER structure 7688 * @pEventReply: Pointer to EventNotification reply frame 7689 * @evHandlers: Pointer to integer, number of event handlers 7690 * 7691 * Routes a received EventNotificationReply to all currently registered 7692 * event handlers. 7693 * Returns sum of event handlers return values. 
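
/*
 * Worked example (illustrative only, not part of the driver): for
 * MPI_EVENT_SAS_DEVICE_STATUS_CHANGE the decode above treats the first
 * event-data word as bits [7:0] = id, bits [15:8] = channel and
 * bits [23:16] = ReasonCode.  The sample value below is hypothetical;
 * only the shifts are taken from mpt_display_event_info().
 *
 *	u32 evData0    = 0x00030102;		sample word (made up)
 *	u8  id         = (u8)(evData0);		-> 0x02
 *	u8  channel    = (u8)(evData0 >> 8);	-> 0x01
 *	u8  ReasonCode = (u8)(evData0 >> 16);	-> 0x03
 */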
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
 *	ProcessEventNotification - Route EventNotificationReply to all event handlers
 *	@ioc: Pointer to MPT_ADAPTER structure
 *	@pEventReply: Pointer to EventNotification reply frame
 *	@evHandlers: Pointer to integer, number of event handlers
 *
 *	Routes a received EventNotificationReply to all currently registered
 *	event handlers.
 *	Returns sum of event handlers return values.
 */
static int
ProcessEventNotification(MPT_ADAPTER *ioc, EventNotificationReply_t *pEventReply, int *evHandlers)
{
	u16 evDataLen;
	u32 evData0 = 0;
	int ii;
	u8 cb_idx;
	int r = 0;
	int handlers = 0;
	u8 event;

	/*
	 *  Do platform normalization of values
	 */
	event = le32_to_cpu(pEventReply->Event) & 0xFF;
	evDataLen = le16_to_cpu(pEventReply->EventDataLength);
	if (evDataLen) {
		evData0 = le32_to_cpu(pEventReply->Data[0]);
	}

#ifdef CONFIG_FUSION_LOGGING
	if (evDataLen)
		mpt_display_event_info(ioc, pEventReply);
#endif

	/*
	 *  Do general / base driver event processing
	 */
	switch(event) {
	case MPI_EVENT_EVENT_CHANGE:		/* 0A */
		if (evDataLen) {
			u8 evState = evData0 & 0xFF;

			/* CHECKME! What if evState unexpectedly says OFF (0)? */

			/* Update EventState field in cached IocFacts */
			if (ioc->facts.Function) {
				ioc->facts.EventState = evState;
			}
		}
		break;
	case MPI_EVENT_INTEGRATED_RAID:
		mptbase_raid_process_event_data(ioc,
		    (MpiEventDataRaid_t *)pEventReply->Data);
		break;
	default:
		break;
	}

	/*
	 * Should this event be logged? Events are written sequentially.
	 * When buffer is full, start again at the top.
	 */
	if (ioc->events && (ioc->eventTypes & ( 1 << event))) {
		int idx;

		idx = ioc->eventContext % MPTCTL_EVENT_LOG_SIZE;

		ioc->events[idx].event = event;
		ioc->events[idx].eventContext = ioc->eventContext;

		for (ii = 0; ii < 2; ii++) {
			if (ii < evDataLen)
				ioc->events[idx].data[ii] = le32_to_cpu(pEventReply->Data[ii]);
			else
				ioc->events[idx].data[ii] = 0;
		}

		ioc->eventContext++;
	}

	/*
	 *  Call each currently registered protocol event handler.
	 */
	for (cb_idx = MPT_MAX_PROTOCOL_DRIVERS-1; cb_idx; cb_idx--) {
		if (MptEvHandlers[cb_idx]) {
			devtverboseprintk(ioc, printk(MYIOC_s_DEBUG_FMT
			    "Routing Event to event handler #%d\n",
			    ioc->name, cb_idx));
			r += (*(MptEvHandlers[cb_idx]))(ioc, pEventReply);
			handlers++;
		}
	}
	/* FIXME?  Examine results here? */

	/*
	 *  If needed, send (a single) EventAck.
	 */
	if (pEventReply->AckRequired == MPI_EVENT_NOTIFICATION_ACK_REQUIRED) {
		devtverboseprintk(ioc, printk(MYIOC_s_DEBUG_FMT
		    "EventAck required\n", ioc->name));
		if ((ii = SendEventAck(ioc, pEventReply)) != 0) {
			devtverboseprintk(ioc, printk(MYIOC_s_DEBUG_FMT "SendEventAck returned %d\n",
			    ioc->name, ii));
		}
	}

	*evHandlers = handlers;
	return r;
}
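
/*
 * Illustrative sketch (not compiled here): a protocol driver whose handler
 * should be called from the routing loop in ProcessEventNotification()
 * first obtains a callback index from mpt_register() and then attaches an
 * event handler with mpt_event_register(); both symbols are exported at
 * the end of this file.  The handler prototype is an assumption based on
 * mptbase.h, and "example_event" / "cb_idx" are hypothetical names.
 *
 *	static int
 *	example_event(MPT_ADAPTER *ioc, EventNotificationReply_t *reply)
 *	{
 *		return 1;	summed into ProcessEventNotification()'s r
 *	}
 *
 *	cb_idx is the index previously returned by mpt_register():
 *
 *	mpt_event_register(cb_idx, example_event);
 */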
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
 *	mpt_fc_log_info - Log information returned from Fibre Channel IOC.
 *	@ioc: Pointer to MPT_ADAPTER structure
 *	@log_info: U32 LogInfo reply word from the IOC
 *
 *	Refer to lsi/mpi_log_fc.h.
 */
static void
mpt_fc_log_info(MPT_ADAPTER *ioc, u32 log_info)
{
	char *desc = "unknown";

	switch (log_info & 0xFF000000) {
	case MPI_IOCLOGINFO_FC_INIT_BASE:
		desc = "FCP Initiator";
		break;
	case MPI_IOCLOGINFO_FC_TARGET_BASE:
		desc = "FCP Target";
		break;
	case MPI_IOCLOGINFO_FC_LAN_BASE:
		desc = "LAN";
		break;
	case MPI_IOCLOGINFO_FC_MSG_BASE:
		desc = "MPI Message Layer";
		break;
	case MPI_IOCLOGINFO_FC_LINK_BASE:
		desc = "FC Link";
		break;
	case MPI_IOCLOGINFO_FC_CTX_BASE:
		desc = "Context Manager";
		break;
	case MPI_IOCLOGINFO_FC_INVALID_FIELD_BYTE_OFFSET:
		desc = "Invalid Field Offset";
		break;
	case MPI_IOCLOGINFO_FC_STATE_CHANGE:
		desc = "State Change Info";
		break;
	}

	printk(MYIOC_s_INFO_FMT "LogInfo(0x%08x): SubClass={%s}, Value=(0x%06x)\n",
	    ioc->name, log_info, desc, (log_info & 0xFFFFFF));
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
 *	mpt_spi_log_info - Log information returned from SCSI Parallel IOC.
 *	@ioc: Pointer to MPT_ADAPTER structure
 *	@log_info: U32 LogInfo word from the IOC
 *
 *	Refer to lsi/sp_log.h.
 */
static void
mpt_spi_log_info(MPT_ADAPTER *ioc, u32 log_info)
{
	u32 info = log_info & 0x00FF0000;
	char *desc = "unknown";

	switch (info) {
	case 0x00010000:
		desc = "bug! MID not found";
		break;

	case 0x00020000:
		desc = "Parity Error";
		break;

	case 0x00030000:
		desc = "ASYNC Outbound Overrun";
		break;

	case 0x00040000:
		desc = "SYNC Offset Error";
		break;

	case 0x00050000:
		desc = "BM Change";
		break;

	case 0x00060000:
		desc = "Msg In Overflow";
		break;

	case 0x00070000:
		desc = "DMA Error";
		break;

	case 0x00080000:
		desc = "Outbound DMA Overrun";
		break;

	case 0x00090000:
		desc = "Task Management";
		break;

	case 0x000A0000:
		desc = "Device Problem";
		break;

	case 0x000B0000:
		desc = "Invalid Phase Change";
		break;

	case 0x000C0000:
		desc = "Untagged Table Size";
		break;

	}

	printk(MYIOC_s_INFO_FMT "LogInfo(0x%08x): F/W: %s\n", ioc->name, log_info, desc);
}

/* strings for sas loginfo */
static char *originator_str[] = {
	"IOP",						/* 00h */
	"PL",						/* 01h */
	"IR"						/* 02h */
};
static char *iop_code_str[] = {
	NULL,						/* 00h */
	"Invalid SAS Address",				/* 01h */
	NULL,						/* 02h */
	"Invalid Page",					/* 03h */
	"Diag Message Error",				/* 04h */
	"Task Terminated",				/* 05h */
	"Enclosure Management",				/* 06h */
	"Target Mode"					/* 07h */
};
static char *pl_code_str[] = {
	NULL,						/* 00h */
	"Open Failure",					/* 01h */
	"Invalid Scatter Gather List",			/* 02h */
	"Wrong Relative Offset or Frame Length",	/* 03h */
	"Frame Transfer Error",				/* 04h */
	"Transmit Frame Connected Low",			/* 05h */
	"SATA Non-NCQ RW Error Bit Set",		/* 06h */
	"SATA Read Log Receive Data Error",		/* 07h */
	"SATA NCQ Fail All Commands After Error",	/* 08h */
	"SATA Error in Receive Set Device Bit FIS",	/* 09h */
	"Receive Frame Invalid Message",		/* 0Ah */
	"Receive Context Message Valid Error",		/* 0Bh */
	"Receive Frame Current Frame Error",		/* 0Ch */
	"SATA Link Down",				/* 0Dh */
	"Discovery SATA Init W IOS",			/* 0Eh */
	"Config Invalid Page",				/* 0Fh */
	"Discovery SATA Init Timeout",			/* 10h */
	"Reset",					/* 11h */
	"Abort",					/* 12h */
	"IO Not Yet Executed",				/* 13h */
	"IO Executed",					/* 14h */
	"Persistent Reservation Out Not Affiliation "
	"Owner",					/* 15h */
	"Open Transmit DMA Abort",			/* 16h */
	"IO Device Missing Delay Retry",		/* 17h */
	"IO Cancelled Due to Receive Error",		/* 18h */
	NULL,						/* 19h */
	NULL,						/* 1Ah */
	NULL,						/* 1Bh */
	NULL,						/* 1Ch */
	NULL,						/* 1Dh */
	NULL,						/* 1Eh */
	NULL,						/* 1Fh */
	"Enclosure Management"				/* 20h */
};
static char *ir_code_str[] = {
	"Raid Action Error",				/* 00h */
	NULL,						/* 01h */
	NULL,						/* 02h */
	NULL,						/* 03h */
	NULL,						/* 04h */
	NULL,						/* 05h */
	NULL,						/* 06h */
	NULL,						/* 07h */
	NULL						/* 08h */
};
static char *raid_sub_code_str[] = {
	NULL,						/* 00h */
	"Volume Creation Failed: Data Passed too "
	"Large",					/* 01h */
	"Volume Creation Failed: Duplicate Volumes "
	"Attempted",					/* 02h */
	"Volume Creation Failed: Max Number "
	"Supported Volumes Exceeded",			/* 03h */
	"Volume Creation Failed: DMA Error",		/* 04h */
	"Volume Creation Failed: Invalid Volume Type",	/* 05h */
	"Volume Creation Failed: Error Reading "
	"MFG Page 4",					/* 06h */
	"Volume Creation Failed: Creating Internal "
	"Structures",					/* 07h */
	NULL,						/* 08h */
	NULL,						/* 09h */
	NULL,						/* 0Ah */
	NULL,						/* 0Bh */
	NULL,						/* 0Ch */
	NULL,						/* 0Dh */
	NULL,						/* 0Eh */
	NULL,						/* 0Fh */
	"Activation failed: Already Active Volume",	/* 10h */
	"Activation failed: Unsupported Volume Type",	/* 11h */
	"Activation failed: Too Many Active Volumes",	/* 12h */
	"Activation failed: Volume ID in Use",		/* 13h */
	"Activation failed: Reported Failure",		/* 14h */
	"Activation failed: Importing a Volume",	/* 15h */
	NULL,						/* 16h */
	NULL,						/* 17h */
	NULL,						/* 18h */
	NULL,						/* 19h */
	NULL,						/* 1Ah */
	NULL,						/* 1Bh */
	NULL,						/* 1Ch */
	NULL,						/* 1Dh */
	NULL,						/* 1Eh */
	NULL,						/* 1Fh */
	"Phys Disk failed: Too Many Phys Disks",	/* 20h */
	"Phys Disk failed: Data Passed too Large",	/* 21h */
	"Phys Disk failed: DMA Error",			/* 22h */
	"Phys Disk failed: Invalid <channel:id>",	/* 23h */
	"Phys Disk failed: Creating Phys Disk Config "
	"Page",						/* 24h */
	NULL,						/* 25h */
	NULL,						/* 26h */
	NULL,						/* 27h */
	NULL,						/* 28h */
	NULL,						/* 29h */
	NULL,						/* 2Ah */
	NULL,						/* 2Bh */
	NULL,						/* 2Ch */
	NULL,						/* 2Dh */
	NULL,						/* 2Eh */
	NULL,						/* 2Fh */
	"Compatibility Error: IR Disabled",		/* 30h */
	"Compatibility Error: Inquiry Command Failed",	/* 31h */
	"Compatibility Error: Device not Direct Access "
	"Device ",					/* 32h */
	"Compatibility Error: Removable Device Found",	/* 33h */
	"Compatibility Error: Device SCSI Version not "
	"2 or Higher",					/* 34h */
	"Compatibility Error: SATA Device, 48 BIT LBA "
	"not Supported",				/* 35h */
	"Compatibility Error: Device doesn't have "
	"512 Byte Block Sizes",				/* 36h */
	"Compatibility Error: Volume Type Check Failed", /* 37h */
	"Compatibility Error: Volume Type is "
	"Unsupported by FW",				/* 38h */
	"Compatibility Error: Disk Drive too Small for "
	"use in Volume",				/* 39h */
	"Compatibility Error: Phys Disk for Create "
	"Volume not Found",				/* 3Ah */
	"Compatibility Error: Too Many or too Few "
	"Disks for Volume Type",			/* 3Bh */
Many or too Few " 8046 "Disks for Volume Type", /* 3Bh */ 8047 "Compatibility Error: Disk stripe Sizes " 8048 "Must be 64KB", /* 3Ch */ 8049 "Compatibility Error: IME Size Limited to < 2TB", /* 3Dh */ 8050 }; 8051 8052 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 8053 /** 8054 * mpt_sas_log_info - Log information returned from SAS IOC. 8055 * @ioc: Pointer to MPT_ADAPTER structure 8056 * @log_info: U32 LogInfo reply word from the IOC 8057 * @cb_idx: callback function's handle 8058 * 8059 * Refer to lsi/mpi_log_sas.h. 8060 **/ 8061 static void 8062 mpt_sas_log_info(MPT_ADAPTER *ioc, u32 log_info, u8 cb_idx) 8063 { 8064 union loginfo_type { 8065 u32 loginfo; 8066 struct { 8067 u32 subcode:16; 8068 u32 code:8; 8069 u32 originator:4; 8070 u32 bus_type:4; 8071 } dw; 8072 }; 8073 union loginfo_type sas_loginfo; 8074 char *originator_desc = NULL; 8075 char *code_desc = NULL; 8076 char *sub_code_desc = NULL; 8077 8078 sas_loginfo.loginfo = log_info; 8079 if ((sas_loginfo.dw.bus_type != 3 /*SAS*/) && 8080 (sas_loginfo.dw.originator < ARRAY_SIZE(originator_str))) 8081 return; 8082 8083 originator_desc = originator_str[sas_loginfo.dw.originator]; 8084 8085 switch (sas_loginfo.dw.originator) { 8086 8087 case 0: /* IOP */ 8088 if (sas_loginfo.dw.code < 8089 ARRAY_SIZE(iop_code_str)) 8090 code_desc = iop_code_str[sas_loginfo.dw.code]; 8091 break; 8092 case 1: /* PL */ 8093 if (sas_loginfo.dw.code < 8094 ARRAY_SIZE(pl_code_str)) 8095 code_desc = pl_code_str[sas_loginfo.dw.code]; 8096 break; 8097 case 2: /* IR */ 8098 if (sas_loginfo.dw.code >= 8099 ARRAY_SIZE(ir_code_str)) 8100 break; 8101 code_desc = ir_code_str[sas_loginfo.dw.code]; 8102 if (sas_loginfo.dw.subcode >= 8103 ARRAY_SIZE(raid_sub_code_str)) 8104 break; 8105 if (sas_loginfo.dw.code == 0) 8106 sub_code_desc = 8107 raid_sub_code_str[sas_loginfo.dw.subcode]; 8108 break; 8109 default: 8110 return; 8111 } 8112 8113 if (sub_code_desc != NULL) 8114 printk(MYIOC_s_INFO_FMT 8115 "LogInfo(0x%08x): Originator={%s}, Code={%s}," 8116 " SubCode={%s} cb_idx %s\n", 8117 ioc->name, log_info, originator_desc, code_desc, 8118 sub_code_desc, MptCallbacksName[cb_idx]); 8119 else if (code_desc != NULL) 8120 printk(MYIOC_s_INFO_FMT 8121 "LogInfo(0x%08x): Originator={%s}, Code={%s}," 8122 " SubCode(0x%04x) cb_idx %s\n", 8123 ioc->name, log_info, originator_desc, code_desc, 8124 sas_loginfo.dw.subcode, MptCallbacksName[cb_idx]); 8125 else 8126 printk(MYIOC_s_INFO_FMT 8127 "LogInfo(0x%08x): Originator={%s}, Code=(0x%02x)," 8128 " SubCode(0x%04x) cb_idx %s\n", 8129 ioc->name, log_info, originator_desc, 8130 sas_loginfo.dw.code, sas_loginfo.dw.subcode, 8131 MptCallbacksName[cb_idx]); 8132 } 8133 8134 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 8135 /** 8136 * mpt_iocstatus_info_config - IOCSTATUS information for config pages 8137 * @ioc: Pointer to MPT_ADAPTER structure 8138 * @ioc_status: U32 IOCStatus word from IOC 8139 * @mf: Pointer to MPT request frame 8140 * 8141 * Refer to lsi/mpi.h. 
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
 *	mpt_iocstatus_info_config - IOCSTATUS information for config pages
 *	@ioc: Pointer to MPT_ADAPTER structure
 *	@ioc_status: U32 IOCStatus word from IOC
 *	@mf: Pointer to MPT request frame
 *
 *	Refer to lsi/mpi.h.
 **/
static void
mpt_iocstatus_info_config(MPT_ADAPTER *ioc, u32 ioc_status, MPT_FRAME_HDR *mf)
{
	Config_t *pReq = (Config_t *)mf;
	char extend_desc[EVENT_DESCR_STR_SZ];
	char *desc = NULL;
	u32 form;
	u8 page_type;

	if (pReq->Header.PageType == MPI_CONFIG_PAGETYPE_EXTENDED)
		page_type = pReq->ExtPageType;
	else
		page_type = pReq->Header.PageType;

	/*
	 * ignore invalid page messages for GET_NEXT_HANDLE
	 */
	form = le32_to_cpu(pReq->PageAddress);
	if (ioc_status == MPI_IOCSTATUS_CONFIG_INVALID_PAGE) {
		if (page_type == MPI_CONFIG_EXTPAGETYPE_SAS_DEVICE ||
		    page_type == MPI_CONFIG_EXTPAGETYPE_SAS_EXPANDER ||
		    page_type == MPI_CONFIG_EXTPAGETYPE_ENCLOSURE) {
			if ((form >> MPI_SAS_DEVICE_PGAD_FORM_SHIFT) ==
			    MPI_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE)
				return;
		}
		if (page_type == MPI_CONFIG_PAGETYPE_FC_DEVICE)
			if ((form & MPI_FC_DEVICE_PGAD_FORM_MASK) ==
			    MPI_FC_DEVICE_PGAD_FORM_NEXT_DID)
				return;
	}

	snprintf(extend_desc, EVENT_DESCR_STR_SZ,
	    "type=%02Xh, page=%02Xh, action=%02Xh, form=%08Xh",
	    page_type, pReq->Header.PageNumber, pReq->Action, form);

	switch (ioc_status) {

	case MPI_IOCSTATUS_CONFIG_INVALID_ACTION: /* 0x0020 */
		desc = "Config Page Invalid Action";
		break;

	case MPI_IOCSTATUS_CONFIG_INVALID_TYPE:   /* 0x0021 */
		desc = "Config Page Invalid Type";
		break;

	case MPI_IOCSTATUS_CONFIG_INVALID_PAGE:   /* 0x0022 */
		desc = "Config Page Invalid Page";
		break;

	case MPI_IOCSTATUS_CONFIG_INVALID_DATA:   /* 0x0023 */
		desc = "Config Page Invalid Data";
		break;

	case MPI_IOCSTATUS_CONFIG_NO_DEFAULTS:    /* 0x0024 */
		desc = "Config Page No Defaults";
		break;

	case MPI_IOCSTATUS_CONFIG_CANT_COMMIT:    /* 0x0025 */
		desc = "Config Page Can't Commit";
		break;
	}

	if (!desc)
		return;

	dreplyprintk(ioc, printk(MYIOC_s_DEBUG_FMT "IOCStatus(0x%04X): %s: %s\n",
	    ioc->name, ioc_status, desc, extend_desc));
}

/**
 *	mpt_iocstatus_info - IOCSTATUS information returned from IOC.
 *	@ioc: Pointer to MPT_ADAPTER structure
 *	@ioc_status: U32 IOCStatus word from IOC
 *	@mf: Pointer to MPT request frame
 *
 *	Refer to lsi/mpi.h.
 **/
static void
mpt_iocstatus_info(MPT_ADAPTER *ioc, u32 ioc_status, MPT_FRAME_HDR *mf)
{
	u32 status = ioc_status & MPI_IOCSTATUS_MASK;
	char *desc = NULL;

	switch (status) {

/****************************************************************************/
/*  Common IOCStatus values for all replies                                 */
/****************************************************************************/

	case MPI_IOCSTATUS_INVALID_FUNCTION: /* 0x0001 */
		desc = "Invalid Function";
		break;

	case MPI_IOCSTATUS_BUSY: /* 0x0002 */
		desc = "Busy";
		break;

	case MPI_IOCSTATUS_INVALID_SGL: /* 0x0003 */
		desc = "Invalid SGL";
		break;

	case MPI_IOCSTATUS_INTERNAL_ERROR: /* 0x0004 */
		desc = "Internal Error";
		break;

	case MPI_IOCSTATUS_RESERVED: /* 0x0005 */
		desc = "Reserved";
		break;

	case MPI_IOCSTATUS_INSUFFICIENT_RESOURCES: /* 0x0006 */
		desc = "Insufficient Resources";
		break;

	case MPI_IOCSTATUS_INVALID_FIELD: /* 0x0007 */
		desc = "Invalid Field";
		break;

	case MPI_IOCSTATUS_INVALID_STATE: /* 0x0008 */
		desc = "Invalid State";
		break;

/****************************************************************************/
/*  Config IOCStatus values                                                 */
/****************************************************************************/

	case MPI_IOCSTATUS_CONFIG_INVALID_ACTION: /* 0x0020 */
	case MPI_IOCSTATUS_CONFIG_INVALID_TYPE:   /* 0x0021 */
	case MPI_IOCSTATUS_CONFIG_INVALID_PAGE:   /* 0x0022 */
	case MPI_IOCSTATUS_CONFIG_INVALID_DATA:   /* 0x0023 */
	case MPI_IOCSTATUS_CONFIG_NO_DEFAULTS:    /* 0x0024 */
	case MPI_IOCSTATUS_CONFIG_CANT_COMMIT:    /* 0x0025 */
		mpt_iocstatus_info_config(ioc, status, mf);
		break;

/****************************************************************************/
/*  SCSIIO Reply (SPI, FCP, SAS) initiator values                           */
/*                                                                          */
/*  Look at mptscsih_iocstatus_info_scsiio in mptscsih.c                    */
/*                                                                          */
/****************************************************************************/

	case MPI_IOCSTATUS_SCSI_RECOVERED_ERROR: /* 0x0040 */
	case MPI_IOCSTATUS_SCSI_DATA_UNDERRUN: /* 0x0045 */
	case MPI_IOCSTATUS_SCSI_INVALID_BUS: /* 0x0041 */
	case MPI_IOCSTATUS_SCSI_INVALID_TARGETID: /* 0x0042 */
	case MPI_IOCSTATUS_SCSI_DEVICE_NOT_THERE: /* 0x0043 */
	case MPI_IOCSTATUS_SCSI_DATA_OVERRUN: /* 0x0044 */
	case MPI_IOCSTATUS_SCSI_IO_DATA_ERROR: /* 0x0046 */
	case MPI_IOCSTATUS_SCSI_PROTOCOL_ERROR: /* 0x0047 */
	case MPI_IOCSTATUS_SCSI_TASK_TERMINATED: /* 0x0048 */
	case MPI_IOCSTATUS_SCSI_RESIDUAL_MISMATCH: /* 0x0049 */
	case MPI_IOCSTATUS_SCSI_TASK_MGMT_FAILED: /* 0x004A */
	case MPI_IOCSTATUS_SCSI_IOC_TERMINATED: /* 0x004B */
	case MPI_IOCSTATUS_SCSI_EXT_TERMINATED: /* 0x004C */
		break;

/****************************************************************************/
/*  SCSI Target values                                                      */
/****************************************************************************/

	case MPI_IOCSTATUS_TARGET_PRIORITY_IO: /* 0x0060 */
		desc = "Target: Priority IO";
		break;

	case MPI_IOCSTATUS_TARGET_INVALID_PORT: /* 0x0061 */
		desc = "Target: Invalid Port";
		break;

	case MPI_IOCSTATUS_TARGET_INVALID_IO_INDEX: /* 0x0062 */
		desc = "Target Invalid IO Index:";
		break;

	case MPI_IOCSTATUS_TARGET_ABORTED: /* 0x0063 */
		desc = "Target: Aborted";
		break;

	case MPI_IOCSTATUS_TARGET_NO_CONN_RETRYABLE: /* 0x0064 */
		desc = "Target: No Conn Retryable";
		break;

	case MPI_IOCSTATUS_TARGET_NO_CONNECTION: /* 0x0065 */
		desc = "Target: No Connection";
		break;

	case MPI_IOCSTATUS_TARGET_XFER_COUNT_MISMATCH: /* 0x006A */
		desc = "Target: Transfer Count Mismatch";
		break;

	case MPI_IOCSTATUS_TARGET_STS_DATA_NOT_SENT: /* 0x006B */
		desc = "Target: STS Data not Sent";
		break;

	case MPI_IOCSTATUS_TARGET_DATA_OFFSET_ERROR: /* 0x006D */
		desc = "Target: Data Offset Error";
		break;

	case MPI_IOCSTATUS_TARGET_TOO_MUCH_WRITE_DATA: /* 0x006E */
		desc = "Target: Too Much Write Data";
		break;

	case MPI_IOCSTATUS_TARGET_IU_TOO_SHORT: /* 0x006F */
		desc = "Target: IU Too Short";
		break;

	case MPI_IOCSTATUS_TARGET_ACK_NAK_TIMEOUT: /* 0x0070 */
		desc = "Target: ACK NAK Timeout";
		break;

	case MPI_IOCSTATUS_TARGET_NAK_RECEIVED: /* 0x0071 */
		desc = "Target: Nak Received";
		break;

/****************************************************************************/
/*  Fibre Channel Direct Access values                                      */
/****************************************************************************/

	case MPI_IOCSTATUS_FC_ABORTED: /* 0x0066 */
		desc = "FC: Aborted";
		break;

	case MPI_IOCSTATUS_FC_RX_ID_INVALID: /* 0x0067 */
		desc = "FC: RX ID Invalid";
		break;

	case MPI_IOCSTATUS_FC_DID_INVALID: /* 0x0068 */
		desc = "FC: DID Invalid";
		break;

	case MPI_IOCSTATUS_FC_NODE_LOGGED_OUT: /* 0x0069 */
		desc = "FC: Node Logged Out";
		break;

	case MPI_IOCSTATUS_FC_EXCHANGE_CANCELED: /* 0x006C */
		desc = "FC: Exchange Canceled";
		break;

/****************************************************************************/
/*  LAN values                                                              */
/****************************************************************************/

	case MPI_IOCSTATUS_LAN_DEVICE_NOT_FOUND: /* 0x0080 */
		desc = "LAN: Device not Found";
		break;

	case MPI_IOCSTATUS_LAN_DEVICE_FAILURE: /* 0x0081 */
		desc = "LAN: Device Failure";
		break;

	case MPI_IOCSTATUS_LAN_TRANSMIT_ERROR: /* 0x0082 */
		desc = "LAN: Transmit Error";
		break;

	case MPI_IOCSTATUS_LAN_TRANSMIT_ABORTED: /* 0x0083 */
		desc = "LAN: Transmit Aborted";
		break;

	case MPI_IOCSTATUS_LAN_RECEIVE_ERROR: /* 0x0084 */
		desc = "LAN: Receive Error";
		break;

	case MPI_IOCSTATUS_LAN_RECEIVE_ABORTED: /* 0x0085 */
		desc = "LAN: Receive Aborted";
		break;

	case MPI_IOCSTATUS_LAN_PARTIAL_PACKET: /* 0x0086 */
		desc = "LAN: Partial Packet";
		break;

	case MPI_IOCSTATUS_LAN_CANCELED: /* 0x0087 */
		desc = "LAN: Canceled";
		break;

/****************************************************************************/
/*  Serial Attached SCSI values                                             */
/****************************************************************************/

	case MPI_IOCSTATUS_SAS_SMP_REQUEST_FAILED: /* 0x0090 */
		desc = "SAS: SMP Request Failed";
		break;

	case MPI_IOCSTATUS_SAS_SMP_DATA_OVERRUN: /* 0x0091 */
		desc = "SAS: SMP Data Overrun";
		break;

	default:
		desc = "Others";
		break;
	}

	if (!desc)
		return;

	dreplyprintk(ioc, printk(MYIOC_s_DEBUG_FMT "IOCStatus(0x%04X): %s\n",
	    ioc->name, status, desc));
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
EXPORT_SYMBOL(mpt_attach);
EXPORT_SYMBOL(mpt_detach);
#ifdef CONFIG_PM
EXPORT_SYMBOL(mpt_resume);
EXPORT_SYMBOL(mpt_suspend);
#endif
EXPORT_SYMBOL(ioc_list);
EXPORT_SYMBOL(mpt_register);
EXPORT_SYMBOL(mpt_deregister);
EXPORT_SYMBOL(mpt_event_register);
EXPORT_SYMBOL(mpt_event_deregister);
EXPORT_SYMBOL(mpt_reset_register);
EXPORT_SYMBOL(mpt_reset_deregister);
EXPORT_SYMBOL(mpt_device_driver_register);
EXPORT_SYMBOL(mpt_device_driver_deregister);
EXPORT_SYMBOL(mpt_get_msg_frame);
EXPORT_SYMBOL(mpt_put_msg_frame);
EXPORT_SYMBOL(mpt_put_msg_frame_hi_pri);
EXPORT_SYMBOL(mpt_free_msg_frame);
EXPORT_SYMBOL(mpt_send_handshake_request);
EXPORT_SYMBOL(mpt_verify_adapter);
EXPORT_SYMBOL(mpt_GetIocState);
EXPORT_SYMBOL(mpt_print_ioc_summary);
EXPORT_SYMBOL(mpt_HardResetHandler);
EXPORT_SYMBOL(mpt_config);
EXPORT_SYMBOL(mpt_findImVolumes);
EXPORT_SYMBOL(mpt_alloc_fw_memory);
EXPORT_SYMBOL(mpt_free_fw_memory);
EXPORT_SYMBOL(mptbase_sas_persist_operation);
EXPORT_SYMBOL(mpt_raid_phys_disk_pg0);

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
 *	fusion_init - Fusion MPT base driver initialization routine.
 *
 *	Returns 0 for success, non-zero for failure.
 */
static int __init
fusion_init(void)
{
	u8 cb_idx;

	show_mptmod_ver(my_NAME, my_VERSION);
	printk(KERN_INFO COPYRIGHT "\n");

	for (cb_idx = 0; cb_idx < MPT_MAX_PROTOCOL_DRIVERS; cb_idx++) {
		MptCallbacks[cb_idx] = NULL;
		MptDriverClass[cb_idx] = MPTUNKNOWN_DRIVER;
		MptEvHandlers[cb_idx] = NULL;
		MptResetHandlers[cb_idx] = NULL;
	}

	/*  Register ourselves (mptbase) in order to facilitate
	 *  EventNotification handling.
	 */
	mpt_base_index = mpt_register(mptbase_reply, MPTBASE_DRIVER,
	    "mptbase_reply");

	/*  Register for hard reset handling callbacks.
	 */
	mpt_reset_register(mpt_base_index, mpt_ioc_reset);

#ifdef CONFIG_PROC_FS
	(void) procmpt_create();
#endif
	return 0;
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
 *	fusion_exit - Perform driver unload cleanup.
 *
 *	This routine frees all resources associated with each MPT adapter
 *	and removes all %MPT_PROCFS_MPTBASEDIR entries.
 */
static void __exit
fusion_exit(void)
{

	mpt_reset_deregister(mpt_base_index);

#ifdef CONFIG_PROC_FS
	procmpt_destroy();
#endif
}

module_init(fusion_init);
module_exit(fusion_exit);