// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * ipr.c -- driver for IBM Power Linux RAID adapters
 *
 * Written By: Brian King <brking@us.ibm.com>, IBM Corporation
 *
 * Copyright (C) 2003, 2004 IBM Corporation
 */

/*
 * Notes:
 *
 * This driver is used to control the following SCSI adapters:
 *
 * IBM iSeries: 5702, 5703, 2780, 5709, 570A, 570B
 *
 * IBM pSeries: PCI-X Dual Channel Ultra 320 SCSI RAID Adapter
 *              PCI-X Dual Channel Ultra 320 SCSI Adapter
 *              PCI-X Dual Channel Ultra 320 SCSI RAID Enablement Card
 *              Embedded SCSI adapter on p615 and p655 systems
 *
 * Supported Hardware Features:
 *	- Ultra 320 SCSI controller
 *	- PCI-X host interface
 *	- Embedded PowerPC RISC Processor and Hardware XOR DMA Engine
 *	- Non-Volatile Write Cache
 *	- Supports attachment of non-RAID disks, tape, and optical devices
 *	- RAID Levels 0, 5, 10
 *	- Hot spare
 *	- Background Parity Checking
 *	- Background Data Scrubbing
 *	- Ability to increase the capacity of an existing RAID 5 disk array
 *		by adding disks
 *
 * Driver Features:
 *	- Tagged command queuing
 *	- Adapter microcode download
 *	- PCI hot plug
 *	- SCSI device hot plug
 *
 */

#include <linux/fs.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/wait.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/blkdev.h>
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/libata.h>
#include <linux/hdreg.h>
#include <linux/reboot.h>
#include <linux/stringify.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/processor.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_cmnd.h>
#include "ipr.h"

/*
 *   Global Data
 */
static LIST_HEAD(ipr_ioa_head);
static unsigned int ipr_log_level = IPR_DEFAULT_LOG_LEVEL;
static unsigned int ipr_max_speed = 1;
static int ipr_testmode = 0;
static unsigned int ipr_fastfail = 0;
static unsigned int ipr_transop_timeout = 0;
static unsigned int ipr_debug = 0;
static unsigned int ipr_max_devs = IPR_DEFAULT_SIS64_DEVS;
static unsigned int ipr_dual_ioa_raid = 1;
static unsigned int ipr_number_of_msix = 16;
static unsigned int ipr_fast_reboot;
static DEFINE_SPINLOCK(ipr_driver_lock);

/* This table describes the differences between DMA controller chips */
static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
	{ /* Gemstone, Citrine, Obsidian, and Obsidian-E */
		.mailbox = 0x0042C,
		.max_cmds = 100,
		.cache_line_size = 0x20,
		.clear_isr = 1,
		.iopoll_weight = 0,
		{
			.set_interrupt_mask_reg = 0x0022C,
			.clr_interrupt_mask_reg = 0x00230,
			.clr_interrupt_mask_reg32 = 0x00230,
			.sense_interrupt_mask_reg = 0x0022C,
			.sense_interrupt_mask_reg32 = 0x0022C,
			.clr_interrupt_reg = 0x00228,
			.clr_interrupt_reg32 = 0x00228,
			.sense_interrupt_reg = 0x00224,
			.sense_interrupt_reg32 = 0x00224,
			.ioarrin_reg = 0x00404,
			.sense_uproc_interrupt_reg = 0x00214,
			.sense_uproc_interrupt_reg32 = 0x00214,
			.set_uproc_interrupt_reg = 0x00214,
			.set_uproc_interrupt_reg32 = 0x00214,
			.clr_uproc_interrupt_reg = 0x00218,
			.clr_uproc_interrupt_reg32 = 0x00218
		}
	},
	{ /* Snipe and Scamp */
		.mailbox = 0x0052C,
		.max_cmds = 100,
		.cache_line_size = 0x20,
		.clear_isr = 1,
		.iopoll_weight = 0,
		{
			.set_interrupt_mask_reg = 0x00288,
			.clr_interrupt_mask_reg = 0x0028C,
			.clr_interrupt_mask_reg32 = 0x0028C,
			.sense_interrupt_mask_reg = 0x00288,
			.sense_interrupt_mask_reg32 = 0x00288,
			.clr_interrupt_reg = 0x00284,
			.clr_interrupt_reg32 = 0x00284,
			.sense_interrupt_reg = 0x00280,
			.sense_interrupt_reg32 = 0x00280,
			.ioarrin_reg = 0x00504,
			.sense_uproc_interrupt_reg = 0x00290,
			.sense_uproc_interrupt_reg32 = 0x00290,
			.set_uproc_interrupt_reg = 0x00290,
			.set_uproc_interrupt_reg32 = 0x00290,
			.clr_uproc_interrupt_reg = 0x00294,
			.clr_uproc_interrupt_reg32 = 0x00294
		}
	},
	{ /* CRoC */
		.mailbox = 0x00044,
		.max_cmds = 1000,
		.cache_line_size = 0x20,
		.clear_isr = 0,
		.iopoll_weight = 64,
		{
			.set_interrupt_mask_reg = 0x00010,
			.clr_interrupt_mask_reg = 0x00018,
			.clr_interrupt_mask_reg32 = 0x0001C,
			.sense_interrupt_mask_reg = 0x00010,
			.sense_interrupt_mask_reg32 = 0x00014,
			.clr_interrupt_reg = 0x00008,
			.clr_interrupt_reg32 = 0x0000C,
			.sense_interrupt_reg = 0x00000,
			.sense_interrupt_reg32 = 0x00004,
			.ioarrin_reg = 0x00070,
			.sense_uproc_interrupt_reg = 0x00020,
			.sense_uproc_interrupt_reg32 = 0x00024,
			.set_uproc_interrupt_reg = 0x00020,
			.set_uproc_interrupt_reg32 = 0x00024,
			.clr_uproc_interrupt_reg = 0x00028,
			.clr_uproc_interrupt_reg32 = 0x0002C,
			.init_feedback_reg = 0x0005C,
			.dump_addr_reg = 0x00064,
			.dump_data_reg = 0x00068,
			.endian_swap_reg = 0x00084
		}
	},
};

static const struct ipr_chip_t ipr_chip[] = {
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, true, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2, true, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, true, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_RATTLESNAKE, true, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] }
};

static int ipr_max_bus_speeds[] = {
	IPR_80MBs_SCSI_RATE, IPR_U160_SCSI_RATE, IPR_U320_SCSI_RATE
};

MODULE_AUTHOR("Brian King <brking@us.ibm.com>");
MODULE_DESCRIPTION("IBM Power RAID SCSI Adapter Driver");
module_param_named(max_speed, ipr_max_speed, uint, 0);
MODULE_PARM_DESC(max_speed, "Maximum bus speed (0-2). Default: 1=U160. Speeds: 0=80 MB/s, 1=U160, 2=U320");
module_param_named(log_level, ipr_log_level, uint, 0);
MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of device driver");
module_param_named(testmode, ipr_testmode, int, 0);
MODULE_PARM_DESC(testmode, "DANGEROUS!!! Allows unsupported configurations");
module_param_named(fastfail, ipr_fastfail, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(fastfail, "Reduce timeouts and retries");
module_param_named(transop_timeout, ipr_transop_timeout, int, 0);
MODULE_PARM_DESC(transop_timeout, "Time in seconds to wait for adapter to come operational (default: 300)");
module_param_named(debug, ipr_debug, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Enable device driver debugging logging. Set to 1 to enable. (default: 0)");
module_param_named(dual_ioa_raid, ipr_dual_ioa_raid, int, 0);
MODULE_PARM_DESC(dual_ioa_raid, "Enable dual adapter RAID support. Set to 1 to enable. (default: 1)");
module_param_named(max_devs, ipr_max_devs, int, 0);
MODULE_PARM_DESC(max_devs, "Specify the maximum number of physical devices. "
		 "[Default=" __stringify(IPR_DEFAULT_SIS64_DEVS) "]");
module_param_named(number_of_msix, ipr_number_of_msix, int, 0);
MODULE_PARM_DESC(number_of_msix, "Specify the number of MSIX interrupts to use on capable adapters (1 - 16). (default:16)");
module_param_named(fast_reboot, ipr_fast_reboot, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(fast_reboot, "Skip adapter shutdown during reboot. Set to 1 to enable. (default: 0)");
MODULE_LICENSE("GPL");
MODULE_VERSION(IPR_DRIVER_VERSION);

/*  A constant array of IOASCs/URCs/Error Messages */
static const
struct ipr_error_table_t ipr_error_table[] = {
	{0x00000000, 1, IPR_DEFAULT_LOG_LEVEL,
	"8155: An unknown error was received"},
	{0x00330000, 0, 0,
	"Soft underlength error"},
	{0x005A0000, 0, 0,
	"Command to be cancelled not found"},
	{0x00808000, 0, 0,
	"Qualified success"},
	{0x01080000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFFE: Soft device bus error recovered by the IOA"},
	{0x01088100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4101: Soft device bus fabric error"},
	{0x01100100, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFC: Logical block guard error recovered by the device"},
	{0x01100300, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFC: Logical block reference tag error recovered by the device"},
	{0x01108300, 0, IPR_DEFAULT_LOG_LEVEL,
	"4171: Recovered scatter list tag / sequence number error"},
	{0x01109000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FF3D: Recovered logical block CRC error on IOA to Host transfer"},
	{0x01109200, 0, IPR_DEFAULT_LOG_LEVEL,
	"4171: Recovered logical block sequence number error on IOA to Host transfer"},
	{0x0110A000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFD: Recovered logical block reference tag error detected by the IOA"},
	{0x0110A100, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFD: Logical block guard error recovered by the IOA"},
	{0x01170600, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF9: Device sector reassign successful"},
	{0x01170900, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF7: Media error recovered by device rewrite procedures"},
	{0x01180200, 0, IPR_DEFAULT_LOG_LEVEL,
	"7001: IOA sector reassignment successful"},
	{0x01180500, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF9: Soft media error. Sector reassignment recommended"},
	{0x01180600, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF7: Media error recovered by IOA rewrite procedures"},
	{0x01418000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FF3D: Soft PCI bus error recovered by the IOA"},
	{0x01440000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Device hardware error recovered by the IOA"},
	{0x01448100, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Device hardware error recovered by the device"},
	{0x01448200, 1, IPR_DEFAULT_LOG_LEVEL,
	"FF3D: Soft IOA error recovered by the IOA"},
	{0x01448300, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFA: Undefined device response recovered by the IOA"},
	{0x014A0000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Device bus error, message or command phase"},
	{0x014A8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFE: Task Management Function failed"},
	{0x015D0000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Failure prediction threshold exceeded"},
	{0x015D9200, 0, IPR_DEFAULT_LOG_LEVEL,
	"8009: Impending cache battery pack failure"},
	{0x02040100, 0, 0,
	"Logical Unit in process of becoming ready"},
	{0x02040200, 0, 0,
	"Initializing command required"},
	{0x02040400, 0, 0,
	"34FF: Disk device format in progress"},
	{0x02040C00, 0, 0,
	"Logical unit not accessible, target port in unavailable state"},
	{0x02048000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9070: IOA requested reset"},
	{0x023F0000, 0, 0,
	"Synchronization required"},
	{0x02408500, 0, 0,
	"IOA microcode download required"},
	{0x02408600, 0, 0,
	"Device bus connection is prohibited by host"},
	{0x024E0000, 0, 0,
	"No ready, IOA shutdown"},
	{0x025A0000, 0, 0,
	"Not ready, IOA has been shutdown"},
	{0x02670100, 0, IPR_DEFAULT_LOG_LEVEL,
	"3020: Storage subsystem configuration error"},
	{0x03110B00, 0, 0,
	"FFF5: Medium error, data unreadable, recommend reassign"},
	{0x03110C00, 0, 0,
	"7000: Medium error, data unreadable, do not reassign"},
	{0x03310000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF3: Disk media format bad"},
	{0x04050000, 0, IPR_DEFAULT_LOG_LEVEL,
	"3002: Addressed device failed to respond to selection"},
	{0x04080000, 1, IPR_DEFAULT_LOG_LEVEL,
	"3100: Device bus error"},
	{0x04080100, 0, IPR_DEFAULT_LOG_LEVEL,
	"3109: IOA timed out a device command"},
	{0x04088000, 0, 0,
	"3120: SCSI bus is not operational"},
	{0x04088100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4100: Hard device bus fabric error"},
	{0x04100100, 0, IPR_DEFAULT_LOG_LEVEL,
	"310C: Logical block guard error detected by the device"},
	{0x04100300, 0, IPR_DEFAULT_LOG_LEVEL,
	"310C: Logical block reference tag error detected by the device"},
	{0x04108300, 1, IPR_DEFAULT_LOG_LEVEL,
	"4170: Scatter list tag / sequence number error"},
	{0x04109000, 1, IPR_DEFAULT_LOG_LEVEL,
	"8150: Logical block CRC error on IOA to Host transfer"},
	{0x04109200, 1, IPR_DEFAULT_LOG_LEVEL,
	"4170: Logical block sequence number error on IOA to Host transfer"},
	{0x0410A000, 0, IPR_DEFAULT_LOG_LEVEL,
	"310D: Logical block reference tag error detected by the IOA"},
	{0x0410A100, 0, IPR_DEFAULT_LOG_LEVEL,
	"310D: Logical block guard error detected by the IOA"},
	{0x04118000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9000: IOA reserved area data check"},
	{0x04118100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9001: IOA reserved area invalid data pattern"},
	{0x04118200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9002: IOA reserved area LRC error"},
	{0x04118300, 1, IPR_DEFAULT_LOG_LEVEL,
	"Hardware Error, IOA metadata access error"},
	{0x04320000, 0, IPR_DEFAULT_LOG_LEVEL,
	"102E: Out of alternate sectors for disk storage"},
	{0x04330000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Data transfer underlength error"},
	{0x04338000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Data transfer overlength error"},
	{0x043E0100, 0, IPR_DEFAULT_LOG_LEVEL,
	"3400: Logical unit failure"},
	{0x04408500, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Device microcode is corrupt"},
	{0x04418000, 1, IPR_DEFAULT_LOG_LEVEL,
	"8150: PCI bus error"},
	{0x04430000, 1, 0,
	"Unsupported device bus message received"},
	{0x04440000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Disk device problem"},
	{0x04448200, 1, IPR_DEFAULT_LOG_LEVEL,
	"8150: Permanent IOA failure"},
	{0x04448300, 0, IPR_DEFAULT_LOG_LEVEL,
	"3010: Disk device returned wrong response to IOA"},
	{0x04448400, 0, IPR_DEFAULT_LOG_LEVEL,
	"8151: IOA microcode error"},
	{0x04448500, 0, 0,
	"Device bus status error"},
	{0x04448600, 0, IPR_DEFAULT_LOG_LEVEL,
	"8157: IOA error requiring IOA reset to recover"},
	{0x04448700, 0, 0,
	"ATA device status error"},
	{0x04490000, 0, 0,
	"Message reject received from the device"},
	{0x04449200, 0, IPR_DEFAULT_LOG_LEVEL,
	"8008: A permanent cache battery pack failure occurred"},
	{0x0444A000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9090: Disk unit has been modified after the last known status"},
	{0x0444A200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9081: IOA detected device error"},
	{0x0444A300, 0, IPR_DEFAULT_LOG_LEVEL,
	"9082: IOA detected device error"},
	{0x044A0000, 1, IPR_DEFAULT_LOG_LEVEL,
	"3110: Device bus error, message or command phase"},
	{0x044A8000, 1, IPR_DEFAULT_LOG_LEVEL,
	"3110: SAS Command / Task Management Function failed"},
	{0x04670400, 0, IPR_DEFAULT_LOG_LEVEL,
	"9091: Incorrect hardware configuration change has been detected"},
	{0x04678000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9073: Invalid multi-adapter configuration"},
	{0x04678100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4010: Incorrect connection between cascaded expanders"},
	{0x04678200, 0, IPR_DEFAULT_LOG_LEVEL,
	"4020: Connections exceed IOA design limits"},
	{0x04678300, 0, IPR_DEFAULT_LOG_LEVEL,
	"4030: Incorrect multipath connection"},
	{0x04679000, 0, IPR_DEFAULT_LOG_LEVEL,
	"4110: Unsupported enclosure function"},
	{0x04679800, 0, IPR_DEFAULT_LOG_LEVEL,
	"4120: SAS cable VPD cannot be read"},
	{0x046E0000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Command to logical unit failed"},
	{0x05240000, 1, 0,
	"Illegal request, invalid request type or request packet"},
	{0x05250000, 0, 0,
	"Illegal request, invalid resource handle"},
	{0x05258000, 0, 0,
	"Illegal request, commands not allowed to this device"},
	{0x05258100, 0, 0,
	"Illegal request, command not allowed to a secondary adapter"},
	{0x05258200, 0, 0,
	"Illegal request, command not allowed to a non-optimized resource"},
	{0x05260000, 0, 0,
	"Illegal request, invalid field in parameter list"},
	{0x05260100, 0, 0,
	"Illegal request, parameter not supported"},
	{0x05260200, 0, 0,
	"Illegal request, parameter value invalid"},
	{0x052C0000, 0, 0,
	"Illegal request, command sequence error"},
	{0x052C8000, 1, 0,
	"Illegal request, dual adapter support not enabled"},
	{0x052C8100, 1, 0,
	"Illegal request, another cable connector was physically disabled"},
	{0x054E8000, 1, 0,
	"Illegal request, inconsistent group id/group count"},
	{0x06040500, 0, IPR_DEFAULT_LOG_LEVEL,
	"9031: Array protection temporarily suspended, protection resuming"},
	{0x06040600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9040: Array protection temporarily suspended, protection resuming"},
	{0x060B0100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4080: IOA exceeded maximum operating temperature"},
	{0x060B8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"4085: Service required"},
	{0x060B8100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4086: SAS Adapter Hardware Configuration Error"},
	{0x06288000, 0, IPR_DEFAULT_LOG_LEVEL,
	"3140: Device bus not ready to ready transition"},
	{0x06290000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFB: SCSI bus was reset"},
	{0x06290500, 0, 0,
	"FFFE: SCSI bus transition to single ended"},
	{0x06290600, 0, 0,
	"FFFE: SCSI bus transition to LVD"},
	{0x06298000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFB: SCSI bus was reset by another initiator"},
	{0x063F0300, 0, IPR_DEFAULT_LOG_LEVEL,
	"3029: A device replacement has occurred"},
	{0x063F8300, 0, IPR_DEFAULT_LOG_LEVEL,
	"4102: Device bus fabric performance degradation"},
	{0x064C8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9051: IOA cache data exists for a missing or failed device"},
	{0x064C8100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9055: Auxiliary cache IOA contains cache data needed by the primary IOA"},
	{0x06670100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9025: Disk unit is not supported at its physical location"},
	{0x06670600, 0, IPR_DEFAULT_LOG_LEVEL,
	"3020: IOA detected a SCSI bus configuration error"},
	{0x06678000, 0, IPR_DEFAULT_LOG_LEVEL,
	"3150: SCSI bus configuration error"},
	{0x06678100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9074: Asymmetric advanced function disk configuration"},
	{0x06678300, 0, IPR_DEFAULT_LOG_LEVEL,
	"4040: Incomplete multipath connection between IOA and enclosure"},
	{0x06678400, 0, IPR_DEFAULT_LOG_LEVEL,
	"4041: Incomplete multipath connection between enclosure and device"},
	{0x06678500, 0, IPR_DEFAULT_LOG_LEVEL,
	"9075: Incomplete multipath connection between IOA and remote IOA"},
	{0x06678600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9076: Configuration error, missing remote IOA"},
	{0x06679100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4050: Enclosure does not support a required multipath function"},
	{0x06679800, 0, IPR_DEFAULT_LOG_LEVEL,
	"4121: Configuration error, required cable is missing"},
	{0x06679900, 0, IPR_DEFAULT_LOG_LEVEL,
	"4122: Cable is not plugged into the correct location on remote IOA"},
	{0x06679A00, 0, IPR_DEFAULT_LOG_LEVEL,
	"4123: Configuration error, invalid cable vital product data"},
	{0x06679B00, 0, IPR_DEFAULT_LOG_LEVEL,
	"4124: Configuration error, both cable ends are plugged into the same IOA"},
	{0x06690000, 0, IPR_DEFAULT_LOG_LEVEL,
	"4070: Logically bad block written on device"},
	{0x06690200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9041: Array protection temporarily suspended"},
	{0x06698200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9042: Corrupt array parity detected on specified device"},
	{0x066B0200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9030: Array no longer protected due to missing or failed disk unit"},
	{0x066B8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9071: Link operational transition"},
	{0x066B8100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9072: Link not operational transition"},
	{0x066B8200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9032: Array exposed but still protected"},
	{0x066B8300, 0, IPR_DEBUG_LOG_LEVEL,
	"70DD: Device forced failed by disrupt device command"},
	{0x066B9100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4061: Multipath redundancy level got better"},
	{0x066B9200, 0, IPR_DEFAULT_LOG_LEVEL,
	"4060: Multipath redundancy level got worse"},
	{0x06808100, 0, IPR_DEBUG_LOG_LEVEL,
	"9083: Device raw mode enabled"},
	{0x06808200, 0, IPR_DEBUG_LOG_LEVEL,
	"9084: Device raw mode disabled"},
	{0x07270000, 0, 0,
	"Failure due to other device"},
	{0x07278000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9008: IOA does not support functions expected by devices"},
	{0x07278100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9010: Cache data associated with attached devices cannot be found"},
	{0x07278200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9011: Cache data belongs to devices other than those attached"},
	{0x07278400, 0, IPR_DEFAULT_LOG_LEVEL,
	"9020: Array missing 2 or more devices with only 1 device present"},
	{0x07278500, 0, IPR_DEFAULT_LOG_LEVEL,
	"9021: Array missing 2 or more devices with 2 or more devices present"},
	{0x07278600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9022: Exposed array is missing a required device"},
	{0x07278700, 0, IPR_DEFAULT_LOG_LEVEL,
	"9023: Array member(s) not at required physical locations"},
	{0x07278800, 0, IPR_DEFAULT_LOG_LEVEL,
	"9024: Array not functional due to present hardware configuration"},
	{0x07278900, 0, IPR_DEFAULT_LOG_LEVEL,
	"9026: Array not functional due to present hardware configuration"},
	{0x07278A00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9027: Array is missing a device and parity is out of sync"},
	{0x07278B00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9028: Maximum number of arrays already exist"},
	{0x07278C00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9050: Required cache data cannot be located for a disk unit"},
	{0x07278D00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9052: Cache data exists for a device that has been modified"},
	{0x07278F00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9054: IOA resources not available due to previous problems"},
	{0x07279100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9092: Disk unit requires initialization before use"},
	{0x07279200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9029: Incorrect hardware configuration change has been detected"},
	{0x07279600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9060: One or more disk pairs are missing from an array"},
	{0x07279700, 0, IPR_DEFAULT_LOG_LEVEL,
	"9061: One or more disks are missing from an array"},
	{0x07279800, 0, IPR_DEFAULT_LOG_LEVEL,
	"9062: One or more disks are missing from an array"},
	{0x07279900, 0, IPR_DEFAULT_LOG_LEVEL,
	"9063: Maximum number of functional arrays has been exceeded"},
	{0x07279A00, 0, 0,
	"Data protect, other volume set problem"},
	{0x0B260000, 0, 0,
	"Aborted command, invalid descriptor"},
	{0x0B3F9000, 0, 0,
	"Target operating conditions have changed, dual adapter takeover"},
	{0x0B530200, 0, 0,
	"Aborted command, medium removal prevented"},
	{0x0B5A0000, 0, 0,
	"Command terminated by host"},
	{0x0B5B8000, 0, 0,
	"Aborted command, command terminated by host"}
};

static const struct ipr_ses_table_entry ipr_ses_table[] = {
	{ "2104-DL1        ", "XXXXXXXXXXXXXXXX", 80 },
	{ "2104-TL1        ", "XXXXXXXXXXXXXXXX", 80 },
	{ "HSBP07M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 7 slot */
	{ "HSBP05M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 5 slot */
	{ "HSBP05M S U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Bowtie */
	{ "HSBP06E ASU2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* MartinFenning */
	{ "2104-DU3        ", "XXXXXXXXXXXXXXXX", 160 },
	{ "2104-TU3        ", "XXXXXXXXXXXXXXXX", 160 },
	{ "HSBP04C RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "HSBP06E RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "St  V1S2        ", "XXXXXXXXXXXXXXXX", 160 },
	{ "HSBPD4M  PU3SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "VSBPD1H   U3SCSI", "XXXXXXX*XXXXXXXX", 160 }
};

/*
 *  Function Prototypes
 */
static int ipr_reset_alert(struct ipr_cmnd *);
static void ipr_process_ccn(struct ipr_cmnd *);
static void ipr_process_error(struct ipr_cmnd *);
static void ipr_reset_ioa_job(struct ipr_cmnd *);
static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *,
				   enum ipr_shutdown_type);

#ifdef CONFIG_SCSI_IPR_TRACE
/**
 * ipr_trc_hook - Add a trace entry to the driver trace
 * @ipr_cmd:	ipr command struct
 * @type:	trace type
 * @add_data:	additional data
 *
 * Return value:
 * 	none
 **/
static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd,
			 u8 type, u32 add_data)
{
	struct ipr_trace_entry *trace_entry;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	unsigned int trace_index;

	trace_index = atomic_add_return(1, &ioa_cfg->trace_index) & IPR_TRACE_INDEX_MASK;
	trace_entry = &ioa_cfg->trace[trace_index];
	trace_entry->time = jiffies;
	trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0];
	trace_entry->type = type;
	if (ipr_cmd->ioa_cfg->sis64)
		trace_entry->ata_op_code = ipr_cmd->i.ata_ioadl.regs.command;
	else
		trace_entry->ata_op_code = ipr_cmd->ioarcb.u.add_data.u.regs.command;
	trace_entry->cmd_index = ipr_cmd->cmd_index & 0xff;
	trace_entry->res_handle = ipr_cmd->ioarcb.res_handle;
	trace_entry->u.add_data = add_data;
	wmb();
}
#else
#define ipr_trc_hook(ipr_cmd, type, add_data) do { } while (0)
#endif

/**
 * ipr_lock_and_done - Acquire lock and complete command
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 *	none
 **/
static void ipr_lock_and_done(struct ipr_cmnd *ipr_cmd)
{
	unsigned long lock_flags;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	ipr_cmd->done(ipr_cmd);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
}

/**
 * ipr_reinit_ipr_cmnd - Re-initialize an IPR Cmnd block for reuse
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 * 	none
 **/
static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
	struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64;
	dma_addr_t dma_addr = ipr_cmd->dma_addr;
	int hrrq_id;

	hrrq_id = ioarcb->cmd_pkt.hrrq_id;
	memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
	ioarcb->cmd_pkt.hrrq_id = hrrq_id;
	ioarcb->data_transfer_length = 0;
	ioarcb->read_data_transfer_length = 0;
	ioarcb->ioadl_len = 0;
	ioarcb->read_ioadl_len = 0;

	if (ipr_cmd->ioa_cfg->sis64) {
		ioarcb->u.sis64_addr_data.data_ioadl_addr =
			cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
		ioasa64->u.gata.status = 0;
	} else {
		ioarcb->write_ioadl_addr =
			cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
		ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
		ioasa->u.gata.status = 0;
	}

	ioasa->hdr.ioasc = 0;
	ioasa->hdr.residual_data_len = 0;
	ipr_cmd->scsi_cmd = NULL;
	ipr_cmd->qc = NULL;
	ipr_cmd->sense_buffer[0] = 0;
	ipr_cmd->dma_use_sg = 0;
}

/**
 * ipr_init_ipr_cmnd - Initialize an IPR Cmnd block
 * @ipr_cmd:	ipr command struct
 * @fast_done:	fast done function call-back
 *
 * Return value:
 * 	none
 **/
static void ipr_init_ipr_cmnd(struct ipr_cmnd *ipr_cmd,
			      void (*fast_done) (struct ipr_cmnd *))
{
	ipr_reinit_ipr_cmnd(ipr_cmd);
	ipr_cmd->u.scratch = 0;
	ipr_cmd->sibling = NULL;
	ipr_cmd->eh_comp = NULL;
	ipr_cmd->fast_done = fast_done;
	timer_setup(&ipr_cmd->timer, NULL, 0);
}

/**
 * __ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block
 * @hrrq:	hrr queue
 *
 * Return value:
 * 	pointer to ipr command struct
 **/
static
struct ipr_cmnd *__ipr_get_free_ipr_cmnd(struct ipr_hrr_queue *hrrq)
{
	struct ipr_cmnd *ipr_cmd = NULL;

	if (likely(!list_empty(&hrrq->hrrq_free_q))) {
		ipr_cmd = list_entry(hrrq->hrrq_free_q.next,
			struct ipr_cmnd, queue);
		list_del(&ipr_cmd->queue);
	}

	return ipr_cmd;
}

/**
 * ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block and initialize it
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 *	pointer to ipr command struct
 **/
static
struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_cmnd *ipr_cmd =
		__ipr_get_free_ipr_cmnd(&ioa_cfg->hrrq[IPR_INIT_HRRQ]);
	ipr_init_ipr_cmnd(ipr_cmd, ipr_lock_and_done);
	return ipr_cmd;
}

/**
 * ipr_mask_and_clear_interrupts - Mask all and clear specified interrupts
 * @ioa_cfg:	ioa config struct
 * @clr_ints:	interrupts to clear
 *
 * This function masks all interrupts on the adapter, then clears the
 * interrupts specified in the mask
 *
 * Return value:
 * 	none
 **/
static void ipr_mask_and_clear_interrupts(struct ipr_ioa_cfg *ioa_cfg,
					  u32 clr_ints)
{
	int i;

	/* Stop new interrupts */
	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
		spin_lock(&ioa_cfg->hrrq[i]._lock);
		ioa_cfg->hrrq[i].allow_interrupts = 0;
		spin_unlock(&ioa_cfg->hrrq[i]._lock);
	}

	/* Set interrupt mask to stop all new interrupts */
	if (ioa_cfg->sis64)
		writeq(~0, ioa_cfg->regs.set_interrupt_mask_reg);
	else
		writel(~0, ioa_cfg->regs.set_interrupt_mask_reg);

	/* Clear any pending interrupts */
	if (ioa_cfg->sis64)
		writel(~0, ioa_cfg->regs.clr_interrupt_reg);
	writel(clr_ints, ioa_cfg->regs.clr_interrupt_reg32);
	readl(ioa_cfg->regs.sense_interrupt_reg);
}

/**
 * ipr_save_pcix_cmd_reg - Save PCI-X command register
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 * 	0 on success / -EIO on failure
 **/
static int ipr_save_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
{
	int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);

	if (pcix_cmd_reg == 0)
		return 0;

	if (pci_read_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
				 &ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
		dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n");
		return -EIO;
	}

	ioa_cfg->saved_pcix_cmd_reg |= PCI_X_CMD_DPERR_E | PCI_X_CMD_ERO;
	return 0;
}

/**
 * ipr_set_pcix_cmd_reg - Setup PCI-X command register
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 * 	0 on success / -EIO on failure
 **/
static int ipr_set_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
{
	int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);

	if (pcix_cmd_reg) {
		if (pci_write_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
					  ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
			dev_err(&ioa_cfg->pdev->dev, "Failed to setup PCI-X command register\n");
			return -EIO;
		}
	}

	return 0;
}

/**
 * __ipr_sata_eh_done - done function for aborted SATA commands
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked for ops generated to SATA
 * devices which are being aborted.
 *
 * Return value:
 * 	none
 **/
static void __ipr_sata_eh_done(struct ipr_cmnd *ipr_cmd)
{
	struct ata_queued_cmd *qc = ipr_cmd->qc;
	struct ipr_sata_port *sata_port = qc->ap->private_data;

	qc->err_mask |= AC_ERR_OTHER;
	sata_port->ioasa.status |= ATA_BUSY;
	ata_qc_complete(qc);
	if (ipr_cmd->eh_comp)
		complete(ipr_cmd->eh_comp);
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
}

/**
 * ipr_sata_eh_done - done function for aborted SATA commands
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked for ops generated to SATA
 * devices which are being aborted.
 *
 * Return value:
 * 	none
 **/
static void ipr_sata_eh_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_hrr_queue *hrrq = ipr_cmd->hrrq;
	unsigned long hrrq_flags;

	spin_lock_irqsave(&hrrq->_lock, hrrq_flags);
	__ipr_sata_eh_done(ipr_cmd);
	spin_unlock_irqrestore(&hrrq->_lock, hrrq_flags);
}

/**
 * __ipr_scsi_eh_done - mid-layer done function for aborted ops
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked by the interrupt handler for
 * ops generated by the SCSI mid-layer which are being aborted.
 *
 * Return value:
 * 	none
 **/
static void __ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
{
	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;

	scsi_cmd->result |= (DID_ERROR << 16);

	scsi_dma_unmap(ipr_cmd->scsi_cmd);
	scsi_done(scsi_cmd);
	if (ipr_cmd->eh_comp)
		complete(ipr_cmd->eh_comp);
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
}

/**
 * ipr_scsi_eh_done - mid-layer done function for aborted ops
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked by the interrupt handler for
 * ops generated by the SCSI mid-layer which are being aborted.
 *
 * Return value:
 * 	none
 **/
static void ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
{
	unsigned long hrrq_flags;
	struct ipr_hrr_queue *hrrq = ipr_cmd->hrrq;

	spin_lock_irqsave(&hrrq->_lock, hrrq_flags);
	__ipr_scsi_eh_done(ipr_cmd);
	spin_unlock_irqrestore(&hrrq->_lock, hrrq_flags);
}

/**
 * ipr_fail_all_ops - Fails all outstanding ops.
 * @ioa_cfg:	ioa config struct
 *
 * This function fails all outstanding ops.
 *
 * Return value:
 * 	none
 **/
static void ipr_fail_all_ops(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_cmnd *ipr_cmd, *temp;
	struct ipr_hrr_queue *hrrq;

	ENTER;
	for_each_hrrq(hrrq, ioa_cfg) {
		spin_lock(&hrrq->_lock);
		list_for_each_entry_safe(ipr_cmd,
					temp, &hrrq->hrrq_pending_q, queue) {
			list_del(&ipr_cmd->queue);

			ipr_cmd->s.ioasa.hdr.ioasc =
				cpu_to_be32(IPR_IOASC_IOA_WAS_RESET);
			ipr_cmd->s.ioasa.hdr.ilid =
				cpu_to_be32(IPR_DRIVER_ILID);

			if (ipr_cmd->scsi_cmd)
				ipr_cmd->done = __ipr_scsi_eh_done;
			else if (ipr_cmd->qc)
				ipr_cmd->done = __ipr_sata_eh_done;

			ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH,
				     IPR_IOASC_IOA_WAS_RESET);
			del_timer(&ipr_cmd->timer);
			ipr_cmd->done(ipr_cmd);
		}
		spin_unlock(&hrrq->_lock);
	}
	LEAVE;
}

/**
 * ipr_send_command - Send driver initiated requests.
 * @ipr_cmd:	ipr command struct
 *
 * This function sends a command to the adapter using the correct write call.
 * In the case of sis64, calculate the ioarcb size required. Then or in the
 * appropriate bits.
 *
 * Return value:
 * 	none
 **/
static void ipr_send_command(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	dma_addr_t send_dma_addr = ipr_cmd->dma_addr;

	if (ioa_cfg->sis64) {
		/* The default size is 256 bytes */
		send_dma_addr |= 0x1;

		/* If the number of ioadls * size of ioadl > 128 bytes,
		   then use a 512 byte ioarcb */
		if (ipr_cmd->dma_use_sg * sizeof(struct ipr_ioadl64_desc) > 128)
			send_dma_addr |= 0x4;
		writeq(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
	} else
		writel(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
}

/**
 * ipr_do_req - Send driver initiated requests.
 * @ipr_cmd:	ipr command struct
 * @done:	done function
 * @timeout_func:	timeout function
 * @timeout:	timeout value
 *
 * This function sends the specified command to the adapter with the
 * timeout given. The done function is invoked on command completion.
 *
 * Return value:
 * 	none
 **/
static void ipr_do_req(struct ipr_cmnd *ipr_cmd,
		       void (*done) (struct ipr_cmnd *),
		       void (*timeout_func) (struct timer_list *), u32 timeout)
{
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);

	ipr_cmd->done = done;

	ipr_cmd->timer.expires = jiffies + timeout;
	ipr_cmd->timer.function = timeout_func;

	add_timer(&ipr_cmd->timer);

	ipr_trc_hook(ipr_cmd, IPR_TRACE_START, 0);

	ipr_send_command(ipr_cmd);
}

/**
 * ipr_internal_cmd_done - Op done function for an internally generated op.
 * @ipr_cmd:	ipr command struct
 *
 * This function is the op done function for an internally generated,
 * blocking op. It simply wakes the sleeping thread.
 *
 * Return value:
 * 	none
 **/
static void ipr_internal_cmd_done(struct ipr_cmnd *ipr_cmd)
{
	if (ipr_cmd->sibling)
		ipr_cmd->sibling = NULL;
	else
		complete(&ipr_cmd->completion);
}

/**
 * ipr_init_ioadl - initialize the ioadl for the correct SIS type
 * @ipr_cmd:	ipr command struct
 * @dma_addr:	dma address
 * @len:	transfer length
 * @flags:	ioadl flag value
 *
 * This function initializes an ioadl in the case where there is only a single
 * descriptor.
 *
 * Return value:
 * 	nothing
 **/
static void ipr_init_ioadl(struct ipr_cmnd *ipr_cmd, dma_addr_t dma_addr,
			   u32 len, int flags)
{
	struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
	struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;

	ipr_cmd->dma_use_sg = 1;

	if (ipr_cmd->ioa_cfg->sis64) {
		ioadl64->flags = cpu_to_be32(flags);
		ioadl64->data_len = cpu_to_be32(len);
		ioadl64->address = cpu_to_be64(dma_addr);

		ipr_cmd->ioarcb.ioadl_len =
			cpu_to_be32(sizeof(struct ipr_ioadl64_desc));
		ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
	} else {
		ioadl->flags_and_data_len = cpu_to_be32(flags | len);
		ioadl->address = cpu_to_be32(dma_addr);

		if (flags == IPR_IOADL_FLAGS_READ_LAST) {
			ipr_cmd->ioarcb.read_ioadl_len =
				cpu_to_be32(sizeof(struct ipr_ioadl_desc));
			ipr_cmd->ioarcb.read_data_transfer_length = cpu_to_be32(len);
		} else {
			ipr_cmd->ioarcb.ioadl_len =
				cpu_to_be32(sizeof(struct ipr_ioadl_desc));
			ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
		}
	}
}

/**
 * ipr_send_blocking_cmd - Send command and sleep on its completion.
 * @ipr_cmd:	ipr command struct
 * @timeout_func:	function to invoke if command times out
 * @timeout:	timeout
 *
 * Return value:
 * 	none
 **/
static void ipr_send_blocking_cmd(struct ipr_cmnd *ipr_cmd,
				  void (*timeout_func) (struct timer_list *),
				  u32 timeout)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	init_completion(&ipr_cmd->completion);
	ipr_do_req(ipr_cmd, ipr_internal_cmd_done, timeout_func, timeout);

	spin_unlock_irq(ioa_cfg->host->host_lock);
	wait_for_completion(&ipr_cmd->completion);
	spin_lock_irq(ioa_cfg->host->host_lock);
}

/**
 * ipr_get_hrrq_index - Select a host response queue for a new command
 * @ioa_cfg:	ioa config struct
 *
 * Round-robins across the response queues, skipping queue 0, which is
 * reserved for adapter initialization commands, whenever more than one
 * queue is configured.
 *
 * Return value:
 * 	host response queue index
 **/
static int ipr_get_hrrq_index(struct ipr_ioa_cfg *ioa_cfg)
{
	unsigned int hrrq;

	if (ioa_cfg->hrrq_num == 1)
		hrrq = 0;
	else {
		hrrq = atomic_add_return(1, &ioa_cfg->hrrq_index);
		hrrq = (hrrq % (ioa_cfg->hrrq_num - 1)) + 1;
	}
	return hrrq;
}

/**
 * ipr_send_hcam - Send an HCAM to the adapter.
 * @ioa_cfg:	ioa config struct
 * @type:	HCAM type
 * @hostrcb:	hostrcb struct
 *
 * This function will send a Host Controlled Async command to the adapter.
 * If HCAMs are currently not allowed to be issued to the adapter, it will
 * place the hostrcb on the free queue.
 *
 * Return value:
 * 	none
 **/
static void ipr_send_hcam(struct ipr_ioa_cfg *ioa_cfg, u8 type,
			  struct ipr_hostrcb *hostrcb)
{
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioarcb *ioarcb;

	if (ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
		ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
		list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
		list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_pending_q);

		ipr_cmd->u.hostrcb = hostrcb;
		ioarcb = &ipr_cmd->ioarcb;

		ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
		ioarcb->cmd_pkt.request_type = IPR_RQTYPE_HCAM;
		ioarcb->cmd_pkt.cdb[0] = IPR_HOST_CONTROLLED_ASYNC;
		ioarcb->cmd_pkt.cdb[1] = type;
		ioarcb->cmd_pkt.cdb[7] = (sizeof(hostrcb->hcam) >> 8) & 0xff;
		ioarcb->cmd_pkt.cdb[8] = sizeof(hostrcb->hcam) & 0xff;

		ipr_init_ioadl(ipr_cmd, hostrcb->hostrcb_dma,
			       sizeof(hostrcb->hcam), IPR_IOADL_FLAGS_READ_LAST);

		if (type == IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE)
			ipr_cmd->done = ipr_process_ccn;
		else
			ipr_cmd->done = ipr_process_error;

		ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_IOA_RES_ADDR);

		ipr_send_command(ipr_cmd);
	} else {
		list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
	}
}

/**
 * ipr_update_ata_class - Update the ata class in the resource entry
 * @res:	resource entry struct
 * @proto:	cfgte device bus protocol value
 *
 * Return value:
 * 	none
 **/
static void ipr_update_ata_class(struct ipr_resource_entry *res, unsigned int proto)
{
	switch (proto) {
	case IPR_PROTO_SATA:
	case IPR_PROTO_SAS_STP:
		res->ata_class = ATA_DEV_ATA;
		break;
	case IPR_PROTO_SATA_ATAPI:
	case IPR_PROTO_SAS_STP_ATAPI:
		res->ata_class = ATA_DEV_ATAPI;
		break;
	default:
		res->ata_class = ATA_DEV_UNKNOWN;
		break;
	}
}

/**
 * ipr_init_res_entry - Initialize a resource entry struct.
 * @res:	resource entry struct
 * @cfgtew:	config table entry wrapper struct
 *
 * Return value:
 * 	none
 **/
static void ipr_init_res_entry(struct ipr_resource_entry *res,
			       struct ipr_config_table_entry_wrapper *cfgtew)
{
	int found = 0;
	unsigned int proto;
	struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;
	struct ipr_resource_entry *gscsi_res = NULL;

	res->needs_sync_complete = 0;
	res->in_erp = 0;
	res->add_to_ml = 0;
	res->del_from_ml = 0;
	res->resetting_device = 0;
	res->reset_occurred = 0;
	res->sdev = NULL;
	res->sata_port = NULL;

	if (ioa_cfg->sis64) {
		proto = cfgtew->u.cfgte64->proto;
		res->flags = be16_to_cpu(cfgtew->u.cfgte64->flags);
		res->res_flags = be16_to_cpu(cfgtew->u.cfgte64->res_flags);
		res->qmodel = IPR_QUEUEING_MODEL64(res);
		res->type = cfgtew->u.cfgte64->res_type;

		memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
			sizeof(res->res_path));

		res->bus = 0;
		memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
			sizeof(res->dev_lun.scsi_lun));
		res->lun = scsilun_to_int(&res->dev_lun);

		if (res->type == IPR_RES_TYPE_GENERIC_SCSI) {
			list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue) {
				if (gscsi_res->dev_id == cfgtew->u.cfgte64->dev_id) {
					found = 1;
					res->target = gscsi_res->target;
					break;
				}
			}
			if (!found) {
				res->target = find_first_zero_bit(ioa_cfg->target_ids,
								  ioa_cfg->max_devs_supported);
				set_bit(res->target, ioa_cfg->target_ids);
			}
		} else if (res->type == IPR_RES_TYPE_IOAFP) {
			res->bus = IPR_IOAFP_VIRTUAL_BUS;
			res->target = 0;
		} else if (res->type == IPR_RES_TYPE_ARRAY) {
			res->bus = IPR_ARRAY_VIRTUAL_BUS;
			res->target = find_first_zero_bit(ioa_cfg->array_ids,
							  ioa_cfg->max_devs_supported);
			set_bit(res->target, ioa_cfg->array_ids);
		} else if (res->type == IPR_RES_TYPE_VOLUME_SET) {
			res->bus = IPR_VSET_VIRTUAL_BUS;
			res->target = find_first_zero_bit(ioa_cfg->vset_ids,
							  ioa_cfg->max_devs_supported);
			set_bit(res->target, ioa_cfg->vset_ids);
		} else {
			res->target = find_first_zero_bit(ioa_cfg->target_ids,
							  ioa_cfg->max_devs_supported);
			set_bit(res->target, ioa_cfg->target_ids);
		}
	} else {
		proto = cfgtew->u.cfgte->proto;
		res->qmodel = IPR_QUEUEING_MODEL(res);
		res->flags = cfgtew->u.cfgte->flags;
		if (res->flags & IPR_IS_IOA_RESOURCE)
			res->type = IPR_RES_TYPE_IOAFP;
		else
			res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;

		res->bus = cfgtew->u.cfgte->res_addr.bus;
		res->target = cfgtew->u.cfgte->res_addr.target;
		res->lun = cfgtew->u.cfgte->res_addr.lun;
		res->lun_wwn = get_unaligned_be64(cfgtew->u.cfgte->lun_wwn);
	}

	ipr_update_ata_class(res, proto);
}

/**
 * ipr_is_same_device - Determine if two devices are the same.
 * @res:	resource entry struct
 * @cfgtew:	config table entry wrapper struct
 *
 * Return value:
 * 	1 if the devices are the same / 0 otherwise
 **/
static int ipr_is_same_device(struct ipr_resource_entry *res,
			      struct ipr_config_table_entry_wrapper *cfgtew)
{
	if (res->ioa_cfg->sis64) {
		if (!memcmp(&res->dev_id, &cfgtew->u.cfgte64->dev_id,
			    sizeof(cfgtew->u.cfgte64->dev_id)) &&
		    !memcmp(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
			    sizeof(cfgtew->u.cfgte64->lun))) {
			return 1;
		}
	} else {
		if (res->bus == cfgtew->u.cfgte->res_addr.bus &&
		    res->target == cfgtew->u.cfgte->res_addr.target &&
		    res->lun == cfgtew->u.cfgte->res_addr.lun)
			return 1;
	}

	return 0;
}

/**
 * __ipr_format_res_path - Format the resource path for printing.
 * @res_path:	resource path
 * @buffer:	buffer
 * @len:	length of buffer provided
 *
 * Return value:
 * 	pointer to buffer
 **/
static char *__ipr_format_res_path(u8 *res_path, char *buffer, int len)
{
	int i;
	char *p = buffer;

	*p = '\0';
	p += scnprintf(p, buffer + len - p, "%02X", res_path[0]);
	for (i = 1; res_path[i] != 0xff && i < IPR_RES_PATH_BYTES; i++)
		p += scnprintf(p, buffer + len - p, "-%02X", res_path[i]);

	return buffer;
}

/**
 * ipr_format_res_path - Format the resource path for printing.
 * @ioa_cfg:	ioa config struct
 * @res_path:	resource path
 * @buffer:	buffer
 * @len:	length of buffer provided
 *
 * Return value:
 *	pointer to buffer
 **/
static char *ipr_format_res_path(struct ipr_ioa_cfg *ioa_cfg,
				 u8 *res_path, char *buffer, int len)
{
	char *p = buffer;

	*p = '\0';
	p += scnprintf(p, buffer + len - p, "%d/", ioa_cfg->host->host_no);
	__ipr_format_res_path(res_path, p, len - (p - buffer));
	return buffer;
}

/**
 * ipr_update_res_entry - Update the resource entry.
 * @res:	resource entry struct
 * @cfgtew:	config table entry wrapper struct
 *
 * Return value:
 *      none
 **/
static void ipr_update_res_entry(struct ipr_resource_entry *res,
				 struct ipr_config_table_entry_wrapper *cfgtew)
{
	char buffer[IPR_MAX_RES_PATH_LENGTH];
	unsigned int proto;
	int new_path = 0;

	if (res->ioa_cfg->sis64) {
		res->flags = be16_to_cpu(cfgtew->u.cfgte64->flags);
		res->res_flags = be16_to_cpu(cfgtew->u.cfgte64->res_flags);
		res->type = cfgtew->u.cfgte64->res_type;

		memcpy(&res->std_inq_data, &cfgtew->u.cfgte64->std_inq_data,
			sizeof(struct ipr_std_inq_data));

		res->qmodel = IPR_QUEUEING_MODEL64(res);
		proto = cfgtew->u.cfgte64->proto;
		res->res_handle = cfgtew->u.cfgte64->res_handle;
		res->dev_id = cfgtew->u.cfgte64->dev_id;

		memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
			sizeof(res->dev_lun.scsi_lun));

		if (memcmp(res->res_path, &cfgtew->u.cfgte64->res_path,
			sizeof(res->res_path))) {
			memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
				sizeof(res->res_path));
			new_path = 1;
		}

		if (res->sdev && new_path)
			sdev_printk(KERN_INFO, res->sdev, "Resource path: %s\n",
				    ipr_format_res_path(res->ioa_cfg,
					res->res_path, buffer, sizeof(buffer)));
	} else {
		res->flags = cfgtew->u.cfgte->flags;
		if (res->flags & IPR_IS_IOA_RESOURCE)
			res->type = IPR_RES_TYPE_IOAFP;
		else
			res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;

		memcpy(&res->std_inq_data, &cfgtew->u.cfgte->std_inq_data,
			sizeof(struct ipr_std_inq_data));

		res->qmodel = IPR_QUEUEING_MODEL(res);
		proto = cfgtew->u.cfgte->proto;
		res->res_handle = cfgtew->u.cfgte->res_handle;
	}

	ipr_update_ata_class(res, proto);
}

/**
 * ipr_clear_res_target - Clear the bit in the bit map representing the target
 *			  for the resource.
 * @res:	resource entry struct
 *
 * Return value:
 *      none
 **/
static void ipr_clear_res_target(struct ipr_resource_entry *res)
{
	struct ipr_resource_entry *gscsi_res = NULL;
	struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;

	if (!ioa_cfg->sis64)
		return;

	if (res->bus == IPR_ARRAY_VIRTUAL_BUS)
		clear_bit(res->target, ioa_cfg->array_ids);
	else if (res->bus == IPR_VSET_VIRTUAL_BUS)
		clear_bit(res->target, ioa_cfg->vset_ids);
	else if (res->bus == 0 && res->type == IPR_RES_TYPE_GENERIC_SCSI) {
		list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue)
			if (gscsi_res->dev_id == res->dev_id && gscsi_res != res)
				return;
		clear_bit(res->target, ioa_cfg->target_ids);

	} else if (res->bus == 0)
		clear_bit(res->target, ioa_cfg->target_ids);
}

/**
 * ipr_handle_config_change - Handle a config change from the adapter
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb
 *
 * Return value:
 * 	none
 **/
static void ipr_handle_config_change(struct ipr_ioa_cfg *ioa_cfg,
				     struct ipr_hostrcb *hostrcb)
{
	struct ipr_resource_entry *res = NULL;
	struct ipr_config_table_entry_wrapper cfgtew;
	__be32 cc_res_handle;

	u32 is_ndn = 1;

	if (ioa_cfg->sis64) {
		cfgtew.u.cfgte64 = &hostrcb->hcam.u.ccn.u.cfgte64;
		cc_res_handle = cfgtew.u.cfgte64->res_handle;
	} else {
		cfgtew.u.cfgte = &hostrcb->hcam.u.ccn.u.cfgte;
		cc_res_handle = cfgtew.u.cfgte->res_handle;
	}

	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
		if (res->res_handle == cc_res_handle) {
			is_ndn = 0;
			break;
		}
	}

	if (is_ndn) {
		if (list_empty(&ioa_cfg->free_res_q)) {
			ipr_send_hcam(ioa_cfg,
				      IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE,
				      hostrcb);
			return;
		}

		res = list_entry(ioa_cfg->free_res_q.next,
				 struct ipr_resource_entry, queue);

		list_del(&res->queue);
		ipr_init_res_entry(res, &cfgtew);
		list_add_tail(&res->queue, &ioa_cfg->used_res_q);
	}

	ipr_update_res_entry(res, &cfgtew);

	if (hostrcb->hcam.notify_type == IPR_HOST_RCB_NOTIF_TYPE_REM_ENTRY) {
		if (res->sdev) {
			res->del_from_ml = 1;
			res->res_handle = IPR_INVALID_RES_HANDLE;
			schedule_work(&ioa_cfg->work_q);
		} else {
			ipr_clear_res_target(res);
			list_move_tail(&res->queue, &ioa_cfg->free_res_q);
		}
	} else if (!res->sdev || res->del_from_ml) {
		res->add_to_ml = 1;
		schedule_work(&ioa_cfg->work_q);
	}

	ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
}

/**
 * ipr_process_ccn - Op done function for a CCN.
 * @ipr_cmd:	ipr command struct
 *
 * This function is the op done function for a configuration
 * change notification host controlled async from the adapter.
 *
 * Return value:
 * 	none
 **/
static void ipr_process_ccn(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);

	list_del_init(&hostrcb->queue);
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);

	if (ioasc) {
		if (ioasc != IPR_IOASC_IOA_WAS_RESET &&
		    ioasc != IPR_IOASC_ABORTED_CMD_TERM_BY_HOST)
			dev_err(&ioa_cfg->pdev->dev,
				"Host RCB failed with IOASC: 0x%08X\n", ioasc);

		ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
	} else {
		ipr_handle_config_change(ioa_cfg, hostrcb);
	}
}

/**
 * strip_and_pad_whitespace - Strip and pad trailing whitespace.
 * @i:		index into buffer
 * @buf:	string to modify
 *
 * This function will strip all trailing whitespace, pad the end
 * of the string with a single space, and NULL terminate the string.
 *
 * Return value:
 * 	new length of string
 **/
static int strip_and_pad_whitespace(int i, char *buf)
{
	while (i && buf[i] == ' ')
		i--;
	buf[i+1] = ' ';
	buf[i+2] = '\0';
	return i + 2;
}

/**
 * ipr_log_vpd_compact - Log the passed extended VPD compactly.
 * @prefix:	string to print at start of printk
 * @hostrcb:	hostrcb pointer
 * @vpd:	vendor/product id/sn struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
				struct ipr_vpd *vpd)
{
	char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN + IPR_SERIAL_NUM_LEN + 3];
	int i = 0;

	memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
	i = strip_and_pad_whitespace(IPR_VENDOR_ID_LEN - 1, buffer);

	memcpy(&buffer[i], vpd->vpids.product_id, IPR_PROD_ID_LEN);
	i = strip_and_pad_whitespace(i + IPR_PROD_ID_LEN - 1, buffer);

	memcpy(&buffer[i], vpd->sn, IPR_SERIAL_NUM_LEN);
	buffer[IPR_SERIAL_NUM_LEN + i] = '\0';

	ipr_hcam_err(hostrcb, "%s VPID/SN: %s\n", prefix, buffer);
}

/**
 * ipr_log_vpd - Log the passed VPD to the error log.
 * @vpd:	vendor/product id/sn struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_vpd(struct ipr_vpd *vpd)
{
	char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN
		    + IPR_SERIAL_NUM_LEN];

	memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
	memcpy(buffer + IPR_VENDOR_ID_LEN, vpd->vpids.product_id,
	       IPR_PROD_ID_LEN);
	buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN] = '\0';
	ipr_err("Vendor/Product ID: %s\n", buffer);

	memcpy(buffer, vpd->sn, IPR_SERIAL_NUM_LEN);
	buffer[IPR_SERIAL_NUM_LEN] = '\0';
	ipr_err("    Serial Number: %s\n", buffer);
}

/**
 * ipr_log_ext_vpd_compact - Log the passed extended VPD compactly.
 * @prefix:	string to print at start of printk
 * @hostrcb:	hostrcb pointer
 * @vpd:	vendor/product id/sn/wwn struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_ext_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
				    struct ipr_ext_vpd *vpd)
{
	ipr_log_vpd_compact(prefix, hostrcb, &vpd->vpd);
	ipr_hcam_err(hostrcb, "%s WWN: %08X%08X\n", prefix,
		     be32_to_cpu(vpd->wwid[0]), be32_to_cpu(vpd->wwid[1]));
}

/**
 * ipr_log_ext_vpd - Log the passed extended VPD to the error log.
 * @vpd:	vendor/product id/sn/wwn struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_ext_vpd(struct ipr_ext_vpd *vpd)
{
	ipr_log_vpd(&vpd->vpd);
	ipr_err("    WWN: %08X%08X\n", be32_to_cpu(vpd->wwid[0]),
		be32_to_cpu(vpd->wwid[1]));
}

/**
 * ipr_log_enhanced_cache_error - Log a cache error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_enhanced_cache_error(struct ipr_ioa_cfg *ioa_cfg,
					 struct ipr_hostrcb *hostrcb)
{
	struct ipr_hostrcb_type_12_error *error;

	if (ioa_cfg->sis64)
		error = &hostrcb->hcam.u.error64.u.type_12_error;
	else
		error = &hostrcb->hcam.u.error.u.type_12_error;

	ipr_err("-----Current Configuration-----\n");
	ipr_err("Cache Directory Card Information:\n");
	ipr_log_ext_vpd(&error->ioa_vpd);
	ipr_err("Adapter Card Information:\n");
	ipr_log_ext_vpd(&error->cfc_vpd);

	ipr_err("-----Expected Configuration-----\n");
	ipr_err("Cache Directory Card Information:\n");
	ipr_log_ext_vpd(&error->ioa_last_attached_to_cfc_vpd);
	ipr_err("Adapter Card Information:\n");
	ipr_log_ext_vpd(&error->cfc_last_attached_to_ioa_vpd);

	ipr_err("Additional IOA Data: %08X %08X %08X\n",
		be32_to_cpu(error->ioa_data[0]),
		be32_to_cpu(error->ioa_data[1]),
		be32_to_cpu(error->ioa_data[2]));
}

/**
 * ipr_log_cache_error - Log a cache error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_cache_error(struct ipr_ioa_cfg *ioa_cfg,
				struct ipr_hostrcb *hostrcb)
{
	struct ipr_hostrcb_type_02_error *error =
		&hostrcb->hcam.u.error.u.type_02_error;

	ipr_err("-----Current Configuration-----\n");
	ipr_err("Cache Directory Card Information:\n");
	ipr_log_vpd(&error->ioa_vpd);
	ipr_err("Adapter Card Information:\n");
	ipr_log_vpd(&error->cfc_vpd);

	ipr_err("-----Expected Configuration-----\n");
	ipr_err("Cache Directory Card Information:\n");
	ipr_log_vpd(&error->ioa_last_attached_to_cfc_vpd);
	ipr_err("Adapter Card Information:\n");
	ipr_log_vpd(&error->cfc_last_attached_to_ioa_vpd);

	ipr_err("Additional IOA Data: %08X %08X %08X\n",
		be32_to_cpu(error->ioa_data[0]),
		be32_to_cpu(error->ioa_data[1]),
		be32_to_cpu(error->ioa_data[2]));
}

/**
 * ipr_log_enhanced_config_error - Log a configuration error.
1689 * @ioa_cfg: ioa config struct 1690 * @hostrcb: hostrcb struct 1691 * 1692 * Return value: 1693 * none 1694 **/ 1695 static void ipr_log_enhanced_config_error(struct ipr_ioa_cfg *ioa_cfg, 1696 struct ipr_hostrcb *hostrcb) 1697 { 1698 int errors_logged, i; 1699 struct ipr_hostrcb_device_data_entry_enhanced *dev_entry; 1700 struct ipr_hostrcb_type_13_error *error; 1701 1702 error = &hostrcb->hcam.u.error.u.type_13_error; 1703 errors_logged = be32_to_cpu(error->errors_logged); 1704 1705 ipr_err("Device Errors Detected/Logged: %d/%d\n", 1706 be32_to_cpu(error->errors_detected), errors_logged); 1707 1708 dev_entry = error->dev; 1709 1710 for (i = 0; i < errors_logged; i++, dev_entry++) { 1711 ipr_err_separator; 1712 1713 ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1); 1714 ipr_log_ext_vpd(&dev_entry->vpd); 1715 1716 ipr_err("-----New Device Information-----\n"); 1717 ipr_log_ext_vpd(&dev_entry->new_vpd); 1718 1719 ipr_err("Cache Directory Card Information:\n"); 1720 ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd); 1721 1722 ipr_err("Adapter Card Information:\n"); 1723 ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd); 1724 } 1725 } 1726 1727 /** 1728 * ipr_log_sis64_config_error - Log a device error. 1729 * @ioa_cfg: ioa config struct 1730 * @hostrcb: hostrcb struct 1731 * 1732 * Return value: 1733 * none 1734 **/ 1735 static void ipr_log_sis64_config_error(struct ipr_ioa_cfg *ioa_cfg, 1736 struct ipr_hostrcb *hostrcb) 1737 { 1738 int errors_logged, i; 1739 struct ipr_hostrcb64_device_data_entry_enhanced *dev_entry; 1740 struct ipr_hostrcb_type_23_error *error; 1741 char buffer[IPR_MAX_RES_PATH_LENGTH]; 1742 1743 error = &hostrcb->hcam.u.error64.u.type_23_error; 1744 errors_logged = be32_to_cpu(error->errors_logged); 1745 1746 ipr_err("Device Errors Detected/Logged: %d/%d\n", 1747 be32_to_cpu(error->errors_detected), errors_logged); 1748 1749 dev_entry = error->dev; 1750 1751 for (i = 0; i < errors_logged; i++, dev_entry++) { 1752 ipr_err_separator; 1753 1754 ipr_err("Device %d : %s", i + 1, 1755 __ipr_format_res_path(dev_entry->res_path, 1756 buffer, sizeof(buffer))); 1757 ipr_log_ext_vpd(&dev_entry->vpd); 1758 1759 ipr_err("-----New Device Information-----\n"); 1760 ipr_log_ext_vpd(&dev_entry->new_vpd); 1761 1762 ipr_err("Cache Directory Card Information:\n"); 1763 ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd); 1764 1765 ipr_err("Adapter Card Information:\n"); 1766 ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd); 1767 } 1768 } 1769 1770 /** 1771 * ipr_log_config_error - Log a configuration error. 
1772 * @ioa_cfg: ioa config struct 1773 * @hostrcb: hostrcb struct 1774 * 1775 * Return value: 1776 * none 1777 **/ 1778 static void ipr_log_config_error(struct ipr_ioa_cfg *ioa_cfg, 1779 struct ipr_hostrcb *hostrcb) 1780 { 1781 int errors_logged, i; 1782 struct ipr_hostrcb_device_data_entry *dev_entry; 1783 struct ipr_hostrcb_type_03_error *error; 1784 1785 error = &hostrcb->hcam.u.error.u.type_03_error; 1786 errors_logged = be32_to_cpu(error->errors_logged); 1787 1788 ipr_err("Device Errors Detected/Logged: %d/%d\n", 1789 be32_to_cpu(error->errors_detected), errors_logged); 1790 1791 dev_entry = error->dev; 1792 1793 for (i = 0; i < errors_logged; i++, dev_entry++) { 1794 ipr_err_separator; 1795 1796 ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1); 1797 ipr_log_vpd(&dev_entry->vpd); 1798 1799 ipr_err("-----New Device Information-----\n"); 1800 ipr_log_vpd(&dev_entry->new_vpd); 1801 1802 ipr_err("Cache Directory Card Information:\n"); 1803 ipr_log_vpd(&dev_entry->ioa_last_with_dev_vpd); 1804 1805 ipr_err("Adapter Card Information:\n"); 1806 ipr_log_vpd(&dev_entry->cfc_last_with_dev_vpd); 1807 1808 ipr_err("Additional IOA Data: %08X %08X %08X %08X %08X\n", 1809 be32_to_cpu(dev_entry->ioa_data[0]), 1810 be32_to_cpu(dev_entry->ioa_data[1]), 1811 be32_to_cpu(dev_entry->ioa_data[2]), 1812 be32_to_cpu(dev_entry->ioa_data[3]), 1813 be32_to_cpu(dev_entry->ioa_data[4])); 1814 } 1815 } 1816 1817 /** 1818 * ipr_log_enhanced_array_error - Log an array configuration error. 1819 * @ioa_cfg: ioa config struct 1820 * @hostrcb: hostrcb struct 1821 * 1822 * Return value: 1823 * none 1824 **/ 1825 static void ipr_log_enhanced_array_error(struct ipr_ioa_cfg *ioa_cfg, 1826 struct ipr_hostrcb *hostrcb) 1827 { 1828 int i, num_entries; 1829 struct ipr_hostrcb_type_14_error *error; 1830 struct ipr_hostrcb_array_data_entry_enhanced *array_entry; 1831 const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' }; 1832 1833 error = &hostrcb->hcam.u.error.u.type_14_error; 1834 1835 ipr_err_separator; 1836 1837 ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n", 1838 error->protection_level, 1839 ioa_cfg->host->host_no, 1840 error->last_func_vset_res_addr.bus, 1841 error->last_func_vset_res_addr.target, 1842 error->last_func_vset_res_addr.lun); 1843 1844 ipr_err_separator; 1845 1846 array_entry = error->array_member; 1847 num_entries = min_t(u32, be32_to_cpu(error->num_entries), 1848 ARRAY_SIZE(error->array_member)); 1849 1850 for (i = 0; i < num_entries; i++, array_entry++) { 1851 if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN)) 1852 continue; 1853 1854 if (be32_to_cpu(error->exposed_mode_adn) == i) 1855 ipr_err("Exposed Array Member %d:\n", i); 1856 else 1857 ipr_err("Array Member %d:\n", i); 1858 1859 ipr_log_ext_vpd(&array_entry->vpd); 1860 ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location"); 1861 ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr, 1862 "Expected Location"); 1863 1864 ipr_err_separator; 1865 } 1866 } 1867 1868 /** 1869 * ipr_log_array_error - Log an array configuration error. 1870 * @ioa_cfg: ioa config struct 1871 * @hostrcb: hostrcb struct 1872 * 1873 * Return value: 1874 * none 1875 **/ 1876 static void ipr_log_array_error(struct ipr_ioa_cfg *ioa_cfg, 1877 struct ipr_hostrcb *hostrcb) 1878 { 1879 int i; 1880 struct ipr_hostrcb_type_04_error *error; 1881 struct ipr_hostrcb_array_data_entry *array_entry; 1882 const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... 
IPR_SERIAL_NUM_LEN-1] = '0' }; 1883 1884 error = &hostrcb->hcam.u.error.u.type_04_error; 1885 1886 ipr_err_separator; 1887 1888 ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n", 1889 error->protection_level, 1890 ioa_cfg->host->host_no, 1891 error->last_func_vset_res_addr.bus, 1892 error->last_func_vset_res_addr.target, 1893 error->last_func_vset_res_addr.lun); 1894 1895 ipr_err_separator; 1896 1897 array_entry = error->array_member; 1898 1899 for (i = 0; i < 18; i++) { 1900 if (!memcmp(array_entry->vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN)) 1901 continue; 1902 1903 if (be32_to_cpu(error->exposed_mode_adn) == i) 1904 ipr_err("Exposed Array Member %d:\n", i); 1905 else 1906 ipr_err("Array Member %d:\n", i); 1907 1908 ipr_log_vpd(&array_entry->vpd); 1909 1910 ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location"); 1911 ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr, 1912 "Expected Location"); 1913 1914 ipr_err_separator; 1915 1916 if (i == 9) 1917 array_entry = error->array_member2; 1918 else 1919 array_entry++; 1920 } 1921 } 1922 1923 /** 1924 * ipr_log_hex_data - Log additional hex IOA error data. 1925 * @ioa_cfg: ioa config struct 1926 * @data: IOA error data 1927 * @len: data length 1928 * 1929 * Return value: 1930 * none 1931 **/ 1932 static void ipr_log_hex_data(struct ipr_ioa_cfg *ioa_cfg, __be32 *data, int len) 1933 { 1934 int i; 1935 1936 if (len == 0) 1937 return; 1938 1939 if (ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL) 1940 len = min_t(int, len, IPR_DEFAULT_MAX_ERROR_DUMP); 1941 1942 for (i = 0; i < len / 4; i += 4) { 1943 ipr_err("%08X: %08X %08X %08X %08X\n", i*4, 1944 be32_to_cpu(data[i]), 1945 be32_to_cpu(data[i+1]), 1946 be32_to_cpu(data[i+2]), 1947 be32_to_cpu(data[i+3])); 1948 } 1949 } 1950 1951 /** 1952 * ipr_log_enhanced_dual_ioa_error - Log an enhanced dual adapter error. 1953 * @ioa_cfg: ioa config struct 1954 * @hostrcb: hostrcb struct 1955 * 1956 * Return value: 1957 * none 1958 **/ 1959 static void ipr_log_enhanced_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg, 1960 struct ipr_hostrcb *hostrcb) 1961 { 1962 struct ipr_hostrcb_type_17_error *error; 1963 1964 if (ioa_cfg->sis64) 1965 error = &hostrcb->hcam.u.error64.u.type_17_error; 1966 else 1967 error = &hostrcb->hcam.u.error.u.type_17_error; 1968 1969 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0'; 1970 strim(error->failure_reason); 1971 1972 ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason, 1973 be32_to_cpu(hostrcb->hcam.u.error.prc)); 1974 ipr_log_ext_vpd_compact("Remote IOA", hostrcb, &error->vpd); 1975 ipr_log_hex_data(ioa_cfg, error->data, 1976 be32_to_cpu(hostrcb->hcam.length) - 1977 (offsetof(struct ipr_hostrcb_error, u) + 1978 offsetof(struct ipr_hostrcb_type_17_error, data))); 1979 } 1980 1981 /** 1982 * ipr_log_dual_ioa_error - Log a dual adapter error. 
1983 * @ioa_cfg: ioa config struct 1984 * @hostrcb: hostrcb struct 1985 * 1986 * Return value: 1987 * none 1988 **/ 1989 static void ipr_log_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg, 1990 struct ipr_hostrcb *hostrcb) 1991 { 1992 struct ipr_hostrcb_type_07_error *error; 1993 1994 error = &hostrcb->hcam.u.error.u.type_07_error; 1995 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0'; 1996 strim(error->failure_reason); 1997 1998 ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason, 1999 be32_to_cpu(hostrcb->hcam.u.error.prc)); 2000 ipr_log_vpd_compact("Remote IOA", hostrcb, &error->vpd); 2001 ipr_log_hex_data(ioa_cfg, error->data, 2002 be32_to_cpu(hostrcb->hcam.length) - 2003 (offsetof(struct ipr_hostrcb_error, u) + 2004 offsetof(struct ipr_hostrcb_type_07_error, data))); 2005 } 2006 2007 static const struct { 2008 u8 active; 2009 char *desc; 2010 } path_active_desc[] = { 2011 { IPR_PATH_NO_INFO, "Path" }, 2012 { IPR_PATH_ACTIVE, "Active path" }, 2013 { IPR_PATH_NOT_ACTIVE, "Inactive path" } 2014 }; 2015 2016 static const struct { 2017 u8 state; 2018 char *desc; 2019 } path_state_desc[] = { 2020 { IPR_PATH_STATE_NO_INFO, "has no path state information available" }, 2021 { IPR_PATH_HEALTHY, "is healthy" }, 2022 { IPR_PATH_DEGRADED, "is degraded" }, 2023 { IPR_PATH_FAILED, "is failed" } 2024 }; 2025 2026 /** 2027 * ipr_log_fabric_path - Log a fabric path error 2028 * @hostrcb: hostrcb struct 2029 * @fabric: fabric descriptor 2030 * 2031 * Return value: 2032 * none 2033 **/ 2034 static void ipr_log_fabric_path(struct ipr_hostrcb *hostrcb, 2035 struct ipr_hostrcb_fabric_desc *fabric) 2036 { 2037 int i, j; 2038 u8 path_state = fabric->path_state; 2039 u8 active = path_state & IPR_PATH_ACTIVE_MASK; 2040 u8 state = path_state & IPR_PATH_STATE_MASK; 2041 2042 for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) { 2043 if (path_active_desc[i].active != active) 2044 continue; 2045 2046 for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) { 2047 if (path_state_desc[j].state != state) 2048 continue; 2049 2050 if (fabric->cascaded_expander == 0xff && fabric->phy == 0xff) { 2051 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d\n", 2052 path_active_desc[i].desc, path_state_desc[j].desc, 2053 fabric->ioa_port); 2054 } else if (fabric->cascaded_expander == 0xff) { 2055 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Phy=%d\n", 2056 path_active_desc[i].desc, path_state_desc[j].desc, 2057 fabric->ioa_port, fabric->phy); 2058 } else if (fabric->phy == 0xff) { 2059 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d\n", 2060 path_active_desc[i].desc, path_state_desc[j].desc, 2061 fabric->ioa_port, fabric->cascaded_expander); 2062 } else { 2063 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d, Phy=%d\n", 2064 path_active_desc[i].desc, path_state_desc[j].desc, 2065 fabric->ioa_port, fabric->cascaded_expander, fabric->phy); 2066 } 2067 return; 2068 } 2069 } 2070 2071 ipr_err("Path state=%02X IOA Port=%d Cascade=%d Phy=%d\n", path_state, 2072 fabric->ioa_port, fabric->cascaded_expander, fabric->phy); 2073 } 2074 2075 /** 2076 * ipr_log64_fabric_path - Log a fabric path error 2077 * @hostrcb: hostrcb struct 2078 * @fabric: fabric descriptor 2079 * 2080 * Return value: 2081 * none 2082 **/ 2083 static void ipr_log64_fabric_path(struct ipr_hostrcb *hostrcb, 2084 struct ipr_hostrcb64_fabric_desc *fabric) 2085 { 2086 int i, j; 2087 u8 path_state = fabric->path_state; 2088 u8 active = path_state & IPR_PATH_ACTIVE_MASK; 2089 u8 state = path_state & IPR_PATH_STATE_MASK; 2090 char 
buffer[IPR_MAX_RES_PATH_LENGTH]; 2091 2092 for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) { 2093 if (path_active_desc[i].active != active) 2094 continue; 2095 2096 for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) { 2097 if (path_state_desc[j].state != state) 2098 continue; 2099 2100 ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s\n", 2101 path_active_desc[i].desc, path_state_desc[j].desc, 2102 ipr_format_res_path(hostrcb->ioa_cfg, 2103 fabric->res_path, 2104 buffer, sizeof(buffer))); 2105 return; 2106 } 2107 } 2108 2109 ipr_err("Path state=%02X Resource Path=%s\n", path_state, 2110 ipr_format_res_path(hostrcb->ioa_cfg, fabric->res_path, 2111 buffer, sizeof(buffer))); 2112 } 2113 2114 static const struct { 2115 u8 type; 2116 char *desc; 2117 } path_type_desc[] = { 2118 { IPR_PATH_CFG_IOA_PORT, "IOA port" }, 2119 { IPR_PATH_CFG_EXP_PORT, "Expander port" }, 2120 { IPR_PATH_CFG_DEVICE_PORT, "Device port" }, 2121 { IPR_PATH_CFG_DEVICE_LUN, "Device LUN" } 2122 }; 2123 2124 static const struct { 2125 u8 status; 2126 char *desc; 2127 } path_status_desc[] = { 2128 { IPR_PATH_CFG_NO_PROB, "Functional" }, 2129 { IPR_PATH_CFG_DEGRADED, "Degraded" }, 2130 { IPR_PATH_CFG_FAILED, "Failed" }, 2131 { IPR_PATH_CFG_SUSPECT, "Suspect" }, 2132 { IPR_PATH_NOT_DETECTED, "Missing" }, 2133 { IPR_PATH_INCORRECT_CONN, "Incorrectly connected" } 2134 }; 2135 2136 static const char *link_rate[] = { 2137 "unknown", 2138 "disabled", 2139 "phy reset problem", 2140 "spinup hold", 2141 "port selector", 2142 "unknown", 2143 "unknown", 2144 "unknown", 2145 "1.5Gbps", 2146 "3.0Gbps", 2147 "unknown", 2148 "unknown", 2149 "unknown", 2150 "unknown", 2151 "unknown", 2152 "unknown" 2153 }; 2154 2155 /** 2156 * ipr_log_path_elem - Log a fabric path element. 2157 * @hostrcb: hostrcb struct 2158 * @cfg: fabric path element struct 2159 * 2160 * Return value: 2161 * none 2162 **/ 2163 static void ipr_log_path_elem(struct ipr_hostrcb *hostrcb, 2164 struct ipr_hostrcb_config_element *cfg) 2165 { 2166 int i, j; 2167 u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK; 2168 u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK; 2169 2170 if (type == IPR_PATH_CFG_NOT_EXIST) 2171 return; 2172 2173 for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) { 2174 if (path_type_desc[i].type != type) 2175 continue; 2176 2177 for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) { 2178 if (path_status_desc[j].status != status) 2179 continue; 2180 2181 if (type == IPR_PATH_CFG_IOA_PORT) { 2182 ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, WWN=%08X%08X\n", 2183 path_status_desc[j].desc, path_type_desc[i].desc, 2184 cfg->phy, link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK], 2185 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1])); 2186 } else { 2187 if (cfg->cascaded_expander == 0xff && cfg->phy == 0xff) { 2188 ipr_hcam_err(hostrcb, "%s %s: Link rate=%s, WWN=%08X%08X\n", 2189 path_status_desc[j].desc, path_type_desc[i].desc, 2190 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK], 2191 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1])); 2192 } else if (cfg->cascaded_expander == 0xff) { 2193 ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, " 2194 "WWN=%08X%08X\n", path_status_desc[j].desc, 2195 path_type_desc[i].desc, cfg->phy, 2196 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK], 2197 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1])); 2198 } else if (cfg->phy == 0xff) { 2199 ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Link rate=%s, " 2200 "WWN=%08X%08X\n", path_status_desc[j].desc, 2201 path_type_desc[i].desc, cfg->cascaded_expander, 
2202 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK], 2203 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1])); 2204 } else { 2205 ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Phy=%d, Link rate=%s " 2206 "WWN=%08X%08X\n", path_status_desc[j].desc, 2207 path_type_desc[i].desc, cfg->cascaded_expander, cfg->phy, 2208 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK], 2209 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1])); 2210 } 2211 } 2212 return; 2213 } 2214 } 2215 2216 ipr_hcam_err(hostrcb, "Path element=%02X: Cascade=%d Phy=%d Link rate=%s " 2217 "WWN=%08X%08X\n", cfg->type_status, cfg->cascaded_expander, cfg->phy, 2218 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK], 2219 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1])); 2220 } 2221 2222 /** 2223 * ipr_log64_path_elem - Log a fabric path element. 2224 * @hostrcb: hostrcb struct 2225 * @cfg: fabric path element struct 2226 * 2227 * Return value: 2228 * none 2229 **/ 2230 static void ipr_log64_path_elem(struct ipr_hostrcb *hostrcb, 2231 struct ipr_hostrcb64_config_element *cfg) 2232 { 2233 int i, j; 2234 u8 desc_id = cfg->descriptor_id & IPR_DESCRIPTOR_MASK; 2235 u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK; 2236 u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK; 2237 char buffer[IPR_MAX_RES_PATH_LENGTH]; 2238 2239 if (type == IPR_PATH_CFG_NOT_EXIST || desc_id != IPR_DESCRIPTOR_SIS64) 2240 return; 2241 2242 for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) { 2243 if (path_type_desc[i].type != type) 2244 continue; 2245 2246 for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) { 2247 if (path_status_desc[j].status != status) 2248 continue; 2249 2250 ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s, Link rate=%s, WWN=%08X%08X\n", 2251 path_status_desc[j].desc, path_type_desc[i].desc, 2252 ipr_format_res_path(hostrcb->ioa_cfg, 2253 cfg->res_path, buffer, sizeof(buffer)), 2254 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK], 2255 be32_to_cpu(cfg->wwid[0]), 2256 be32_to_cpu(cfg->wwid[1])); 2257 return; 2258 } 2259 } 2260 ipr_hcam_err(hostrcb, "Path element=%02X: Resource Path=%s, Link rate=%s " 2261 "WWN=%08X%08X\n", cfg->type_status, 2262 ipr_format_res_path(hostrcb->ioa_cfg, 2263 cfg->res_path, buffer, sizeof(buffer)), 2264 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK], 2265 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1])); 2266 } 2267 2268 /** 2269 * ipr_log_fabric_error - Log a fabric error. 
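 *
 * The type 20 error payload is a sequence of variable-length fabric
 * descriptors, each immediately followed by its path elements;
 * fabric->length gives the byte length of a descriptor including its
 * elements, so the walk below simply advances by that many bytes.
 * Whatever payload remains after the last descriptor is dumped as raw
 * hex.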
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
				 struct ipr_hostrcb *hostrcb)
{
	struct ipr_hostrcb_type_20_error *error;
	struct ipr_hostrcb_fabric_desc *fabric;
	struct ipr_hostrcb_config_element *cfg;
	int i, add_len;

	error = &hostrcb->hcam.u.error.u.type_20_error;
	error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
	ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);

	add_len = be32_to_cpu(hostrcb->hcam.length) -
		(offsetof(struct ipr_hostrcb_error, u) +
		 offsetof(struct ipr_hostrcb_type_20_error, desc));

	for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
		ipr_log_fabric_path(hostrcb, fabric);
		for_each_fabric_cfg(fabric, cfg)
			ipr_log_path_elem(hostrcb, cfg);

		add_len -= be16_to_cpu(fabric->length);
		fabric = (struct ipr_hostrcb_fabric_desc *)
			((unsigned long)fabric + be16_to_cpu(fabric->length));
	}

	ipr_log_hex_data(ioa_cfg, (__be32 *)fabric, add_len);
}

/**
 * ipr_log_sis64_array_error - Log a sis64 array error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_sis64_array_error(struct ipr_ioa_cfg *ioa_cfg,
				      struct ipr_hostrcb *hostrcb)
{
	int i, num_entries;
	struct ipr_hostrcb_type_24_error *error;
	struct ipr_hostrcb64_array_data_entry *array_entry;
	char buffer[IPR_MAX_RES_PATH_LENGTH];
	const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };

	error = &hostrcb->hcam.u.error64.u.type_24_error;

	ipr_err_separator;

	ipr_err("RAID %s Array Configuration: %s\n",
		error->protection_level,
		ipr_format_res_path(ioa_cfg, error->last_res_path,
			buffer, sizeof(buffer)));

	ipr_err_separator;

	array_entry = error->array_member;
	num_entries = min_t(u32, error->num_entries,
			    ARRAY_SIZE(error->array_member));

	for (i = 0; i < num_entries; i++, array_entry++) {

		if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
			continue;

		if (error->exposed_mode_adn == i)
			ipr_err("Exposed Array Member %d:\n", i);
		else
			ipr_err("Array Member %d:\n", i);

		ipr_log_ext_vpd(&array_entry->vpd);
		ipr_err("Current Location: %s\n",
			ipr_format_res_path(ioa_cfg, array_entry->res_path,
				buffer, sizeof(buffer)));
		ipr_err("Expected Location: %s\n",
			ipr_format_res_path(ioa_cfg,
				array_entry->expected_res_path,
				buffer, sizeof(buffer)));

		ipr_err_separator;
	}
}

/**
 * ipr_log_sis64_fabric_error - Log a sis64 fabric error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_sis64_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
				       struct ipr_hostrcb *hostrcb)
{
	struct ipr_hostrcb_type_30_error *error;
	struct ipr_hostrcb64_fabric_desc *fabric;
	struct ipr_hostrcb64_config_element *cfg;
	int i, add_len;

	error = &hostrcb->hcam.u.error64.u.type_30_error;

	error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
	ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);

	add_len = be32_to_cpu(hostrcb->hcam.length) -
		(offsetof(struct ipr_hostrcb64_error, u) +
		 offsetof(struct ipr_hostrcb_type_30_error, desc));

	for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
		ipr_log64_fabric_path(hostrcb, fabric);
		for_each_fabric_cfg(fabric, cfg)
			ipr_log64_path_elem(hostrcb, cfg);

		add_len -= be16_to_cpu(fabric->length);
		fabric = (struct ipr_hostrcb64_fabric_desc *)
			((unsigned long)fabric + be16_to_cpu(fabric->length));
	}

	ipr_log_hex_data(ioa_cfg, (__be32 *)fabric, add_len);
}

/**
 * ipr_log_sis64_service_required_error - Log a sis64 service required error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_sis64_service_required_error(struct ipr_ioa_cfg *ioa_cfg,
						 struct ipr_hostrcb *hostrcb)
{
	struct ipr_hostrcb_type_41_error *error;

	error = &hostrcb->hcam.u.error64.u.type_41_error;

	error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
	ipr_err("Primary Failure Reason: %s\n", error->failure_reason);
	ipr_log_hex_data(ioa_cfg, error->data,
			 be32_to_cpu(hostrcb->hcam.length) -
			 (offsetof(struct ipr_hostrcb_error, u) +
			  offsetof(struct ipr_hostrcb_type_41_error, data)));
}

/**
 * ipr_log_generic_error - Log an adapter error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_generic_error(struct ipr_ioa_cfg *ioa_cfg,
				  struct ipr_hostrcb *hostrcb)
{
	ipr_log_hex_data(ioa_cfg, hostrcb->hcam.u.raw.data,
			 be32_to_cpu(hostrcb->hcam.length));
}

/**
 * ipr_log_sis64_device_error - Log a sis64 device error.
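 *
 * The type 21 payload identifies the failing device by its world wide
 * unique ID and SIS64 resource path, carries primary and secondary
 * problem description strings, and appends the SCSI sense data, the
 * failing CDB, and length_of_error bytes of additional IOA data, all
 * of which are dumped below.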
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_sis64_device_error(struct ipr_ioa_cfg *ioa_cfg,
				       struct ipr_hostrcb *hostrcb)
{
	struct ipr_hostrcb_type_21_error *error;
	char buffer[IPR_MAX_RES_PATH_LENGTH];

	error = &hostrcb->hcam.u.error64.u.type_21_error;

	ipr_err("-----Failing Device Information-----\n");
	ipr_err("World Wide Unique ID: %08X%08X%08X%08X\n",
		be32_to_cpu(error->wwn[0]), be32_to_cpu(error->wwn[1]),
		be32_to_cpu(error->wwn[2]), be32_to_cpu(error->wwn[3]));
	ipr_err("Device Resource Path: %s\n",
		__ipr_format_res_path(error->res_path,
				      buffer, sizeof(buffer)));
	error->primary_problem_desc[sizeof(error->primary_problem_desc) - 1] = '\0';
	error->second_problem_desc[sizeof(error->second_problem_desc) - 1] = '\0';
	ipr_err("Primary Problem Description: %s\n", error->primary_problem_desc);
	ipr_err("Secondary Problem Description: %s\n", error->second_problem_desc);
	ipr_err("SCSI Sense Data:\n");
	ipr_log_hex_data(ioa_cfg, error->sense_data, sizeof(error->sense_data));
	ipr_err("SCSI Command Descriptor Block:\n");
	ipr_log_hex_data(ioa_cfg, error->cdb, sizeof(error->cdb));

	ipr_err("Additional IOA Data:\n");
	ipr_log_hex_data(ioa_cfg, error->ioa_data, be32_to_cpu(error->length_of_error));
}

/**
 * ipr_get_error - Find the specified IOASC in the ipr_error_table.
 * @ioasc:	IOASC
 *
 * This function will return the index into the ipr_error_table
 * for the specified IOASC. If the IOASC is not in the table,
 * 0 will be returned, which points to the entry used for unknown errors.
 *
 * Return value:
 * 	index into the ipr_error_table
 **/
static u32 ipr_get_error(u32 ioasc)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(ipr_error_table); i++)
		if (ipr_error_table[i].ioasc == (ioasc & IPR_IOASC_IOASC_MASK))
			return i;

	return 0;
}

/**
 * ipr_handle_log_data - Log an adapter error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * This function logs an adapter error to the system.
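 * Processing is, roughly: ignore anything that is not an error log
 * entry, warn if the adapter reports lost notifications, report bus
 * resets to the SCSI midlayer on 32-bit (non-SIS64) adapters, look the
 * IOASC up in ipr_error_table, and then dispatch on the hostrcb
 * overlay id to one of the type-specific loggers above, falling back
 * to a raw hex dump for unrecognized overlays.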
2500 * 2501 * Return value: 2502 * none 2503 **/ 2504 static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg, 2505 struct ipr_hostrcb *hostrcb) 2506 { 2507 u32 ioasc; 2508 int error_index; 2509 struct ipr_hostrcb_type_21_error *error; 2510 2511 if (hostrcb->hcam.notify_type != IPR_HOST_RCB_NOTIF_TYPE_ERROR_LOG_ENTRY) 2512 return; 2513 2514 if (hostrcb->hcam.notifications_lost == IPR_HOST_RCB_NOTIFICATIONS_LOST) 2515 dev_err(&ioa_cfg->pdev->dev, "Error notifications lost\n"); 2516 2517 if (ioa_cfg->sis64) 2518 ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc); 2519 else 2520 ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc); 2521 2522 if (!ioa_cfg->sis64 && (ioasc == IPR_IOASC_BUS_WAS_RESET || 2523 ioasc == IPR_IOASC_BUS_WAS_RESET_BY_OTHER)) { 2524 /* Tell the midlayer we had a bus reset so it will handle the UA properly */ 2525 scsi_report_bus_reset(ioa_cfg->host, 2526 hostrcb->hcam.u.error.fd_res_addr.bus); 2527 } 2528 2529 error_index = ipr_get_error(ioasc); 2530 2531 if (!ipr_error_table[error_index].log_hcam) 2532 return; 2533 2534 if (ioasc == IPR_IOASC_HW_CMD_FAILED && 2535 hostrcb->hcam.overlay_id == IPR_HOST_RCB_OVERLAY_ID_21) { 2536 error = &hostrcb->hcam.u.error64.u.type_21_error; 2537 2538 if (((be32_to_cpu(error->sense_data[0]) & 0x0000ff00) >> 8) == ILLEGAL_REQUEST && 2539 ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL) 2540 return; 2541 } 2542 2543 ipr_hcam_err(hostrcb, "%s\n", ipr_error_table[error_index].error); 2544 2545 /* Set indication we have logged an error */ 2546 ioa_cfg->errors_logged++; 2547 2548 if (ioa_cfg->log_level < ipr_error_table[error_index].log_hcam) 2549 return; 2550 if (be32_to_cpu(hostrcb->hcam.length) > sizeof(hostrcb->hcam.u.raw)) 2551 hostrcb->hcam.length = cpu_to_be32(sizeof(hostrcb->hcam.u.raw)); 2552 2553 switch (hostrcb->hcam.overlay_id) { 2554 case IPR_HOST_RCB_OVERLAY_ID_2: 2555 ipr_log_cache_error(ioa_cfg, hostrcb); 2556 break; 2557 case IPR_HOST_RCB_OVERLAY_ID_3: 2558 ipr_log_config_error(ioa_cfg, hostrcb); 2559 break; 2560 case IPR_HOST_RCB_OVERLAY_ID_4: 2561 case IPR_HOST_RCB_OVERLAY_ID_6: 2562 ipr_log_array_error(ioa_cfg, hostrcb); 2563 break; 2564 case IPR_HOST_RCB_OVERLAY_ID_7: 2565 ipr_log_dual_ioa_error(ioa_cfg, hostrcb); 2566 break; 2567 case IPR_HOST_RCB_OVERLAY_ID_12: 2568 ipr_log_enhanced_cache_error(ioa_cfg, hostrcb); 2569 break; 2570 case IPR_HOST_RCB_OVERLAY_ID_13: 2571 ipr_log_enhanced_config_error(ioa_cfg, hostrcb); 2572 break; 2573 case IPR_HOST_RCB_OVERLAY_ID_14: 2574 case IPR_HOST_RCB_OVERLAY_ID_16: 2575 ipr_log_enhanced_array_error(ioa_cfg, hostrcb); 2576 break; 2577 case IPR_HOST_RCB_OVERLAY_ID_17: 2578 ipr_log_enhanced_dual_ioa_error(ioa_cfg, hostrcb); 2579 break; 2580 case IPR_HOST_RCB_OVERLAY_ID_20: 2581 ipr_log_fabric_error(ioa_cfg, hostrcb); 2582 break; 2583 case IPR_HOST_RCB_OVERLAY_ID_21: 2584 ipr_log_sis64_device_error(ioa_cfg, hostrcb); 2585 break; 2586 case IPR_HOST_RCB_OVERLAY_ID_23: 2587 ipr_log_sis64_config_error(ioa_cfg, hostrcb); 2588 break; 2589 case IPR_HOST_RCB_OVERLAY_ID_24: 2590 case IPR_HOST_RCB_OVERLAY_ID_26: 2591 ipr_log_sis64_array_error(ioa_cfg, hostrcb); 2592 break; 2593 case IPR_HOST_RCB_OVERLAY_ID_30: 2594 ipr_log_sis64_fabric_error(ioa_cfg, hostrcb); 2595 break; 2596 case IPR_HOST_RCB_OVERLAY_ID_41: 2597 ipr_log_sis64_service_required_error(ioa_cfg, hostrcb); 2598 break; 2599 case IPR_HOST_RCB_OVERLAY_ID_1: 2600 case IPR_HOST_RCB_OVERLAY_ID_DEFAULT: 2601 default: 2602 ipr_log_generic_error(ioa_cfg, hostrcb); 2603 break; 2604 } 2605 } 2606 2607 static struct ipr_hostrcb 
*ipr_get_free_hostrcb(struct ipr_ioa_cfg *ioa) 2608 { 2609 struct ipr_hostrcb *hostrcb; 2610 2611 hostrcb = list_first_entry_or_null(&ioa->hostrcb_free_q, 2612 struct ipr_hostrcb, queue); 2613 2614 if (unlikely(!hostrcb)) { 2615 dev_info(&ioa->pdev->dev, "Reclaiming async error buffers."); 2616 hostrcb = list_first_entry_or_null(&ioa->hostrcb_report_q, 2617 struct ipr_hostrcb, queue); 2618 } 2619 2620 list_del_init(&hostrcb->queue); 2621 return hostrcb; 2622 } 2623 2624 /** 2625 * ipr_process_error - Op done function for an adapter error log. 2626 * @ipr_cmd: ipr command struct 2627 * 2628 * This function is the op done function for an error log host 2629 * controlled async from the adapter. It will log the error and 2630 * send the HCAM back to the adapter. 2631 * 2632 * Return value: 2633 * none 2634 **/ 2635 static void ipr_process_error(struct ipr_cmnd *ipr_cmd) 2636 { 2637 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 2638 struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb; 2639 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); 2640 u32 fd_ioasc; 2641 2642 if (ioa_cfg->sis64) 2643 fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc); 2644 else 2645 fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc); 2646 2647 list_del_init(&hostrcb->queue); 2648 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); 2649 2650 if (!ioasc) { 2651 ipr_handle_log_data(ioa_cfg, hostrcb); 2652 if (fd_ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED) 2653 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV); 2654 } else if (ioasc != IPR_IOASC_IOA_WAS_RESET && 2655 ioasc != IPR_IOASC_ABORTED_CMD_TERM_BY_HOST) { 2656 dev_err(&ioa_cfg->pdev->dev, 2657 "Host RCB failed with IOASC: 0x%08X\n", ioasc); 2658 } 2659 2660 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_report_q); 2661 schedule_work(&ioa_cfg->work_q); 2662 hostrcb = ipr_get_free_hostrcb(ioa_cfg); 2663 2664 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb); 2665 } 2666 2667 /** 2668 * ipr_timeout - An internally generated op has timed out. 2669 * @t: Timer context used to fetch ipr command struct 2670 * 2671 * This function blocks host requests and initiates an 2672 * adapter reset. 2673 * 2674 * Return value: 2675 * none 2676 **/ 2677 static void ipr_timeout(struct timer_list *t) 2678 { 2679 struct ipr_cmnd *ipr_cmd = from_timer(ipr_cmd, t, timer); 2680 unsigned long lock_flags = 0; 2681 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 2682 2683 ENTER; 2684 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 2685 2686 ioa_cfg->errors_logged++; 2687 dev_err(&ioa_cfg->pdev->dev, 2688 "Adapter being reset due to command timeout.\n"); 2689 2690 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state) 2691 ioa_cfg->sdt_state = GET_DUMP; 2692 2693 if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd) 2694 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE); 2695 2696 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 2697 LEAVE; 2698 } 2699 2700 /** 2701 * ipr_oper_timeout - Adapter timed out transitioning to operational 2702 * @t: Timer context used to fetch ipr command struct 2703 * 2704 * This function blocks host requests and initiates an 2705 * adapter reset. 
2706 * 2707 * Return value: 2708 * none 2709 **/ 2710 static void ipr_oper_timeout(struct timer_list *t) 2711 { 2712 struct ipr_cmnd *ipr_cmd = from_timer(ipr_cmd, t, timer); 2713 unsigned long lock_flags = 0; 2714 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 2715 2716 ENTER; 2717 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 2718 2719 ioa_cfg->errors_logged++; 2720 dev_err(&ioa_cfg->pdev->dev, 2721 "Adapter timed out transitioning to operational.\n"); 2722 2723 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state) 2724 ioa_cfg->sdt_state = GET_DUMP; 2725 2726 if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd) { 2727 if (ipr_fastfail) 2728 ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES; 2729 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE); 2730 } 2731 2732 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 2733 LEAVE; 2734 } 2735 2736 /** 2737 * ipr_find_ses_entry - Find matching SES in SES table 2738 * @res: resource entry struct of SES 2739 * 2740 * Return value: 2741 * pointer to SES table entry / NULL on failure 2742 **/ 2743 static const struct ipr_ses_table_entry * 2744 ipr_find_ses_entry(struct ipr_resource_entry *res) 2745 { 2746 int i, j, matches; 2747 struct ipr_std_inq_vpids *vpids; 2748 const struct ipr_ses_table_entry *ste = ipr_ses_table; 2749 2750 for (i = 0; i < ARRAY_SIZE(ipr_ses_table); i++, ste++) { 2751 for (j = 0, matches = 0; j < IPR_PROD_ID_LEN; j++) { 2752 if (ste->compare_product_id_byte[j] == 'X') { 2753 vpids = &res->std_inq_data.vpids; 2754 if (vpids->product_id[j] == ste->product_id[j]) 2755 matches++; 2756 else 2757 break; 2758 } else 2759 matches++; 2760 } 2761 2762 if (matches == IPR_PROD_ID_LEN) 2763 return ste; 2764 } 2765 2766 return NULL; 2767 } 2768 2769 /** 2770 * ipr_get_max_scsi_speed - Determine max SCSI speed for a given bus 2771 * @ioa_cfg: ioa config struct 2772 * @bus: SCSI bus 2773 * @bus_width: bus width 2774 * 2775 * Return value: 2776 * SCSI bus speed in units of 100KHz, 1600 is 160 MHz 2777 * For a 2-byte wide SCSI bus, the maximum transfer speed is 2778 * twice the maximum transfer rate (e.g. for a wide enabled bus, 2779 * max 160MHz = max 320MB/sec). 2780 **/ 2781 static u32 ipr_get_max_scsi_speed(struct ipr_ioa_cfg *ioa_cfg, u8 bus, u8 bus_width) 2782 { 2783 struct ipr_resource_entry *res; 2784 const struct ipr_ses_table_entry *ste; 2785 u32 max_xfer_rate = IPR_MAX_SCSI_RATE(bus_width); 2786 2787 /* Loop through each config table entry in the config table buffer */ 2788 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) { 2789 if (!(IPR_IS_SES_DEVICE(res->std_inq_data))) 2790 continue; 2791 2792 if (bus != res->bus) 2793 continue; 2794 2795 if (!(ste = ipr_find_ses_entry(res))) 2796 continue; 2797 2798 max_xfer_rate = (ste->max_bus_speed_limit * 10) / (bus_width / 8); 2799 } 2800 2801 return max_xfer_rate; 2802 } 2803 2804 /** 2805 * ipr_wait_iodbg_ack - Wait for an IODEBUG ACK from the IOA 2806 * @ioa_cfg: ioa config struct 2807 * @max_delay: max delay in micro-seconds to wait 2808 * 2809 * Waits for an IODEBUG ACK from the IOA, doing busy looping. 
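 * The poll interval doubles on every pass (1, 2, 4, ... microseconds)
 * until max_delay is reached; once a single wait would exceed
 * MAX_UDELAY_MS milliseconds the code switches from udelay() to
 * mdelay(), since udelay() is not intended for multi-millisecond
 * delays.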
2810 * 2811 * Return value: 2812 * 0 on success / other on failure 2813 **/ 2814 static int ipr_wait_iodbg_ack(struct ipr_ioa_cfg *ioa_cfg, int max_delay) 2815 { 2816 volatile u32 pcii_reg; 2817 int delay = 1; 2818 2819 /* Read interrupt reg until IOA signals IO Debug Acknowledge */ 2820 while (delay < max_delay) { 2821 pcii_reg = readl(ioa_cfg->regs.sense_interrupt_reg); 2822 2823 if (pcii_reg & IPR_PCII_IO_DEBUG_ACKNOWLEDGE) 2824 return 0; 2825 2826 /* udelay cannot be used if delay is more than a few milliseconds */ 2827 if ((delay / 1000) > MAX_UDELAY_MS) 2828 mdelay(delay / 1000); 2829 else 2830 udelay(delay); 2831 2832 delay += delay; 2833 } 2834 return -EIO; 2835 } 2836 2837 /** 2838 * ipr_get_sis64_dump_data_section - Dump IOA memory 2839 * @ioa_cfg: ioa config struct 2840 * @start_addr: adapter address to dump 2841 * @dest: destination kernel buffer 2842 * @length_in_words: length to dump in 4 byte words 2843 * 2844 * Return value: 2845 * 0 on success 2846 **/ 2847 static int ipr_get_sis64_dump_data_section(struct ipr_ioa_cfg *ioa_cfg, 2848 u32 start_addr, 2849 __be32 *dest, u32 length_in_words) 2850 { 2851 int i; 2852 2853 for (i = 0; i < length_in_words; i++) { 2854 writel(start_addr+(i*4), ioa_cfg->regs.dump_addr_reg); 2855 *dest = cpu_to_be32(readl(ioa_cfg->regs.dump_data_reg)); 2856 dest++; 2857 } 2858 2859 return 0; 2860 } 2861 2862 /** 2863 * ipr_get_ldump_data_section - Dump IOA memory 2864 * @ioa_cfg: ioa config struct 2865 * @start_addr: adapter address to dump 2866 * @dest: destination kernel buffer 2867 * @length_in_words: length to dump in 4 byte words 2868 * 2869 * Return value: 2870 * 0 on success / -EIO on failure 2871 **/ 2872 static int ipr_get_ldump_data_section(struct ipr_ioa_cfg *ioa_cfg, 2873 u32 start_addr, 2874 __be32 *dest, u32 length_in_words) 2875 { 2876 volatile u32 temp_pcii_reg; 2877 int i, delay = 0; 2878 2879 if (ioa_cfg->sis64) 2880 return ipr_get_sis64_dump_data_section(ioa_cfg, start_addr, 2881 dest, length_in_words); 2882 2883 /* Write IOA interrupt reg starting LDUMP state */ 2884 writel((IPR_UPROCI_RESET_ALERT | IPR_UPROCI_IO_DEBUG_ALERT), 2885 ioa_cfg->regs.set_uproc_interrupt_reg32); 2886 2887 /* Wait for IO debug acknowledge */ 2888 if (ipr_wait_iodbg_ack(ioa_cfg, 2889 IPR_LDUMP_MAX_LONG_ACK_DELAY_IN_USEC)) { 2890 dev_err(&ioa_cfg->pdev->dev, 2891 "IOA dump long data transfer timeout\n"); 2892 return -EIO; 2893 } 2894 2895 /* Signal LDUMP interlocked - clear IO debug ack */ 2896 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, 2897 ioa_cfg->regs.clr_interrupt_reg); 2898 2899 /* Write Mailbox with starting address */ 2900 writel(start_addr, ioa_cfg->ioa_mailbox); 2901 2902 /* Signal address valid - clear IOA Reset alert */ 2903 writel(IPR_UPROCI_RESET_ALERT, 2904 ioa_cfg->regs.clr_uproc_interrupt_reg32); 2905 2906 for (i = 0; i < length_in_words; i++) { 2907 /* Wait for IO debug acknowledge */ 2908 if (ipr_wait_iodbg_ack(ioa_cfg, 2909 IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC)) { 2910 dev_err(&ioa_cfg->pdev->dev, 2911 "IOA dump short data transfer timeout\n"); 2912 return -EIO; 2913 } 2914 2915 /* Read data from mailbox and increment destination pointer */ 2916 *dest = cpu_to_be32(readl(ioa_cfg->ioa_mailbox)); 2917 dest++; 2918 2919 /* For all but the last word of data, signal data received */ 2920 if (i < (length_in_words - 1)) { 2921 /* Signal dump data received - Clear IO debug Ack */ 2922 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, 2923 ioa_cfg->regs.clr_interrupt_reg); 2924 } 2925 } 2926 2927 /* Signal end of block transfer. 
Set reset alert then clear IO debug ack */ 2928 writel(IPR_UPROCI_RESET_ALERT, 2929 ioa_cfg->regs.set_uproc_interrupt_reg32); 2930 2931 writel(IPR_UPROCI_IO_DEBUG_ALERT, 2932 ioa_cfg->regs.clr_uproc_interrupt_reg32); 2933 2934 /* Signal dump data received - Clear IO debug Ack */ 2935 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, 2936 ioa_cfg->regs.clr_interrupt_reg); 2937 2938 /* Wait for IOA to signal LDUMP exit - IOA reset alert will be cleared */ 2939 while (delay < IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC) { 2940 temp_pcii_reg = 2941 readl(ioa_cfg->regs.sense_uproc_interrupt_reg32); 2942 2943 if (!(temp_pcii_reg & IPR_UPROCI_RESET_ALERT)) 2944 return 0; 2945 2946 udelay(10); 2947 delay += 10; 2948 } 2949 2950 return 0; 2951 } 2952 2953 #ifdef CONFIG_SCSI_IPR_DUMP 2954 /** 2955 * ipr_sdt_copy - Copy Smart Dump Table to kernel buffer 2956 * @ioa_cfg: ioa config struct 2957 * @pci_address: adapter address 2958 * @length: length of data to copy 2959 * 2960 * Copy data from PCI adapter to kernel buffer. 2961 * Note: length MUST be a 4 byte multiple 2962 * Return value: 2963 * 0 on success / other on failure 2964 **/ 2965 static int ipr_sdt_copy(struct ipr_ioa_cfg *ioa_cfg, 2966 unsigned long pci_address, u32 length) 2967 { 2968 int bytes_copied = 0; 2969 int cur_len, rc, rem_len, rem_page_len, max_dump_size; 2970 __be32 *page; 2971 unsigned long lock_flags = 0; 2972 struct ipr_ioa_dump *ioa_dump = &ioa_cfg->dump->ioa_dump; 2973 2974 if (ioa_cfg->sis64) 2975 max_dump_size = IPR_FMT3_MAX_IOA_DUMP_SIZE; 2976 else 2977 max_dump_size = IPR_FMT2_MAX_IOA_DUMP_SIZE; 2978 2979 while (bytes_copied < length && 2980 (ioa_dump->hdr.len + bytes_copied) < max_dump_size) { 2981 if (ioa_dump->page_offset >= PAGE_SIZE || 2982 ioa_dump->page_offset == 0) { 2983 page = (__be32 *)__get_free_page(GFP_ATOMIC); 2984 2985 if (!page) { 2986 ipr_trace; 2987 return bytes_copied; 2988 } 2989 2990 ioa_dump->page_offset = 0; 2991 ioa_dump->ioa_data[ioa_dump->next_page_index] = page; 2992 ioa_dump->next_page_index++; 2993 } else 2994 page = ioa_dump->ioa_data[ioa_dump->next_page_index - 1]; 2995 2996 rem_len = length - bytes_copied; 2997 rem_page_len = PAGE_SIZE - ioa_dump->page_offset; 2998 cur_len = min(rem_len, rem_page_len); 2999 3000 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 3001 if (ioa_cfg->sdt_state == ABORT_DUMP) { 3002 rc = -EIO; 3003 } else { 3004 rc = ipr_get_ldump_data_section(ioa_cfg, 3005 pci_address + bytes_copied, 3006 &page[ioa_dump->page_offset / 4], 3007 (cur_len / sizeof(u32))); 3008 } 3009 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 3010 3011 if (!rc) { 3012 ioa_dump->page_offset += cur_len; 3013 bytes_copied += cur_len; 3014 } else { 3015 ipr_trace; 3016 break; 3017 } 3018 schedule(); 3019 } 3020 3021 return bytes_copied; 3022 } 3023 3024 /** 3025 * ipr_init_dump_entry_hdr - Initialize a dump entry header. 3026 * @hdr: dump entry header struct 3027 * 3028 * Return value: 3029 * nothing 3030 **/ 3031 static void ipr_init_dump_entry_hdr(struct ipr_dump_entry_header *hdr) 3032 { 3033 hdr->eye_catcher = IPR_DUMP_EYE_CATCHER; 3034 hdr->num_elems = 1; 3035 hdr->offset = sizeof(*hdr); 3036 hdr->status = IPR_DUMP_STATUS_SUCCESS; 3037 } 3038 3039 /** 3040 * ipr_dump_ioa_type_data - Fill in the adapter type in the dump. 
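 *
 * The firmware version is packed into a single 32-bit word as
 * (major_release << 24) | (card_type << 16) | (minor_release[0] << 8)
 * | minor_release[1]; for example (values illustrative only), major
 * 0x02, card type 0x10 and minor bytes 0x00 0x05 would yield
 * 0x02100005.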
3041 * @ioa_cfg: ioa config struct 3042 * @driver_dump: driver dump struct 3043 * 3044 * Return value: 3045 * nothing 3046 **/ 3047 static void ipr_dump_ioa_type_data(struct ipr_ioa_cfg *ioa_cfg, 3048 struct ipr_driver_dump *driver_dump) 3049 { 3050 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data; 3051 3052 ipr_init_dump_entry_hdr(&driver_dump->ioa_type_entry.hdr); 3053 driver_dump->ioa_type_entry.hdr.len = 3054 sizeof(struct ipr_dump_ioa_type_entry) - 3055 sizeof(struct ipr_dump_entry_header); 3056 driver_dump->ioa_type_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY; 3057 driver_dump->ioa_type_entry.hdr.id = IPR_DUMP_DRIVER_TYPE_ID; 3058 driver_dump->ioa_type_entry.type = ioa_cfg->type; 3059 driver_dump->ioa_type_entry.fw_version = (ucode_vpd->major_release << 24) | 3060 (ucode_vpd->card_type << 16) | (ucode_vpd->minor_release[0] << 8) | 3061 ucode_vpd->minor_release[1]; 3062 driver_dump->hdr.num_entries++; 3063 } 3064 3065 /** 3066 * ipr_dump_version_data - Fill in the driver version in the dump. 3067 * @ioa_cfg: ioa config struct 3068 * @driver_dump: driver dump struct 3069 * 3070 * Return value: 3071 * nothing 3072 **/ 3073 static void ipr_dump_version_data(struct ipr_ioa_cfg *ioa_cfg, 3074 struct ipr_driver_dump *driver_dump) 3075 { 3076 ipr_init_dump_entry_hdr(&driver_dump->version_entry.hdr); 3077 driver_dump->version_entry.hdr.len = 3078 sizeof(struct ipr_dump_version_entry) - 3079 sizeof(struct ipr_dump_entry_header); 3080 driver_dump->version_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII; 3081 driver_dump->version_entry.hdr.id = IPR_DUMP_DRIVER_VERSION_ID; 3082 strcpy(driver_dump->version_entry.version, IPR_DRIVER_VERSION); 3083 driver_dump->hdr.num_entries++; 3084 } 3085 3086 /** 3087 * ipr_dump_trace_data - Fill in the IOA trace in the dump. 3088 * @ioa_cfg: ioa config struct 3089 * @driver_dump: driver dump struct 3090 * 3091 * Return value: 3092 * nothing 3093 **/ 3094 static void ipr_dump_trace_data(struct ipr_ioa_cfg *ioa_cfg, 3095 struct ipr_driver_dump *driver_dump) 3096 { 3097 ipr_init_dump_entry_hdr(&driver_dump->trace_entry.hdr); 3098 driver_dump->trace_entry.hdr.len = 3099 sizeof(struct ipr_dump_trace_entry) - 3100 sizeof(struct ipr_dump_entry_header); 3101 driver_dump->trace_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY; 3102 driver_dump->trace_entry.hdr.id = IPR_DUMP_TRACE_ID; 3103 memcpy(driver_dump->trace_entry.trace, ioa_cfg->trace, IPR_TRACE_SIZE); 3104 driver_dump->hdr.num_entries++; 3105 } 3106 3107 /** 3108 * ipr_dump_location_data - Fill in the IOA location in the dump. 3109 * @ioa_cfg: ioa config struct 3110 * @driver_dump: driver dump struct 3111 * 3112 * Return value: 3113 * nothing 3114 **/ 3115 static void ipr_dump_location_data(struct ipr_ioa_cfg *ioa_cfg, 3116 struct ipr_driver_dump *driver_dump) 3117 { 3118 ipr_init_dump_entry_hdr(&driver_dump->location_entry.hdr); 3119 driver_dump->location_entry.hdr.len = 3120 sizeof(struct ipr_dump_location_entry) - 3121 sizeof(struct ipr_dump_entry_header); 3122 driver_dump->location_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII; 3123 driver_dump->location_entry.hdr.id = IPR_DUMP_LOCATION_ID; 3124 strcpy(driver_dump->location_entry.location, dev_name(&ioa_cfg->pdev->dev)); 3125 driver_dump->hdr.num_entries++; 3126 } 3127 3128 /** 3129 * ipr_get_ioa_dump - Perform a dump of the driver and adapter. 
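 *
 * The resulting dump begins with a driver dump header followed by
 * driver version, adapter location, adapter type and trace entries,
 * then an IOA dump entry whose data is gathered by reading the Smart
 * Dump Table address from the mailbox register and copying each valid
 * table entry's region out of adapter memory.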
3130 * @ioa_cfg: ioa config struct 3131 * @dump: dump struct 3132 * 3133 * Return value: 3134 * nothing 3135 **/ 3136 static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump) 3137 { 3138 unsigned long start_addr, sdt_word; 3139 unsigned long lock_flags = 0; 3140 struct ipr_driver_dump *driver_dump = &dump->driver_dump; 3141 struct ipr_ioa_dump *ioa_dump = &dump->ioa_dump; 3142 u32 num_entries, max_num_entries, start_off, end_off; 3143 u32 max_dump_size, bytes_to_copy, bytes_copied, rc; 3144 struct ipr_sdt *sdt; 3145 int valid = 1; 3146 int i; 3147 3148 ENTER; 3149 3150 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 3151 3152 if (ioa_cfg->sdt_state != READ_DUMP) { 3153 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 3154 return; 3155 } 3156 3157 if (ioa_cfg->sis64) { 3158 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 3159 ssleep(IPR_DUMP_DELAY_SECONDS); 3160 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 3161 } 3162 3163 start_addr = readl(ioa_cfg->ioa_mailbox); 3164 3165 if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(start_addr)) { 3166 dev_err(&ioa_cfg->pdev->dev, 3167 "Invalid dump table format: %lx\n", start_addr); 3168 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 3169 return; 3170 } 3171 3172 dev_err(&ioa_cfg->pdev->dev, "Dump of IOA initiated\n"); 3173 3174 driver_dump->hdr.eye_catcher = IPR_DUMP_EYE_CATCHER; 3175 3176 /* Initialize the overall dump header */ 3177 driver_dump->hdr.len = sizeof(struct ipr_driver_dump); 3178 driver_dump->hdr.num_entries = 1; 3179 driver_dump->hdr.first_entry_offset = sizeof(struct ipr_dump_header); 3180 driver_dump->hdr.status = IPR_DUMP_STATUS_SUCCESS; 3181 driver_dump->hdr.os = IPR_DUMP_OS_LINUX; 3182 driver_dump->hdr.driver_name = IPR_DUMP_DRIVER_NAME; 3183 3184 ipr_dump_version_data(ioa_cfg, driver_dump); 3185 ipr_dump_location_data(ioa_cfg, driver_dump); 3186 ipr_dump_ioa_type_data(ioa_cfg, driver_dump); 3187 ipr_dump_trace_data(ioa_cfg, driver_dump); 3188 3189 /* Update dump_header */ 3190 driver_dump->hdr.len += sizeof(struct ipr_dump_entry_header); 3191 3192 /* IOA Dump entry */ 3193 ipr_init_dump_entry_hdr(&ioa_dump->hdr); 3194 ioa_dump->hdr.len = 0; 3195 ioa_dump->hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY; 3196 ioa_dump->hdr.id = IPR_DUMP_IOA_DUMP_ID; 3197 3198 /* First entries in sdt are actually a list of dump addresses and 3199 lengths to gather the real dump data. sdt represents the pointer 3200 to the ioa generated dump table. Dump data will be extracted based 3201 on entries in this table */ 3202 sdt = &ioa_dump->sdt; 3203 3204 if (ioa_cfg->sis64) { 3205 max_num_entries = IPR_FMT3_NUM_SDT_ENTRIES; 3206 max_dump_size = IPR_FMT3_MAX_IOA_DUMP_SIZE; 3207 } else { 3208 max_num_entries = IPR_FMT2_NUM_SDT_ENTRIES; 3209 max_dump_size = IPR_FMT2_MAX_IOA_DUMP_SIZE; 3210 } 3211 3212 bytes_to_copy = offsetof(struct ipr_sdt, entry) + 3213 (max_num_entries * sizeof(struct ipr_sdt_entry)); 3214 rc = ipr_get_ldump_data_section(ioa_cfg, start_addr, (__be32 *)sdt, 3215 bytes_to_copy / sizeof(__be32)); 3216 3217 /* Smart Dump table is ready to use and the first entry is valid */ 3218 if (rc || ((be32_to_cpu(sdt->hdr.state) != IPR_FMT3_SDT_READY_TO_USE) && 3219 (be32_to_cpu(sdt->hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) { 3220 dev_err(&ioa_cfg->pdev->dev, 3221 "Dump of IOA failed. 
Dump table not valid: %d, %X.\n", 3222 rc, be32_to_cpu(sdt->hdr.state)); 3223 driver_dump->hdr.status = IPR_DUMP_STATUS_FAILED; 3224 ioa_cfg->sdt_state = DUMP_OBTAINED; 3225 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 3226 return; 3227 } 3228 3229 num_entries = be32_to_cpu(sdt->hdr.num_entries_used); 3230 3231 if (num_entries > max_num_entries) 3232 num_entries = max_num_entries; 3233 3234 /* Update dump length to the actual data to be copied */ 3235 dump->driver_dump.hdr.len += sizeof(struct ipr_sdt_header); 3236 if (ioa_cfg->sis64) 3237 dump->driver_dump.hdr.len += num_entries * sizeof(struct ipr_sdt_entry); 3238 else 3239 dump->driver_dump.hdr.len += max_num_entries * sizeof(struct ipr_sdt_entry); 3240 3241 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 3242 3243 for (i = 0; i < num_entries; i++) { 3244 if (ioa_dump->hdr.len > max_dump_size) { 3245 driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS; 3246 break; 3247 } 3248 3249 if (sdt->entry[i].flags & IPR_SDT_VALID_ENTRY) { 3250 sdt_word = be32_to_cpu(sdt->entry[i].start_token); 3251 if (ioa_cfg->sis64) 3252 bytes_to_copy = be32_to_cpu(sdt->entry[i].end_token); 3253 else { 3254 start_off = sdt_word & IPR_FMT2_MBX_ADDR_MASK; 3255 end_off = be32_to_cpu(sdt->entry[i].end_token); 3256 3257 if (ipr_sdt_is_fmt2(sdt_word) && sdt_word) 3258 bytes_to_copy = end_off - start_off; 3259 else 3260 valid = 0; 3261 } 3262 if (valid) { 3263 if (bytes_to_copy > max_dump_size) { 3264 sdt->entry[i].flags &= ~IPR_SDT_VALID_ENTRY; 3265 continue; 3266 } 3267 3268 /* Copy data from adapter to driver buffers */ 3269 bytes_copied = ipr_sdt_copy(ioa_cfg, sdt_word, 3270 bytes_to_copy); 3271 3272 ioa_dump->hdr.len += bytes_copied; 3273 3274 if (bytes_copied != bytes_to_copy) { 3275 driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS; 3276 break; 3277 } 3278 } 3279 } 3280 } 3281 3282 dev_err(&ioa_cfg->pdev->dev, "Dump of IOA completed.\n"); 3283 3284 /* Update dump_header */ 3285 driver_dump->hdr.len += ioa_dump->hdr.len; 3286 wmb(); 3287 ioa_cfg->sdt_state = DUMP_OBTAINED; 3288 LEAVE; 3289 } 3290 3291 #else 3292 #define ipr_get_ioa_dump(ioa_cfg, dump) do { } while (0) 3293 #endif 3294 3295 /** 3296 * ipr_release_dump - Free adapter dump memory 3297 * @kref: kref struct 3298 * 3299 * Return value: 3300 * nothing 3301 **/ 3302 static void ipr_release_dump(struct kref *kref) 3303 { 3304 struct ipr_dump *dump = container_of(kref, struct ipr_dump, kref); 3305 struct ipr_ioa_cfg *ioa_cfg = dump->ioa_cfg; 3306 unsigned long lock_flags = 0; 3307 int i; 3308 3309 ENTER; 3310 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 3311 ioa_cfg->dump = NULL; 3312 ioa_cfg->sdt_state = INACTIVE; 3313 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 3314 3315 for (i = 0; i < dump->ioa_dump.next_page_index; i++) 3316 free_page((unsigned long) dump->ioa_dump.ioa_data[i]); 3317 3318 vfree(dump->ioa_dump.ioa_data); 3319 kfree(dump); 3320 LEAVE; 3321 } 3322 3323 static void ipr_add_remove_thread(struct work_struct *work) 3324 { 3325 unsigned long lock_flags; 3326 struct ipr_resource_entry *res; 3327 struct scsi_device *sdev; 3328 struct ipr_ioa_cfg *ioa_cfg = 3329 container_of(work, struct ipr_ioa_cfg, scsi_add_work_q); 3330 u8 bus, target, lun; 3331 int did_work; 3332 3333 ENTER; 3334 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 3335 3336 restart: 3337 do { 3338 did_work = 0; 3339 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) { 3340 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 3341 return; 3342 } 
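		/*
		 * Removal pass: for each resource flagged del_from_ml with an
		 * attached sdev, drop the host lock (scsi_remove_device() can
		 * sleep), remove the device, and then restart the list walk,
		 * since the list may have changed while the lock was released.
		 */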
3343 3344 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) { 3345 if (res->del_from_ml && res->sdev) { 3346 did_work = 1; 3347 sdev = res->sdev; 3348 if (!scsi_device_get(sdev)) { 3349 if (!res->add_to_ml) 3350 list_move_tail(&res->queue, &ioa_cfg->free_res_q); 3351 else 3352 res->del_from_ml = 0; 3353 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 3354 scsi_remove_device(sdev); 3355 scsi_device_put(sdev); 3356 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 3357 } 3358 break; 3359 } 3360 } 3361 } while (did_work); 3362 3363 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) { 3364 if (res->add_to_ml) { 3365 bus = res->bus; 3366 target = res->target; 3367 lun = res->lun; 3368 res->add_to_ml = 0; 3369 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 3370 scsi_add_device(ioa_cfg->host, bus, target, lun); 3371 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 3372 goto restart; 3373 } 3374 } 3375 3376 ioa_cfg->scan_done = 1; 3377 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 3378 kobject_uevent(&ioa_cfg->host->shost_dev.kobj, KOBJ_CHANGE); 3379 LEAVE; 3380 } 3381 3382 /** 3383 * ipr_worker_thread - Worker thread 3384 * @work: ioa config struct 3385 * 3386 * Called at task level from a work thread. This function takes care 3387 * of adding and removing device from the mid-layer as configuration 3388 * changes are detected by the adapter. 3389 * 3390 * Return value: 3391 * nothing 3392 **/ 3393 static void ipr_worker_thread(struct work_struct *work) 3394 { 3395 unsigned long lock_flags; 3396 struct ipr_dump *dump; 3397 struct ipr_ioa_cfg *ioa_cfg = 3398 container_of(work, struct ipr_ioa_cfg, work_q); 3399 3400 ENTER; 3401 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 3402 3403 if (ioa_cfg->sdt_state == READ_DUMP) { 3404 dump = ioa_cfg->dump; 3405 if (!dump) { 3406 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 3407 return; 3408 } 3409 kref_get(&dump->kref); 3410 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 3411 ipr_get_ioa_dump(ioa_cfg, dump); 3412 kref_put(&dump->kref, ipr_release_dump); 3413 3414 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 3415 if (ioa_cfg->sdt_state == DUMP_OBTAINED && !ioa_cfg->dump_timeout) 3416 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE); 3417 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 3418 return; 3419 } 3420 3421 if (ioa_cfg->scsi_unblock) { 3422 ioa_cfg->scsi_unblock = 0; 3423 ioa_cfg->scsi_blocked = 0; 3424 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 3425 scsi_unblock_requests(ioa_cfg->host); 3426 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 3427 if (ioa_cfg->scsi_blocked) 3428 scsi_block_requests(ioa_cfg->host); 3429 } 3430 3431 if (!ioa_cfg->scan_enabled) { 3432 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 3433 return; 3434 } 3435 3436 schedule_work(&ioa_cfg->scsi_add_work_q); 3437 3438 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 3439 LEAVE; 3440 } 3441 3442 #ifdef CONFIG_SCSI_IPR_TRACE 3443 /** 3444 * ipr_read_trace - Dump the adapter trace 3445 * @filp: open sysfs file 3446 * @kobj: kobject struct 3447 * @bin_attr: bin_attribute struct 3448 * @buf: buffer 3449 * @off: offset 3450 * @count: buffer size 3451 * 3452 * Return value: 3453 * number of bytes printed to buffer 3454 **/ 3455 static ssize_t ipr_read_trace(struct file *filp, struct kobject *kobj, 3456 struct bin_attribute *bin_attr, 3457 char *buf, loff_t off, size_t count) 3458 { 3459 struct 
device *dev = kobj_to_dev(kobj); 3460 struct Scsi_Host *shost = class_to_shost(dev); 3461 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; 3462 unsigned long lock_flags = 0; 3463 ssize_t ret; 3464 3465 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 3466 ret = memory_read_from_buffer(buf, count, &off, ioa_cfg->trace, 3467 IPR_TRACE_SIZE); 3468 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 3469 3470 return ret; 3471 } 3472 3473 static struct bin_attribute ipr_trace_attr = { 3474 .attr = { 3475 .name = "trace", 3476 .mode = S_IRUGO, 3477 }, 3478 .size = 0, 3479 .read = ipr_read_trace, 3480 }; 3481 #endif 3482 3483 /** 3484 * ipr_show_fw_version - Show the firmware version 3485 * @dev: class device struct 3486 * @attr: device attribute (unused) 3487 * @buf: buffer 3488 * 3489 * Return value: 3490 * number of bytes printed to buffer 3491 **/ 3492 static ssize_t ipr_show_fw_version(struct device *dev, 3493 struct device_attribute *attr, char *buf) 3494 { 3495 struct Scsi_Host *shost = class_to_shost(dev); 3496 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; 3497 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data; 3498 unsigned long lock_flags = 0; 3499 int len; 3500 3501 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 3502 len = snprintf(buf, PAGE_SIZE, "%02X%02X%02X%02X\n", 3503 ucode_vpd->major_release, ucode_vpd->card_type, 3504 ucode_vpd->minor_release[0], 3505 ucode_vpd->minor_release[1]); 3506 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 3507 return len; 3508 } 3509 3510 static struct device_attribute ipr_fw_version_attr = { 3511 .attr = { 3512 .name = "fw_version", 3513 .mode = S_IRUGO, 3514 }, 3515 .show = ipr_show_fw_version, 3516 }; 3517 3518 /** 3519 * ipr_show_log_level - Show the adapter's error logging level 3520 * @dev: class device struct 3521 * @attr: device attribute (unused) 3522 * @buf: buffer 3523 * 3524 * Return value: 3525 * number of bytes printed to buffer 3526 **/ 3527 static ssize_t ipr_show_log_level(struct device *dev, 3528 struct device_attribute *attr, char *buf) 3529 { 3530 struct Scsi_Host *shost = class_to_shost(dev); 3531 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; 3532 unsigned long lock_flags = 0; 3533 int len; 3534 3535 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 3536 len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->log_level); 3537 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 3538 return len; 3539 } 3540 3541 /** 3542 * ipr_store_log_level - Change the adapter's error logging level 3543 * @dev: class device struct 3544 * @attr: device attribute (unused) 3545 * @buf: buffer 3546 * @count: buffer size 3547 * 3548 * Return value: 3549 * number of bytes printed to buffer 3550 **/ 3551 static ssize_t ipr_store_log_level(struct device *dev, 3552 struct device_attribute *attr, 3553 const char *buf, size_t count) 3554 { 3555 struct Scsi_Host *shost = class_to_shost(dev); 3556 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; 3557 unsigned long lock_flags = 0; 3558 3559 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 3560 ioa_cfg->log_level = simple_strtoul(buf, NULL, 10); 3561 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 3562 return strlen(buf); 3563 } 3564 3565 static struct device_attribute ipr_log_level_attr = { 3566 .attr = { 3567 .name = "log_level", 3568 .mode = S_IRUGO | S_IWUSR, 3569 }, 3570 .show = ipr_show_log_level, 3571 .store = ipr_store_log_level 
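	/*
	 * Exposed through sysfs as a Scsi_Host class attribute, typically
	 * visible as /sys/class/scsi_host/hostN/log_level (path shown for
	 * illustration; the exact location depends on the SCSI midlayer).
	 * Writing a number, e.g. "echo 4 > .../log_level", changes the
	 * logging verbosity at runtime.
	 */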
3572 }; 3573 3574 /** 3575 * ipr_store_diagnostics - IOA Diagnostics interface 3576 * @dev: device struct 3577 * @attr: device attribute (unused) 3578 * @buf: buffer 3579 * @count: buffer size 3580 * 3581 * This function will reset the adapter and wait a reasonable 3582 * amount of time for any errors that the adapter might log. 3583 * 3584 * Return value: 3585 * count on success / other on failure 3586 **/ 3587 static ssize_t ipr_store_diagnostics(struct device *dev, 3588 struct device_attribute *attr, 3589 const char *buf, size_t count) 3590 { 3591 struct Scsi_Host *shost = class_to_shost(dev); 3592 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; 3593 unsigned long lock_flags = 0; 3594 int rc = count; 3595 3596 if (!capable(CAP_SYS_ADMIN)) 3597 return -EACCES; 3598 3599 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 3600 while (ioa_cfg->in_reset_reload) { 3601 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 3602 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); 3603 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 3604 } 3605 3606 ioa_cfg->errors_logged = 0; 3607 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL); 3608 3609 if (ioa_cfg->in_reset_reload) { 3610 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 3611 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); 3612 3613 /* Wait for a second for any errors to be logged */ 3614 msleep(1000); 3615 } else { 3616 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 3617 return -EIO; 3618 } 3619 3620 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 3621 if (ioa_cfg->in_reset_reload || ioa_cfg->errors_logged) 3622 rc = -EIO; 3623 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 3624 3625 return rc; 3626 } 3627 3628 static struct device_attribute ipr_diagnostics_attr = { 3629 .attr = { 3630 .name = "run_diagnostics", 3631 .mode = S_IWUSR, 3632 }, 3633 .store = ipr_store_diagnostics 3634 }; 3635 3636 /** 3637 * ipr_show_adapter_state - Show the adapter's state 3638 * @dev: device struct 3639 * @attr: device attribute (unused) 3640 * @buf: buffer 3641 * 3642 * Return value: 3643 * number of bytes printed to buffer 3644 **/ 3645 static ssize_t ipr_show_adapter_state(struct device *dev, 3646 struct device_attribute *attr, char *buf) 3647 { 3648 struct Scsi_Host *shost = class_to_shost(dev); 3649 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; 3650 unsigned long lock_flags = 0; 3651 int len; 3652 3653 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 3654 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) 3655 len = snprintf(buf, PAGE_SIZE, "offline\n"); 3656 else 3657 len = snprintf(buf, PAGE_SIZE, "online\n"); 3658 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 3659 return len; 3660 } 3661 3662 /** 3663 * ipr_store_adapter_state - Change adapter state 3664 * @dev: device struct 3665 * @attr: device attribute (unused) 3666 * @buf: buffer 3667 * @count: buffer size 3668 * 3669 * This function will change the adapter's state. 
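 * Only the dead-to-online transition is acted on: writing the string "online"
 * (typically `echo online > /sys/class/scsi_host/hostN/online_state`, where
 * hostN is this adapter's SCSI host) clears the per-hrrq dead flags and kicks
 * off an adapter reset; any other value is accepted but ignored.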
3670 * 3671 * Return value: 3672 * count on success / other on failure 3673 **/ 3674 static ssize_t ipr_store_adapter_state(struct device *dev, 3675 struct device_attribute *attr, 3676 const char *buf, size_t count) 3677 { 3678 struct Scsi_Host *shost = class_to_shost(dev); 3679 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; 3680 unsigned long lock_flags; 3681 int result = count, i; 3682 3683 if (!capable(CAP_SYS_ADMIN)) 3684 return -EACCES; 3685 3686 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 3687 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead && 3688 !strncmp(buf, "online", 6)) { 3689 for (i = 0; i < ioa_cfg->hrrq_num; i++) { 3690 spin_lock(&ioa_cfg->hrrq[i]._lock); 3691 ioa_cfg->hrrq[i].ioa_is_dead = 0; 3692 spin_unlock(&ioa_cfg->hrrq[i]._lock); 3693 } 3694 wmb(); 3695 ioa_cfg->reset_retries = 0; 3696 ioa_cfg->in_ioa_bringdown = 0; 3697 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE); 3698 } 3699 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 3700 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); 3701 3702 return result; 3703 } 3704 3705 static struct device_attribute ipr_ioa_state_attr = { 3706 .attr = { 3707 .name = "online_state", 3708 .mode = S_IRUGO | S_IWUSR, 3709 }, 3710 .show = ipr_show_adapter_state, 3711 .store = ipr_store_adapter_state 3712 }; 3713 3714 /** 3715 * ipr_store_reset_adapter - Reset the adapter 3716 * @dev: device struct 3717 * @attr: device attribute (unused) 3718 * @buf: buffer 3719 * @count: buffer size 3720 * 3721 * This function will reset the adapter. 3722 * 3723 * Return value: 3724 * count on success / other on failure 3725 **/ 3726 static ssize_t ipr_store_reset_adapter(struct device *dev, 3727 struct device_attribute *attr, 3728 const char *buf, size_t count) 3729 { 3730 struct Scsi_Host *shost = class_to_shost(dev); 3731 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; 3732 unsigned long lock_flags; 3733 int result = count; 3734 3735 if (!capable(CAP_SYS_ADMIN)) 3736 return -EACCES; 3737 3738 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 3739 if (!ioa_cfg->in_reset_reload) 3740 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL); 3741 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 3742 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); 3743 3744 return result; 3745 } 3746 3747 static struct device_attribute ipr_ioa_reset_attr = { 3748 .attr = { 3749 .name = "reset_host", 3750 .mode = S_IWUSR, 3751 }, 3752 .store = ipr_store_reset_adapter 3753 }; 3754 3755 static int ipr_iopoll(struct irq_poll *iop, int budget); 3756 /** 3757 * ipr_show_iopoll_weight - Show ipr polling mode 3758 * @dev: class device struct 3759 * @attr: device attribute (unused) 3760 * @buf: buffer 3761 * 3762 * Return value: 3763 * number of bytes printed to buffer 3764 **/ 3765 static ssize_t ipr_show_iopoll_weight(struct device *dev, 3766 struct device_attribute *attr, char *buf) 3767 { 3768 struct Scsi_Host *shost = class_to_shost(dev); 3769 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; 3770 unsigned long lock_flags = 0; 3771 int len; 3772 3773 spin_lock_irqsave(shost->host_lock, lock_flags); 3774 len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->iopoll_weight); 3775 spin_unlock_irqrestore(shost->host_lock, lock_flags); 3776 3777 return len; 3778 } 3779 3780 /** 3781 * ipr_store_iopoll_weight - Change the adapter's polling mode 3782 * @dev: class device struct 3783 * @attr: device attribute (unused) 3784 * @buf: buffer 3785 * @count: buffer 
size
 *
 * Return value:
 * 	strlen(buf) on success / -EINVAL on failure
 **/
static ssize_t ipr_store_iopoll_weight(struct device *dev,
				       struct device_attribute *attr,
				       const char *buf, size_t count)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	unsigned long user_iopoll_weight;
	unsigned long lock_flags = 0;
	int i;

	if (!ioa_cfg->sis64) {
		dev_info(&ioa_cfg->pdev->dev, "irq_poll not supported on this adapter\n");
		return -EINVAL;
	}
	if (kstrtoul(buf, 10, &user_iopoll_weight))
		return -EINVAL;

	if (user_iopoll_weight > 256) {
		dev_info(&ioa_cfg->pdev->dev, "Invalid irq_poll weight. It must not exceed 256\n");
		return -EINVAL;
	}

	if (user_iopoll_weight == ioa_cfg->iopoll_weight) {
		dev_info(&ioa_cfg->pdev->dev, "irq_poll weight unchanged\n");
		return strlen(buf);
	}

	if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
		for (i = 1; i < ioa_cfg->hrrq_num; i++)
			irq_poll_disable(&ioa_cfg->hrrq[i].iopoll);
	}

	spin_lock_irqsave(shost->host_lock, lock_flags);
	ioa_cfg->iopoll_weight = user_iopoll_weight;
	if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
		for (i = 1; i < ioa_cfg->hrrq_num; i++) {
			irq_poll_init(&ioa_cfg->hrrq[i].iopoll,
				      ioa_cfg->iopoll_weight, ipr_iopoll);
		}
	}
	spin_unlock_irqrestore(shost->host_lock, lock_flags);

	return strlen(buf);
}

static struct device_attribute ipr_iopoll_weight_attr = {
	.attr = {
		.name =		"iopoll_weight",
		.mode =		S_IRUGO | S_IWUSR,
	},
	.show = ipr_show_iopoll_weight,
	.store = ipr_store_iopoll_weight
};

/**
 * ipr_alloc_ucode_buffer - Allocates a microcode download buffer
 * @buf_len:		buffer length
 *
 * Allocates a DMA'able buffer in chunks and assembles a scatter/gather
 * list to use for microcode download
 *
 * Return value:
 * 	pointer to sglist / NULL on failure
 **/
static struct ipr_sglist *ipr_alloc_ucode_buffer(int buf_len)
{
	int sg_size, order;
	struct ipr_sglist *sglist;

	/* Get the minimum size per scatter/gather element */
	sg_size = buf_len / (IPR_MAX_SGLIST - 1);

	/* Get the actual size per element */
	order = get_order(sg_size);

	/* Allocate a scatter/gather list for the DMA */
	sglist = kzalloc(sizeof(struct ipr_sglist), GFP_KERNEL);
	if (sglist == NULL) {
		ipr_trace;
		return NULL;
	}
	sglist->order = order;
	sglist->scatterlist = sgl_alloc_order(buf_len, order, false, GFP_KERNEL,
					      &sglist->num_sg);
	if (!sglist->scatterlist) {
		kfree(sglist);
		return NULL;
	}

	return sglist;
}

/**
 * ipr_free_ucode_buffer - Frees a microcode download buffer
 * @sglist:		scatter/gather list pointer
 *
 * Free a DMA'able ucode download buffer previously allocated with
 * ipr_alloc_ucode_buffer
 *
 * Return value:
 * 	nothing
 **/
static void ipr_free_ucode_buffer(struct ipr_sglist *sglist)
{
	sgl_free_order(sglist->scatterlist, sglist->order);
	kfree(sglist);
}

/**
 * ipr_copy_ucode_buffer - Copy user buffer to kernel buffer
 * @sglist:		scatter/gather list pointer
 * @buffer:		buffer pointer
 * @len:		buffer length
 *
 * Copy a
microcode image from a user buffer into a buffer allocated by 3905 * ipr_alloc_ucode_buffer 3906 * 3907 * Return value: 3908 * 0 on success / other on failure 3909 **/ 3910 static int ipr_copy_ucode_buffer(struct ipr_sglist *sglist, 3911 u8 *buffer, u32 len) 3912 { 3913 int bsize_elem, i, result = 0; 3914 struct scatterlist *sg; 3915 3916 /* Determine the actual number of bytes per element */ 3917 bsize_elem = PAGE_SIZE * (1 << sglist->order); 3918 3919 sg = sglist->scatterlist; 3920 3921 for (i = 0; i < (len / bsize_elem); i++, sg = sg_next(sg), 3922 buffer += bsize_elem) { 3923 struct page *page = sg_page(sg); 3924 3925 memcpy_to_page(page, 0, buffer, bsize_elem); 3926 3927 sg->length = bsize_elem; 3928 3929 if (result != 0) { 3930 ipr_trace; 3931 return result; 3932 } 3933 } 3934 3935 if (len % bsize_elem) { 3936 struct page *page = sg_page(sg); 3937 3938 memcpy_to_page(page, 0, buffer, len % bsize_elem); 3939 3940 sg->length = len % bsize_elem; 3941 } 3942 3943 sglist->buffer_len = len; 3944 return result; 3945 } 3946 3947 /** 3948 * ipr_build_ucode_ioadl64 - Build a microcode download IOADL 3949 * @ipr_cmd: ipr command struct 3950 * @sglist: scatter/gather list 3951 * 3952 * Builds a microcode download IOA data list (IOADL). 3953 * 3954 **/ 3955 static void ipr_build_ucode_ioadl64(struct ipr_cmnd *ipr_cmd, 3956 struct ipr_sglist *sglist) 3957 { 3958 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; 3959 struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64; 3960 struct scatterlist *scatterlist = sglist->scatterlist; 3961 struct scatterlist *sg; 3962 int i; 3963 3964 ipr_cmd->dma_use_sg = sglist->num_dma_sg; 3965 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ; 3966 ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len); 3967 3968 ioarcb->ioadl_len = 3969 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg); 3970 for_each_sg(scatterlist, sg, ipr_cmd->dma_use_sg, i) { 3971 ioadl64[i].flags = cpu_to_be32(IPR_IOADL_FLAGS_WRITE); 3972 ioadl64[i].data_len = cpu_to_be32(sg_dma_len(sg)); 3973 ioadl64[i].address = cpu_to_be64(sg_dma_address(sg)); 3974 } 3975 3976 ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST); 3977 } 3978 3979 /** 3980 * ipr_build_ucode_ioadl - Build a microcode download IOADL 3981 * @ipr_cmd: ipr command struct 3982 * @sglist: scatter/gather list 3983 * 3984 * Builds a microcode download IOA data list (IOADL). 
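 * Each DMA-mapped scatterlist element becomes one write descriptor, and the
 * final descriptor is tagged with IPR_IOADL_FLAGS_LAST to mark the end of
 * the list.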
3985 * 3986 **/ 3987 static void ipr_build_ucode_ioadl(struct ipr_cmnd *ipr_cmd, 3988 struct ipr_sglist *sglist) 3989 { 3990 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; 3991 struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl; 3992 struct scatterlist *scatterlist = sglist->scatterlist; 3993 struct scatterlist *sg; 3994 int i; 3995 3996 ipr_cmd->dma_use_sg = sglist->num_dma_sg; 3997 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ; 3998 ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len); 3999 4000 ioarcb->ioadl_len = 4001 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg); 4002 4003 for_each_sg(scatterlist, sg, ipr_cmd->dma_use_sg, i) { 4004 ioadl[i].flags_and_data_len = 4005 cpu_to_be32(IPR_IOADL_FLAGS_WRITE | sg_dma_len(sg)); 4006 ioadl[i].address = 4007 cpu_to_be32(sg_dma_address(sg)); 4008 } 4009 4010 ioadl[i-1].flags_and_data_len |= 4011 cpu_to_be32(IPR_IOADL_FLAGS_LAST); 4012 } 4013 4014 /** 4015 * ipr_update_ioa_ucode - Update IOA's microcode 4016 * @ioa_cfg: ioa config struct 4017 * @sglist: scatter/gather list 4018 * 4019 * Initiate an adapter reset to update the IOA's microcode 4020 * 4021 * Return value: 4022 * 0 on success / -EIO on failure 4023 **/ 4024 static int ipr_update_ioa_ucode(struct ipr_ioa_cfg *ioa_cfg, 4025 struct ipr_sglist *sglist) 4026 { 4027 unsigned long lock_flags; 4028 4029 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 4030 while (ioa_cfg->in_reset_reload) { 4031 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 4032 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); 4033 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 4034 } 4035 4036 if (ioa_cfg->ucode_sglist) { 4037 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 4038 dev_err(&ioa_cfg->pdev->dev, 4039 "Microcode download already in progress\n"); 4040 return -EIO; 4041 } 4042 4043 sglist->num_dma_sg = dma_map_sg(&ioa_cfg->pdev->dev, 4044 sglist->scatterlist, sglist->num_sg, 4045 DMA_TO_DEVICE); 4046 4047 if (!sglist->num_dma_sg) { 4048 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 4049 dev_err(&ioa_cfg->pdev->dev, 4050 "Failed to map microcode download buffer!\n"); 4051 return -EIO; 4052 } 4053 4054 ioa_cfg->ucode_sglist = sglist; 4055 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL); 4056 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 4057 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); 4058 4059 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 4060 ioa_cfg->ucode_sglist = NULL; 4061 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 4062 return 0; 4063 } 4064 4065 /** 4066 * ipr_store_update_fw - Update the firmware on the adapter 4067 * @dev: device struct 4068 * @attr: device attribute (unused) 4069 * @buf: buffer 4070 * @count: buffer size 4071 * 4072 * This function will update the firmware on the adapter. 
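 * The written string is taken as a firmware image file name to be resolved
 * by request_firmware(), so the image must be visible in the firmware search
 * path (e.g. /lib/firmware); a trailing newline, as produced by echo, is
 * stripped before the lookup.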
4073 * 4074 * Return value: 4075 * count on success / other on failure 4076 **/ 4077 static ssize_t ipr_store_update_fw(struct device *dev, 4078 struct device_attribute *attr, 4079 const char *buf, size_t count) 4080 { 4081 struct Scsi_Host *shost = class_to_shost(dev); 4082 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; 4083 struct ipr_ucode_image_header *image_hdr; 4084 const struct firmware *fw_entry; 4085 struct ipr_sglist *sglist; 4086 char fname[100]; 4087 char *src; 4088 char *endline; 4089 int result, dnld_size; 4090 4091 if (!capable(CAP_SYS_ADMIN)) 4092 return -EACCES; 4093 4094 snprintf(fname, sizeof(fname), "%s", buf); 4095 4096 endline = strchr(fname, '\n'); 4097 if (endline) 4098 *endline = '\0'; 4099 4100 if (request_firmware(&fw_entry, fname, &ioa_cfg->pdev->dev)) { 4101 dev_err(&ioa_cfg->pdev->dev, "Firmware file %s not found\n", fname); 4102 return -EIO; 4103 } 4104 4105 image_hdr = (struct ipr_ucode_image_header *)fw_entry->data; 4106 4107 src = (u8 *)image_hdr + be32_to_cpu(image_hdr->header_length); 4108 dnld_size = fw_entry->size - be32_to_cpu(image_hdr->header_length); 4109 sglist = ipr_alloc_ucode_buffer(dnld_size); 4110 4111 if (!sglist) { 4112 dev_err(&ioa_cfg->pdev->dev, "Microcode buffer allocation failed\n"); 4113 release_firmware(fw_entry); 4114 return -ENOMEM; 4115 } 4116 4117 result = ipr_copy_ucode_buffer(sglist, src, dnld_size); 4118 4119 if (result) { 4120 dev_err(&ioa_cfg->pdev->dev, 4121 "Microcode buffer copy to DMA buffer failed\n"); 4122 goto out; 4123 } 4124 4125 ipr_info("Updating microcode, please be patient. This may take up to 30 minutes.\n"); 4126 4127 result = ipr_update_ioa_ucode(ioa_cfg, sglist); 4128 4129 if (!result) 4130 result = count; 4131 out: 4132 ipr_free_ucode_buffer(sglist); 4133 release_firmware(fw_entry); 4134 return result; 4135 } 4136 4137 static struct device_attribute ipr_update_fw_attr = { 4138 .attr = { 4139 .name = "update_fw", 4140 .mode = S_IWUSR, 4141 }, 4142 .store = ipr_store_update_fw 4143 }; 4144 4145 /** 4146 * ipr_show_fw_type - Show the adapter's firmware type. 
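 * (Prints ioa_cfg->sis64: non-zero for SIS-64 adapters, 0 for SIS-32.)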
4147 * @dev: class device struct 4148 * @attr: device attribute (unused) 4149 * @buf: buffer 4150 * 4151 * Return value: 4152 * number of bytes printed to buffer 4153 **/ 4154 static ssize_t ipr_show_fw_type(struct device *dev, 4155 struct device_attribute *attr, char *buf) 4156 { 4157 struct Scsi_Host *shost = class_to_shost(dev); 4158 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; 4159 unsigned long lock_flags = 0; 4160 int len; 4161 4162 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 4163 len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->sis64); 4164 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 4165 return len; 4166 } 4167 4168 static struct device_attribute ipr_ioa_fw_type_attr = { 4169 .attr = { 4170 .name = "fw_type", 4171 .mode = S_IRUGO, 4172 }, 4173 .show = ipr_show_fw_type 4174 }; 4175 4176 static ssize_t ipr_read_async_err_log(struct file *filep, struct kobject *kobj, 4177 struct bin_attribute *bin_attr, char *buf, 4178 loff_t off, size_t count) 4179 { 4180 struct device *cdev = kobj_to_dev(kobj); 4181 struct Scsi_Host *shost = class_to_shost(cdev); 4182 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; 4183 struct ipr_hostrcb *hostrcb; 4184 unsigned long lock_flags = 0; 4185 int ret; 4186 4187 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 4188 hostrcb = list_first_entry_or_null(&ioa_cfg->hostrcb_report_q, 4189 struct ipr_hostrcb, queue); 4190 if (!hostrcb) { 4191 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 4192 return 0; 4193 } 4194 ret = memory_read_from_buffer(buf, count, &off, &hostrcb->hcam, 4195 sizeof(hostrcb->hcam)); 4196 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 4197 return ret; 4198 } 4199 4200 static ssize_t ipr_next_async_err_log(struct file *filep, struct kobject *kobj, 4201 struct bin_attribute *bin_attr, char *buf, 4202 loff_t off, size_t count) 4203 { 4204 struct device *cdev = kobj_to_dev(kobj); 4205 struct Scsi_Host *shost = class_to_shost(cdev); 4206 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; 4207 struct ipr_hostrcb *hostrcb; 4208 unsigned long lock_flags = 0; 4209 4210 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 4211 hostrcb = list_first_entry_or_null(&ioa_cfg->hostrcb_report_q, 4212 struct ipr_hostrcb, queue); 4213 if (!hostrcb) { 4214 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 4215 return count; 4216 } 4217 4218 /* Reclaim hostrcb before exit */ 4219 list_move_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q); 4220 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 4221 return count; 4222 } 4223 4224 static struct bin_attribute ipr_ioa_async_err_log = { 4225 .attr = { 4226 .name = "async_err_log", 4227 .mode = S_IRUGO | S_IWUSR, 4228 }, 4229 .size = 0, 4230 .read = ipr_read_async_err_log, 4231 .write = ipr_next_async_err_log 4232 }; 4233 4234 static struct attribute *ipr_ioa_attrs[] = { 4235 &ipr_fw_version_attr.attr, 4236 &ipr_log_level_attr.attr, 4237 &ipr_diagnostics_attr.attr, 4238 &ipr_ioa_state_attr.attr, 4239 &ipr_ioa_reset_attr.attr, 4240 &ipr_update_fw_attr.attr, 4241 &ipr_ioa_fw_type_attr.attr, 4242 &ipr_iopoll_weight_attr.attr, 4243 NULL, 4244 }; 4245 4246 ATTRIBUTE_GROUPS(ipr_ioa); 4247 4248 #ifdef CONFIG_SCSI_IPR_DUMP 4249 /** 4250 * ipr_read_dump - Dump the adapter 4251 * @filp: open sysfs file 4252 * @kobj: kobject struct 4253 * @bin_attr: bin_attribute struct 4254 * @buf: buffer 4255 * @off: offset 4256 * @count: buffer size 4257 * 4258 * Return value: 4259 * 
number of bytes printed to buffer 4260 **/ 4261 static ssize_t ipr_read_dump(struct file *filp, struct kobject *kobj, 4262 struct bin_attribute *bin_attr, 4263 char *buf, loff_t off, size_t count) 4264 { 4265 struct device *cdev = kobj_to_dev(kobj); 4266 struct Scsi_Host *shost = class_to_shost(cdev); 4267 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; 4268 struct ipr_dump *dump; 4269 unsigned long lock_flags = 0; 4270 char *src; 4271 int len, sdt_end; 4272 size_t rc = count; 4273 4274 if (!capable(CAP_SYS_ADMIN)) 4275 return -EACCES; 4276 4277 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 4278 dump = ioa_cfg->dump; 4279 4280 if (ioa_cfg->sdt_state != DUMP_OBTAINED || !dump) { 4281 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 4282 return 0; 4283 } 4284 kref_get(&dump->kref); 4285 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 4286 4287 if (off > dump->driver_dump.hdr.len) { 4288 kref_put(&dump->kref, ipr_release_dump); 4289 return 0; 4290 } 4291 4292 if (off + count > dump->driver_dump.hdr.len) { 4293 count = dump->driver_dump.hdr.len - off; 4294 rc = count; 4295 } 4296 4297 if (count && off < sizeof(dump->driver_dump)) { 4298 if (off + count > sizeof(dump->driver_dump)) 4299 len = sizeof(dump->driver_dump) - off; 4300 else 4301 len = count; 4302 src = (u8 *)&dump->driver_dump + off; 4303 memcpy(buf, src, len); 4304 buf += len; 4305 off += len; 4306 count -= len; 4307 } 4308 4309 off -= sizeof(dump->driver_dump); 4310 4311 if (ioa_cfg->sis64) 4312 sdt_end = offsetof(struct ipr_ioa_dump, sdt.entry) + 4313 (be32_to_cpu(dump->ioa_dump.sdt.hdr.num_entries_used) * 4314 sizeof(struct ipr_sdt_entry)); 4315 else 4316 sdt_end = offsetof(struct ipr_ioa_dump, sdt.entry) + 4317 (IPR_FMT2_NUM_SDT_ENTRIES * sizeof(struct ipr_sdt_entry)); 4318 4319 if (count && off < sdt_end) { 4320 if (off + count > sdt_end) 4321 len = sdt_end - off; 4322 else 4323 len = count; 4324 src = (u8 *)&dump->ioa_dump + off; 4325 memcpy(buf, src, len); 4326 buf += len; 4327 off += len; 4328 count -= len; 4329 } 4330 4331 off -= sdt_end; 4332 4333 while (count) { 4334 if ((off & PAGE_MASK) != ((off + count) & PAGE_MASK)) 4335 len = PAGE_ALIGN(off) - off; 4336 else 4337 len = count; 4338 src = (u8 *)dump->ioa_dump.ioa_data[(off & PAGE_MASK) >> PAGE_SHIFT]; 4339 src += off & ~PAGE_MASK; 4340 memcpy(buf, src, len); 4341 buf += len; 4342 off += len; 4343 count -= len; 4344 } 4345 4346 kref_put(&dump->kref, ipr_release_dump); 4347 return rc; 4348 } 4349 4350 /** 4351 * ipr_alloc_dump - Prepare for adapter dump 4352 * @ioa_cfg: ioa config struct 4353 * 4354 * Return value: 4355 * 0 on success / other on failure 4356 **/ 4357 static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg) 4358 { 4359 struct ipr_dump *dump; 4360 __be32 **ioa_data; 4361 unsigned long lock_flags = 0; 4362 4363 dump = kzalloc(sizeof(struct ipr_dump), GFP_KERNEL); 4364 4365 if (!dump) { 4366 ipr_err("Dump memory allocation failed\n"); 4367 return -ENOMEM; 4368 } 4369 4370 if (ioa_cfg->sis64) 4371 ioa_data = vmalloc(array_size(IPR_FMT3_MAX_NUM_DUMP_PAGES, 4372 sizeof(__be32 *))); 4373 else 4374 ioa_data = vmalloc(array_size(IPR_FMT2_MAX_NUM_DUMP_PAGES, 4375 sizeof(__be32 *))); 4376 4377 if (!ioa_data) { 4378 ipr_err("Dump memory allocation failed\n"); 4379 kfree(dump); 4380 return -ENOMEM; 4381 } 4382 4383 dump->ioa_dump.ioa_data = ioa_data; 4384 4385 kref_init(&dump->kref); 4386 dump->ioa_cfg = ioa_cfg; 4387 4388 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 4389 4390 if (INACTIVE != 
ioa_cfg->sdt_state) { 4391 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 4392 vfree(dump->ioa_dump.ioa_data); 4393 kfree(dump); 4394 return 0; 4395 } 4396 4397 ioa_cfg->dump = dump; 4398 ioa_cfg->sdt_state = WAIT_FOR_DUMP; 4399 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead && !ioa_cfg->dump_taken) { 4400 ioa_cfg->dump_taken = 1; 4401 schedule_work(&ioa_cfg->work_q); 4402 } 4403 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 4404 4405 return 0; 4406 } 4407 4408 /** 4409 * ipr_free_dump - Free adapter dump memory 4410 * @ioa_cfg: ioa config struct 4411 * 4412 * Return value: 4413 * 0 on success / other on failure 4414 **/ 4415 static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg) 4416 { 4417 struct ipr_dump *dump; 4418 unsigned long lock_flags = 0; 4419 4420 ENTER; 4421 4422 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 4423 dump = ioa_cfg->dump; 4424 if (!dump) { 4425 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 4426 return 0; 4427 } 4428 4429 ioa_cfg->dump = NULL; 4430 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 4431 4432 kref_put(&dump->kref, ipr_release_dump); 4433 4434 LEAVE; 4435 return 0; 4436 } 4437 4438 /** 4439 * ipr_write_dump - Setup dump state of adapter 4440 * @filp: open sysfs file 4441 * @kobj: kobject struct 4442 * @bin_attr: bin_attribute struct 4443 * @buf: buffer 4444 * @off: offset 4445 * @count: buffer size 4446 * 4447 * Return value: 4448 * number of bytes printed to buffer 4449 **/ 4450 static ssize_t ipr_write_dump(struct file *filp, struct kobject *kobj, 4451 struct bin_attribute *bin_attr, 4452 char *buf, loff_t off, size_t count) 4453 { 4454 struct device *cdev = kobj_to_dev(kobj); 4455 struct Scsi_Host *shost = class_to_shost(cdev); 4456 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; 4457 int rc; 4458 4459 if (!capable(CAP_SYS_ADMIN)) 4460 return -EACCES; 4461 4462 if (buf[0] == '1') 4463 rc = ipr_alloc_dump(ioa_cfg); 4464 else if (buf[0] == '0') 4465 rc = ipr_free_dump(ioa_cfg); 4466 else 4467 return -EINVAL; 4468 4469 if (rc) 4470 return rc; 4471 else 4472 return count; 4473 } 4474 4475 static struct bin_attribute ipr_dump_attr = { 4476 .attr = { 4477 .name = "dump", 4478 .mode = S_IRUSR | S_IWUSR, 4479 }, 4480 .size = 0, 4481 .read = ipr_read_dump, 4482 .write = ipr_write_dump 4483 }; 4484 #else 4485 static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg) { return 0; }; 4486 #endif 4487 4488 /** 4489 * ipr_change_queue_depth - Change the device's queue depth 4490 * @sdev: scsi device struct 4491 * @qdepth: depth to set 4492 * 4493 * Return value: 4494 * actual depth set 4495 **/ 4496 static int ipr_change_queue_depth(struct scsi_device *sdev, int qdepth) 4497 { 4498 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata; 4499 struct ipr_resource_entry *res; 4500 unsigned long lock_flags = 0; 4501 4502 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 4503 res = (struct ipr_resource_entry *)sdev->hostdata; 4504 4505 if (res && ipr_is_gata(res) && qdepth > IPR_MAX_CMD_PER_ATA_LUN) 4506 qdepth = IPR_MAX_CMD_PER_ATA_LUN; 4507 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 4508 4509 scsi_change_queue_depth(sdev, qdepth); 4510 return sdev->queue_depth; 4511 } 4512 4513 /** 4514 * ipr_show_adapter_handle - Show the adapter's resource handle for this device 4515 * @dev: device struct 4516 * @attr: device attribute structure 4517 * @buf: buffer 4518 * 4519 * Return value: 4520 * number of bytes printed to buffer 4521 **/ 4522 static 
ssize_t ipr_show_adapter_handle(struct device *dev, struct device_attribute *attr, char *buf) 4523 { 4524 struct scsi_device *sdev = to_scsi_device(dev); 4525 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata; 4526 struct ipr_resource_entry *res; 4527 unsigned long lock_flags = 0; 4528 ssize_t len = -ENXIO; 4529 4530 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 4531 res = (struct ipr_resource_entry *)sdev->hostdata; 4532 if (res) 4533 len = snprintf(buf, PAGE_SIZE, "%08X\n", res->res_handle); 4534 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 4535 return len; 4536 } 4537 4538 static struct device_attribute ipr_adapter_handle_attr = { 4539 .attr = { 4540 .name = "adapter_handle", 4541 .mode = S_IRUSR, 4542 }, 4543 .show = ipr_show_adapter_handle 4544 }; 4545 4546 /** 4547 * ipr_show_resource_path - Show the resource path or the resource address for 4548 * this device. 4549 * @dev: device struct 4550 * @attr: device attribute structure 4551 * @buf: buffer 4552 * 4553 * Return value: 4554 * number of bytes printed to buffer 4555 **/ 4556 static ssize_t ipr_show_resource_path(struct device *dev, struct device_attribute *attr, char *buf) 4557 { 4558 struct scsi_device *sdev = to_scsi_device(dev); 4559 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata; 4560 struct ipr_resource_entry *res; 4561 unsigned long lock_flags = 0; 4562 ssize_t len = -ENXIO; 4563 char buffer[IPR_MAX_RES_PATH_LENGTH]; 4564 4565 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 4566 res = (struct ipr_resource_entry *)sdev->hostdata; 4567 if (res && ioa_cfg->sis64) 4568 len = snprintf(buf, PAGE_SIZE, "%s\n", 4569 __ipr_format_res_path(res->res_path, buffer, 4570 sizeof(buffer))); 4571 else if (res) 4572 len = snprintf(buf, PAGE_SIZE, "%d:%d:%d:%d\n", ioa_cfg->host->host_no, 4573 res->bus, res->target, res->lun); 4574 4575 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 4576 return len; 4577 } 4578 4579 static struct device_attribute ipr_resource_path_attr = { 4580 .attr = { 4581 .name = "resource_path", 4582 .mode = S_IRUGO, 4583 }, 4584 .show = ipr_show_resource_path 4585 }; 4586 4587 /** 4588 * ipr_show_device_id - Show the device_id for this device. 4589 * @dev: device struct 4590 * @attr: device attribute structure 4591 * @buf: buffer 4592 * 4593 * Return value: 4594 * number of bytes printed to buffer 4595 **/ 4596 static ssize_t ipr_show_device_id(struct device *dev, struct device_attribute *attr, char *buf) 4597 { 4598 struct scsi_device *sdev = to_scsi_device(dev); 4599 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata; 4600 struct ipr_resource_entry *res; 4601 unsigned long lock_flags = 0; 4602 ssize_t len = -ENXIO; 4603 4604 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 4605 res = (struct ipr_resource_entry *)sdev->hostdata; 4606 if (res && ioa_cfg->sis64) 4607 len = snprintf(buf, PAGE_SIZE, "0x%llx\n", be64_to_cpu(res->dev_id)); 4608 else if (res) 4609 len = snprintf(buf, PAGE_SIZE, "0x%llx\n", res->lun_wwn); 4610 4611 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 4612 return len; 4613 } 4614 4615 static struct device_attribute ipr_device_id_attr = { 4616 .attr = { 4617 .name = "device_id", 4618 .mode = S_IRUGO, 4619 }, 4620 .show = ipr_show_device_id 4621 }; 4622 4623 /** 4624 * ipr_show_resource_type - Show the resource type for this device. 
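 * (The raw resource type from the device's config table entry, in hex.)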
4625 * @dev: device struct 4626 * @attr: device attribute structure 4627 * @buf: buffer 4628 * 4629 * Return value: 4630 * number of bytes printed to buffer 4631 **/ 4632 static ssize_t ipr_show_resource_type(struct device *dev, struct device_attribute *attr, char *buf) 4633 { 4634 struct scsi_device *sdev = to_scsi_device(dev); 4635 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata; 4636 struct ipr_resource_entry *res; 4637 unsigned long lock_flags = 0; 4638 ssize_t len = -ENXIO; 4639 4640 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 4641 res = (struct ipr_resource_entry *)sdev->hostdata; 4642 4643 if (res) 4644 len = snprintf(buf, PAGE_SIZE, "%x\n", res->type); 4645 4646 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 4647 return len; 4648 } 4649 4650 static struct device_attribute ipr_resource_type_attr = { 4651 .attr = { 4652 .name = "resource_type", 4653 .mode = S_IRUGO, 4654 }, 4655 .show = ipr_show_resource_type 4656 }; 4657 4658 /** 4659 * ipr_show_raw_mode - Show the adapter's raw mode 4660 * @dev: class device struct 4661 * @attr: device attribute (unused) 4662 * @buf: buffer 4663 * 4664 * Return value: 4665 * number of bytes printed to buffer 4666 **/ 4667 static ssize_t ipr_show_raw_mode(struct device *dev, 4668 struct device_attribute *attr, char *buf) 4669 { 4670 struct scsi_device *sdev = to_scsi_device(dev); 4671 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata; 4672 struct ipr_resource_entry *res; 4673 unsigned long lock_flags = 0; 4674 ssize_t len; 4675 4676 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 4677 res = (struct ipr_resource_entry *)sdev->hostdata; 4678 if (res) 4679 len = snprintf(buf, PAGE_SIZE, "%d\n", res->raw_mode); 4680 else 4681 len = -ENXIO; 4682 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 4683 return len; 4684 } 4685 4686 /** 4687 * ipr_store_raw_mode - Change the adapter's raw mode 4688 * @dev: class device struct 4689 * @attr: device attribute (unused) 4690 * @buf: buffer 4691 * @count: buffer size 4692 * 4693 * Return value: 4694 * number of bytes printed to buffer 4695 **/ 4696 static ssize_t ipr_store_raw_mode(struct device *dev, 4697 struct device_attribute *attr, 4698 const char *buf, size_t count) 4699 { 4700 struct scsi_device *sdev = to_scsi_device(dev); 4701 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata; 4702 struct ipr_resource_entry *res; 4703 unsigned long lock_flags = 0; 4704 ssize_t len; 4705 4706 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 4707 res = (struct ipr_resource_entry *)sdev->hostdata; 4708 if (res) { 4709 if (ipr_is_af_dasd_device(res)) { 4710 res->raw_mode = simple_strtoul(buf, NULL, 10); 4711 len = strlen(buf); 4712 if (res->sdev) 4713 sdev_printk(KERN_INFO, res->sdev, "raw mode is %s\n", 4714 res->raw_mode ? 
"enabled" : "disabled"); 4715 } else 4716 len = -EINVAL; 4717 } else 4718 len = -ENXIO; 4719 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 4720 return len; 4721 } 4722 4723 static struct device_attribute ipr_raw_mode_attr = { 4724 .attr = { 4725 .name = "raw_mode", 4726 .mode = S_IRUGO | S_IWUSR, 4727 }, 4728 .show = ipr_show_raw_mode, 4729 .store = ipr_store_raw_mode 4730 }; 4731 4732 static struct attribute *ipr_dev_attrs[] = { 4733 &ipr_adapter_handle_attr.attr, 4734 &ipr_resource_path_attr.attr, 4735 &ipr_device_id_attr.attr, 4736 &ipr_resource_type_attr.attr, 4737 &ipr_raw_mode_attr.attr, 4738 NULL, 4739 }; 4740 4741 ATTRIBUTE_GROUPS(ipr_dev); 4742 4743 /** 4744 * ipr_biosparam - Return the HSC mapping 4745 * @sdev: scsi device struct 4746 * @block_device: block device pointer 4747 * @capacity: capacity of the device 4748 * @parm: Array containing returned HSC values. 4749 * 4750 * This function generates the HSC parms that fdisk uses. 4751 * We want to make sure we return something that places partitions 4752 * on 4k boundaries for best performance with the IOA. 4753 * 4754 * Return value: 4755 * 0 on success 4756 **/ 4757 static int ipr_biosparam(struct scsi_device *sdev, 4758 struct block_device *block_device, 4759 sector_t capacity, int *parm) 4760 { 4761 int heads, sectors; 4762 sector_t cylinders; 4763 4764 heads = 128; 4765 sectors = 32; 4766 4767 cylinders = capacity; 4768 sector_div(cylinders, (128 * 32)); 4769 4770 /* return result */ 4771 parm[0] = heads; 4772 parm[1] = sectors; 4773 parm[2] = cylinders; 4774 4775 return 0; 4776 } 4777 4778 /** 4779 * ipr_find_starget - Find target based on bus/target. 4780 * @starget: scsi target struct 4781 * 4782 * Return value: 4783 * resource entry pointer if found / NULL if not found 4784 **/ 4785 static struct ipr_resource_entry *ipr_find_starget(struct scsi_target *starget) 4786 { 4787 struct Scsi_Host *shost = dev_to_shost(&starget->dev); 4788 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata; 4789 struct ipr_resource_entry *res; 4790 4791 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) { 4792 if ((res->bus == starget->channel) && 4793 (res->target == starget->id)) { 4794 return res; 4795 } 4796 } 4797 4798 return NULL; 4799 } 4800 4801 static struct ata_port_info sata_port_info; 4802 4803 /** 4804 * ipr_target_alloc - Prepare for commands to a SCSI target 4805 * @starget: scsi target struct 4806 * 4807 * If the device is a SATA device, this function allocates an 4808 * ATA port with libata, else it does nothing. 
4809 * 4810 * Return value: 4811 * 0 on success / non-0 on failure 4812 **/ 4813 static int ipr_target_alloc(struct scsi_target *starget) 4814 { 4815 struct Scsi_Host *shost = dev_to_shost(&starget->dev); 4816 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata; 4817 struct ipr_sata_port *sata_port; 4818 struct ata_port *ap; 4819 struct ipr_resource_entry *res; 4820 unsigned long lock_flags; 4821 4822 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 4823 res = ipr_find_starget(starget); 4824 starget->hostdata = NULL; 4825 4826 if (res && ipr_is_gata(res)) { 4827 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 4828 sata_port = kzalloc(sizeof(*sata_port), GFP_KERNEL); 4829 if (!sata_port) 4830 return -ENOMEM; 4831 4832 ap = ata_sas_port_alloc(&ioa_cfg->ata_host, &sata_port_info, shost); 4833 if (ap) { 4834 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 4835 sata_port->ioa_cfg = ioa_cfg; 4836 sata_port->ap = ap; 4837 sata_port->res = res; 4838 4839 res->sata_port = sata_port; 4840 ap->private_data = sata_port; 4841 starget->hostdata = sata_port; 4842 } else { 4843 kfree(sata_port); 4844 return -ENOMEM; 4845 } 4846 } 4847 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 4848 4849 return 0; 4850 } 4851 4852 /** 4853 * ipr_target_destroy - Destroy a SCSI target 4854 * @starget: scsi target struct 4855 * 4856 * If the device was a SATA device, this function frees the libata 4857 * ATA port, else it does nothing. 4858 * 4859 **/ 4860 static void ipr_target_destroy(struct scsi_target *starget) 4861 { 4862 struct ipr_sata_port *sata_port = starget->hostdata; 4863 struct Scsi_Host *shost = dev_to_shost(&starget->dev); 4864 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata; 4865 4866 if (ioa_cfg->sis64) { 4867 if (!ipr_find_starget(starget)) { 4868 if (starget->channel == IPR_ARRAY_VIRTUAL_BUS) 4869 clear_bit(starget->id, ioa_cfg->array_ids); 4870 else if (starget->channel == IPR_VSET_VIRTUAL_BUS) 4871 clear_bit(starget->id, ioa_cfg->vset_ids); 4872 else if (starget->channel == 0) 4873 clear_bit(starget->id, ioa_cfg->target_ids); 4874 } 4875 } 4876 4877 if (sata_port) { 4878 starget->hostdata = NULL; 4879 ata_sas_port_destroy(sata_port->ap); 4880 kfree(sata_port); 4881 } 4882 } 4883 4884 /** 4885 * ipr_find_sdev - Find device based on bus/target/lun. 
4886 * @sdev: scsi device struct 4887 * 4888 * Return value: 4889 * resource entry pointer if found / NULL if not found 4890 **/ 4891 static struct ipr_resource_entry *ipr_find_sdev(struct scsi_device *sdev) 4892 { 4893 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata; 4894 struct ipr_resource_entry *res; 4895 4896 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) { 4897 if ((res->bus == sdev->channel) && 4898 (res->target == sdev->id) && 4899 (res->lun == sdev->lun)) 4900 return res; 4901 } 4902 4903 return NULL; 4904 } 4905 4906 /** 4907 * ipr_slave_destroy - Unconfigure a SCSI device 4908 * @sdev: scsi device struct 4909 * 4910 * Return value: 4911 * nothing 4912 **/ 4913 static void ipr_slave_destroy(struct scsi_device *sdev) 4914 { 4915 struct ipr_resource_entry *res; 4916 struct ipr_ioa_cfg *ioa_cfg; 4917 unsigned long lock_flags = 0; 4918 4919 ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata; 4920 4921 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 4922 res = (struct ipr_resource_entry *) sdev->hostdata; 4923 if (res) { 4924 if (res->sata_port) 4925 res->sata_port->ap->link.device[0].class = ATA_DEV_NONE; 4926 sdev->hostdata = NULL; 4927 res->sdev = NULL; 4928 res->sata_port = NULL; 4929 } 4930 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 4931 } 4932 4933 /** 4934 * ipr_slave_configure - Configure a SCSI device 4935 * @sdev: scsi device struct 4936 * 4937 * This function configures the specified scsi device. 4938 * 4939 * Return value: 4940 * 0 on success 4941 **/ 4942 static int ipr_slave_configure(struct scsi_device *sdev) 4943 { 4944 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata; 4945 struct ipr_resource_entry *res; 4946 struct ata_port *ap = NULL; 4947 unsigned long lock_flags = 0; 4948 char buffer[IPR_MAX_RES_PATH_LENGTH]; 4949 4950 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 4951 res = sdev->hostdata; 4952 if (res) { 4953 if (ipr_is_af_dasd_device(res)) 4954 sdev->type = TYPE_RAID; 4955 if (ipr_is_af_dasd_device(res) || ipr_is_ioa_resource(res)) { 4956 sdev->scsi_level = 4; 4957 sdev->no_uld_attach = 1; 4958 } 4959 if (ipr_is_vset_device(res)) { 4960 sdev->scsi_level = SCSI_SPC_3; 4961 sdev->no_report_opcodes = 1; 4962 blk_queue_rq_timeout(sdev->request_queue, 4963 IPR_VSET_RW_TIMEOUT); 4964 blk_queue_max_hw_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS); 4965 } 4966 if (ipr_is_gata(res) && res->sata_port) 4967 ap = res->sata_port->ap; 4968 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 4969 4970 if (ap) { 4971 scsi_change_queue_depth(sdev, IPR_MAX_CMD_PER_ATA_LUN); 4972 ata_sas_slave_configure(sdev, ap); 4973 } 4974 4975 if (ioa_cfg->sis64) 4976 sdev_printk(KERN_INFO, sdev, "Resource path: %s\n", 4977 ipr_format_res_path(ioa_cfg, 4978 res->res_path, buffer, sizeof(buffer))); 4979 return 0; 4980 } 4981 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 4982 return 0; 4983 } 4984 4985 /** 4986 * ipr_ata_slave_alloc - Prepare for commands to a SATA device 4987 * @sdev: scsi device struct 4988 * 4989 * This function initializes an ATA port so that future commands 4990 * sent through queuecommand will work. 
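 * It initializes and synchronously probes the libata SAS port set up in
 * ipr_target_alloc(); if either step fails, the device is torn back down
 * via ipr_slave_destroy().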
4991 * 4992 * Return value: 4993 * 0 on success 4994 **/ 4995 static int ipr_ata_slave_alloc(struct scsi_device *sdev) 4996 { 4997 struct ipr_sata_port *sata_port = NULL; 4998 int rc = -ENXIO; 4999 5000 ENTER; 5001 if (sdev->sdev_target) 5002 sata_port = sdev->sdev_target->hostdata; 5003 if (sata_port) { 5004 rc = ata_sas_port_init(sata_port->ap); 5005 if (rc == 0) 5006 rc = ata_sas_sync_probe(sata_port->ap); 5007 } 5008 5009 if (rc) 5010 ipr_slave_destroy(sdev); 5011 5012 LEAVE; 5013 return rc; 5014 } 5015 5016 /** 5017 * ipr_slave_alloc - Prepare for commands to a device. 5018 * @sdev: scsi device struct 5019 * 5020 * This function saves a pointer to the resource entry 5021 * in the scsi device struct if the device exists. We 5022 * can then use this pointer in ipr_queuecommand when 5023 * handling new commands. 5024 * 5025 * Return value: 5026 * 0 on success / -ENXIO if device does not exist 5027 **/ 5028 static int ipr_slave_alloc(struct scsi_device *sdev) 5029 { 5030 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata; 5031 struct ipr_resource_entry *res; 5032 unsigned long lock_flags; 5033 int rc = -ENXIO; 5034 5035 sdev->hostdata = NULL; 5036 5037 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 5038 5039 res = ipr_find_sdev(sdev); 5040 if (res) { 5041 res->sdev = sdev; 5042 res->add_to_ml = 0; 5043 res->in_erp = 0; 5044 sdev->hostdata = res; 5045 if (!ipr_is_naca_model(res)) 5046 res->needs_sync_complete = 1; 5047 rc = 0; 5048 if (ipr_is_gata(res)) { 5049 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 5050 return ipr_ata_slave_alloc(sdev); 5051 } 5052 } 5053 5054 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 5055 5056 return rc; 5057 } 5058 5059 /** 5060 * ipr_match_lun - Match function for specified LUN 5061 * @ipr_cmd: ipr command struct 5062 * @device: device to match (sdev) 5063 * 5064 * Returns: 5065 * 1 if command matches sdev / 0 if command does not match sdev 5066 **/ 5067 static int ipr_match_lun(struct ipr_cmnd *ipr_cmd, void *device) 5068 { 5069 if (ipr_cmd->scsi_cmd && ipr_cmd->scsi_cmd->device == device) 5070 return 1; 5071 return 0; 5072 } 5073 5074 /** 5075 * ipr_cmnd_is_free - Check if a command is free or not 5076 * @ipr_cmd: ipr command struct 5077 * 5078 * Returns: 5079 * true / false 5080 **/ 5081 static bool ipr_cmnd_is_free(struct ipr_cmnd *ipr_cmd) 5082 { 5083 struct ipr_cmnd *loop_cmd; 5084 5085 list_for_each_entry(loop_cmd, &ipr_cmd->hrrq->hrrq_free_q, queue) { 5086 if (loop_cmd == ipr_cmd) 5087 return true; 5088 } 5089 5090 return false; 5091 } 5092 5093 /** 5094 * ipr_match_res - Match function for specified resource entry 5095 * @ipr_cmd: ipr command struct 5096 * @resource: resource entry to match 5097 * 5098 * Returns: 5099 * 1 if command matches sdev / 0 if command does not match sdev 5100 **/ 5101 static int ipr_match_res(struct ipr_cmnd *ipr_cmd, void *resource) 5102 { 5103 struct ipr_resource_entry *res = resource; 5104 5105 if (res && ipr_cmd->ioarcb.res_handle == res->res_handle) 5106 return 1; 5107 return 0; 5108 } 5109 5110 /** 5111 * ipr_wait_for_ops - Wait for matching commands to complete 5112 * @ioa_cfg: ioa config struct 5113 * @device: device to match (sdev) 5114 * @match: match function to use 5115 * 5116 * Returns: 5117 * SUCCESS / FAILED 5118 **/ 5119 static int ipr_wait_for_ops(struct ipr_ioa_cfg *ioa_cfg, void *device, 5120 int (*match)(struct ipr_cmnd *, void *)) 5121 { 5122 struct ipr_cmnd *ipr_cmd; 5123 int wait, i; 5124 unsigned long flags; 5125 struct ipr_hrr_queue 
*hrrq;
	signed long timeout = IPR_ABORT_TASK_TIMEOUT;
	DECLARE_COMPLETION_ONSTACK(comp);

	ENTER;
	do {
		wait = 0;

		for_each_hrrq(hrrq, ioa_cfg) {
			spin_lock_irqsave(hrrq->lock, flags);
			for (i = hrrq->min_cmd_id; i <= hrrq->max_cmd_id; i++) {
				ipr_cmd = ioa_cfg->ipr_cmnd_list[i];
				if (!ipr_cmnd_is_free(ipr_cmd)) {
					if (match(ipr_cmd, device)) {
						ipr_cmd->eh_comp = &comp;
						wait++;
					}
				}
			}
			spin_unlock_irqrestore(hrrq->lock, flags);
		}

		if (wait) {
			timeout = wait_for_completion_timeout(&comp, timeout);

			if (!timeout) {
				wait = 0;

				for_each_hrrq(hrrq, ioa_cfg) {
					spin_lock_irqsave(hrrq->lock, flags);
					for (i = hrrq->min_cmd_id; i <= hrrq->max_cmd_id; i++) {
						ipr_cmd = ioa_cfg->ipr_cmnd_list[i];
						if (!ipr_cmnd_is_free(ipr_cmd)) {
							if (match(ipr_cmd, device)) {
								ipr_cmd->eh_comp = NULL;
								wait++;
							}
						}
					}
					spin_unlock_irqrestore(hrrq->lock, flags);
				}

				if (wait)
					dev_err(&ioa_cfg->pdev->dev, "Timed out waiting for aborted commands\n");
				LEAVE;
				return wait ? FAILED : SUCCESS;
			}
		}
	} while (wait);

	LEAVE;
	return SUCCESS;
}

static int ipr_eh_host_reset(struct scsi_cmnd *cmd)
{
	struct ipr_ioa_cfg *ioa_cfg;
	unsigned long lock_flags = 0;
	int rc = SUCCESS;

	ENTER;
	ioa_cfg = (struct ipr_ioa_cfg *) cmd->device->host->hostdata;
	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	if (!ioa_cfg->in_reset_reload && !ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
		dev_err(&ioa_cfg->pdev->dev,
			"Adapter being reset as a result of error recovery.\n");

		if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
			ioa_cfg->sdt_state = GET_DUMP;
	}

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	/* If we got hit with a host reset while we were already resetting
	 * the adapter for some reason, fail if that reset left the adapter dead.
	 */
	if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
		ipr_trace;
		rc = FAILED;
	}

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	LEAVE;
	return rc;
}

/**
 * ipr_device_reset - Reset the device
 * @ioa_cfg:	ioa config struct
 * @res:	resource entry struct
 *
 * This function issues a device reset to the affected device.
 * If the device is a SCSI device, a LUN reset will be sent
 * to the device first. If that does not work, a target reset
 * will be sent. If the device is a SATA device, a PHY reset will
 * be sent.
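 * The reset is issued as a blocking IPR_RESET_DEVICE IOA command with an
 * IPR_DEVICE_RESET_TIMEOUT timeout; for SATA devices the returned GATA IOASA
 * is copied back to the sata_port for libata's use.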
5224 * 5225 * Return value: 5226 * 0 on success / non-zero on failure 5227 **/ 5228 static int ipr_device_reset(struct ipr_ioa_cfg *ioa_cfg, 5229 struct ipr_resource_entry *res) 5230 { 5231 struct ipr_cmnd *ipr_cmd; 5232 struct ipr_ioarcb *ioarcb; 5233 struct ipr_cmd_pkt *cmd_pkt; 5234 struct ipr_ioarcb_ata_regs *regs; 5235 u32 ioasc; 5236 5237 ENTER; 5238 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg); 5239 ioarcb = &ipr_cmd->ioarcb; 5240 cmd_pkt = &ioarcb->cmd_pkt; 5241 5242 if (ipr_cmd->ioa_cfg->sis64) { 5243 regs = &ipr_cmd->i.ata_ioadl.regs; 5244 ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb)); 5245 } else 5246 regs = &ioarcb->u.add_data.u.regs; 5247 5248 ioarcb->res_handle = res->res_handle; 5249 cmd_pkt->request_type = IPR_RQTYPE_IOACMD; 5250 cmd_pkt->cdb[0] = IPR_RESET_DEVICE; 5251 if (ipr_is_gata(res)) { 5252 cmd_pkt->cdb[2] = IPR_ATA_PHY_RESET; 5253 ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(regs->flags)); 5254 regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION; 5255 } 5256 5257 ipr_send_blocking_cmd(ipr_cmd, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT); 5258 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); 5259 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); 5260 if (ipr_is_gata(res) && res->sata_port && ioasc != IPR_IOASC_IOA_WAS_RESET) { 5261 if (ipr_cmd->ioa_cfg->sis64) 5262 memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata, 5263 sizeof(struct ipr_ioasa_gata)); 5264 else 5265 memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata, 5266 sizeof(struct ipr_ioasa_gata)); 5267 } 5268 5269 LEAVE; 5270 return IPR_IOASC_SENSE_KEY(ioasc) ? -EIO : 0; 5271 } 5272 5273 /** 5274 * ipr_sata_reset - Reset the SATA port 5275 * @link: SATA link to reset 5276 * @classes: class of the attached device 5277 * @deadline: unused 5278 * 5279 * This function issues a SATA phy reset to the affected ATA link. 5280 * 5281 * Return value: 5282 * 0 on success / non-zero on failure 5283 **/ 5284 static int ipr_sata_reset(struct ata_link *link, unsigned int *classes, 5285 unsigned long deadline) 5286 { 5287 struct ipr_sata_port *sata_port = link->ap->private_data; 5288 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg; 5289 struct ipr_resource_entry *res; 5290 unsigned long lock_flags = 0; 5291 int rc = -ENXIO, ret; 5292 5293 ENTER; 5294 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 5295 while (ioa_cfg->in_reset_reload) { 5296 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 5297 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); 5298 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 5299 } 5300 5301 res = sata_port->res; 5302 if (res) { 5303 rc = ipr_device_reset(ioa_cfg, res); 5304 *classes = res->ata_class; 5305 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 5306 5307 ret = ipr_wait_for_ops(ioa_cfg, res, ipr_match_res); 5308 if (ret != SUCCESS) { 5309 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 5310 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV); 5311 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 5312 5313 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); 5314 } 5315 } else 5316 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 5317 5318 LEAVE; 5319 return rc; 5320 } 5321 5322 /** 5323 * __ipr_eh_dev_reset - Reset the device 5324 * @scsi_cmd: scsi command struct 5325 * 5326 * This function issues a device reset to the affected device. 5327 * A LUN reset will be sent to the device first. If that does 5328 * not work, a target reset will be sent. 
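 * For a SATA device, outstanding commands are first flagged for libata
 * error handling and ata_std_error_handler() is invoked instead of the
 * IOA device reset.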
5329 * 5330 * Return value: 5331 * SUCCESS / FAILED 5332 **/ 5333 static int __ipr_eh_dev_reset(struct scsi_cmnd *scsi_cmd) 5334 { 5335 struct ipr_cmnd *ipr_cmd; 5336 struct ipr_ioa_cfg *ioa_cfg; 5337 struct ipr_resource_entry *res; 5338 struct ata_port *ap; 5339 int rc = 0, i; 5340 struct ipr_hrr_queue *hrrq; 5341 5342 ENTER; 5343 ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata; 5344 res = scsi_cmd->device->hostdata; 5345 5346 /* 5347 * If we are currently going through reset/reload, return failed. This will force the 5348 * mid-layer to call ipr_eh_host_reset, which will then go to sleep and wait for the 5349 * reset to complete 5350 */ 5351 if (ioa_cfg->in_reset_reload) 5352 return FAILED; 5353 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) 5354 return FAILED; 5355 5356 for_each_hrrq(hrrq, ioa_cfg) { 5357 spin_lock(&hrrq->_lock); 5358 for (i = hrrq->min_cmd_id; i <= hrrq->max_cmd_id; i++) { 5359 ipr_cmd = ioa_cfg->ipr_cmnd_list[i]; 5360 5361 if (ipr_cmd->ioarcb.res_handle == res->res_handle) { 5362 if (!ipr_cmd->qc) 5363 continue; 5364 if (ipr_cmnd_is_free(ipr_cmd)) 5365 continue; 5366 5367 ipr_cmd->done = ipr_sata_eh_done; 5368 if (!(ipr_cmd->qc->flags & ATA_QCFLAG_EH)) { 5369 ipr_cmd->qc->err_mask |= AC_ERR_TIMEOUT; 5370 ipr_cmd->qc->flags |= ATA_QCFLAG_EH; 5371 } 5372 } 5373 } 5374 spin_unlock(&hrrq->_lock); 5375 } 5376 res->resetting_device = 1; 5377 scmd_printk(KERN_ERR, scsi_cmd, "Resetting device\n"); 5378 5379 if (ipr_is_gata(res) && res->sata_port) { 5380 ap = res->sata_port->ap; 5381 spin_unlock_irq(scsi_cmd->device->host->host_lock); 5382 ata_std_error_handler(ap); 5383 spin_lock_irq(scsi_cmd->device->host->host_lock); 5384 } else 5385 rc = ipr_device_reset(ioa_cfg, res); 5386 res->resetting_device = 0; 5387 res->reset_occurred = 1; 5388 5389 LEAVE; 5390 return rc ? FAILED : SUCCESS; 5391 } 5392 5393 static int ipr_eh_dev_reset(struct scsi_cmnd *cmd) 5394 { 5395 int rc; 5396 struct ipr_ioa_cfg *ioa_cfg; 5397 struct ipr_resource_entry *res; 5398 5399 ioa_cfg = (struct ipr_ioa_cfg *) cmd->device->host->hostdata; 5400 res = cmd->device->hostdata; 5401 5402 if (!res) 5403 return FAILED; 5404 5405 spin_lock_irq(cmd->device->host->host_lock); 5406 rc = __ipr_eh_dev_reset(cmd); 5407 spin_unlock_irq(cmd->device->host->host_lock); 5408 5409 if (rc == SUCCESS) { 5410 if (ipr_is_gata(res) && res->sata_port) 5411 rc = ipr_wait_for_ops(ioa_cfg, res, ipr_match_res); 5412 else 5413 rc = ipr_wait_for_ops(ioa_cfg, cmd->device, ipr_match_lun); 5414 } 5415 5416 return rc; 5417 } 5418 5419 /** 5420 * ipr_bus_reset_done - Op done function for bus reset. 
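 * (Reports the bus reset to the SCSI midlayer on SIS-32 adapters and, if the
 * associated abort has already completed, calls its done routine to wake the
 * waiting error-handler thread.)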
5421 * @ipr_cmd: ipr command struct 5422 * 5423 * This function is the op done function for a bus reset 5424 * 5425 * Return value: 5426 * none 5427 **/ 5428 static void ipr_bus_reset_done(struct ipr_cmnd *ipr_cmd) 5429 { 5430 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 5431 struct ipr_resource_entry *res; 5432 5433 ENTER; 5434 if (!ioa_cfg->sis64) 5435 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) { 5436 if (res->res_handle == ipr_cmd->ioarcb.res_handle) { 5437 scsi_report_bus_reset(ioa_cfg->host, res->bus); 5438 break; 5439 } 5440 } 5441 5442 /* 5443 * If abort has not completed, indicate the reset has, else call the 5444 * abort's done function to wake the sleeping eh thread 5445 */ 5446 if (ipr_cmd->sibling->sibling) 5447 ipr_cmd->sibling->sibling = NULL; 5448 else 5449 ipr_cmd->sibling->done(ipr_cmd->sibling); 5450 5451 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); 5452 LEAVE; 5453 } 5454 5455 /** 5456 * ipr_abort_timeout - An abort task has timed out 5457 * @t: Timer context used to fetch ipr command struct 5458 * 5459 * This function handles when an abort task times out. If this 5460 * happens we issue a bus reset since we have resources tied 5461 * up that must be freed before returning to the midlayer. 5462 * 5463 * Return value: 5464 * none 5465 **/ 5466 static void ipr_abort_timeout(struct timer_list *t) 5467 { 5468 struct ipr_cmnd *ipr_cmd = from_timer(ipr_cmd, t, timer); 5469 struct ipr_cmnd *reset_cmd; 5470 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 5471 struct ipr_cmd_pkt *cmd_pkt; 5472 unsigned long lock_flags = 0; 5473 5474 ENTER; 5475 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 5476 if (ipr_cmd->completion.done || ioa_cfg->in_reset_reload) { 5477 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 5478 return; 5479 } 5480 5481 sdev_printk(KERN_ERR, ipr_cmd->u.sdev, "Abort timed out. Resetting bus.\n"); 5482 reset_cmd = ipr_get_free_ipr_cmnd(ioa_cfg); 5483 ipr_cmd->sibling = reset_cmd; 5484 reset_cmd->sibling = ipr_cmd; 5485 reset_cmd->ioarcb.res_handle = ipr_cmd->ioarcb.res_handle; 5486 cmd_pkt = &reset_cmd->ioarcb.cmd_pkt; 5487 cmd_pkt->request_type = IPR_RQTYPE_IOACMD; 5488 cmd_pkt->cdb[0] = IPR_RESET_DEVICE; 5489 cmd_pkt->cdb[2] = IPR_RESET_TYPE_SELECT | IPR_BUS_RESET; 5490 5491 ipr_do_req(reset_cmd, ipr_bus_reset_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT); 5492 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 5493 LEAVE; 5494 } 5495 5496 /** 5497 * ipr_cancel_op - Cancel specified op 5498 * @scsi_cmd: scsi command struct 5499 * 5500 * This function cancels specified op. 5501 * 5502 * Return value: 5503 * SUCCESS / FAILED 5504 **/ 5505 static int ipr_cancel_op(struct scsi_cmnd *scsi_cmd) 5506 { 5507 struct ipr_cmnd *ipr_cmd; 5508 struct ipr_ioa_cfg *ioa_cfg; 5509 struct ipr_resource_entry *res; 5510 struct ipr_cmd_pkt *cmd_pkt; 5511 u32 ioasc; 5512 int i, op_found = 0; 5513 struct ipr_hrr_queue *hrrq; 5514 5515 ENTER; 5516 ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata; 5517 res = scsi_cmd->device->hostdata; 5518 5519 /* If we are currently going through reset/reload, return failed. 
5520 * This will force the mid-layer to call ipr_eh_host_reset, 5521 * which will then go to sleep and wait for the reset to complete 5522 */ 5523 if (ioa_cfg->in_reset_reload || 5524 ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) 5525 return FAILED; 5526 if (!res) 5527 return FAILED; 5528 5529 /* 5530 * If we are aborting a timed out op, chances are that the timeout was caused 5531 * by a still not detected EEH error. In such cases, reading a register will 5532 * trigger the EEH recovery infrastructure. 5533 */ 5534 readl(ioa_cfg->regs.sense_interrupt_reg); 5535 5536 if (!ipr_is_gscsi(res)) 5537 return FAILED; 5538 5539 for_each_hrrq(hrrq, ioa_cfg) { 5540 spin_lock(&hrrq->_lock); 5541 for (i = hrrq->min_cmd_id; i <= hrrq->max_cmd_id; i++) { 5542 if (ioa_cfg->ipr_cmnd_list[i]->scsi_cmd == scsi_cmd) { 5543 if (!ipr_cmnd_is_free(ioa_cfg->ipr_cmnd_list[i])) { 5544 op_found = 1; 5545 break; 5546 } 5547 } 5548 } 5549 spin_unlock(&hrrq->_lock); 5550 } 5551 5552 if (!op_found) 5553 return SUCCESS; 5554 5555 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg); 5556 ipr_cmd->ioarcb.res_handle = res->res_handle; 5557 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt; 5558 cmd_pkt->request_type = IPR_RQTYPE_IOACMD; 5559 cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS; 5560 ipr_cmd->u.sdev = scsi_cmd->device; 5561 5562 scmd_printk(KERN_ERR, scsi_cmd, "Aborting command: %02X\n", 5563 scsi_cmd->cmnd[0]); 5564 ipr_send_blocking_cmd(ipr_cmd, ipr_abort_timeout, IPR_CANCEL_ALL_TIMEOUT); 5565 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); 5566 5567 /* 5568 * If the abort task timed out and we sent a bus reset, we will get 5569 * one of the following responses to the abort 5570 */ 5571 if (ioasc == IPR_IOASC_BUS_WAS_RESET || ioasc == IPR_IOASC_SYNC_REQUIRED) { 5572 ioasc = 0; 5573 ipr_trace; 5574 } 5575 5576 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); 5577 if (!ipr_is_naca_model(res)) 5578 res->needs_sync_complete = 1; 5579 5580 LEAVE; 5581 return IPR_IOASC_SENSE_KEY(ioasc) ?
FAILED : SUCCESS; 5582 } 5583 5584 /** 5585 * ipr_scan_finished - Report whether scan is done 5586 * @shost: scsi host struct 5587 * @elapsed_time: elapsed time 5588 * 5589 * Return value: 5590 * 0 if scan in progress / 1 if scan is complete 5591 **/ 5592 static int ipr_scan_finished(struct Scsi_Host *shost, unsigned long elapsed_time) 5593 { 5594 unsigned long lock_flags; 5595 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata; 5596 int rc = 0; 5597 5598 spin_lock_irqsave(shost->host_lock, lock_flags); 5599 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead || ioa_cfg->scan_done) 5600 rc = 1; 5601 if ((elapsed_time/HZ) > (ioa_cfg->transop_timeout * 2)) 5602 rc = 1; 5603 spin_unlock_irqrestore(shost->host_lock, lock_flags); 5604 return rc; 5605 } 5606 5607 /** 5608 * ipr_eh_abort - Reset the host adapter 5609 * @scsi_cmd: scsi command struct 5610 * 5611 * Return value: 5612 * SUCCESS / FAILED 5613 **/ 5614 static int ipr_eh_abort(struct scsi_cmnd *scsi_cmd) 5615 { 5616 unsigned long flags; 5617 int rc; 5618 struct ipr_ioa_cfg *ioa_cfg; 5619 5620 ENTER; 5621 5622 ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata; 5623 5624 spin_lock_irqsave(scsi_cmd->device->host->host_lock, flags); 5625 rc = ipr_cancel_op(scsi_cmd); 5626 spin_unlock_irqrestore(scsi_cmd->device->host->host_lock, flags); 5627 5628 if (rc == SUCCESS) 5629 rc = ipr_wait_for_ops(ioa_cfg, scsi_cmd->device, ipr_match_lun); 5630 LEAVE; 5631 return rc; 5632 } 5633 5634 /** 5635 * ipr_handle_other_interrupt - Handle "other" interrupts 5636 * @ioa_cfg: ioa config struct 5637 * @int_reg: interrupt register 5638 * 5639 * Return value: 5640 * IRQ_NONE / IRQ_HANDLED 5641 **/ 5642 static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg, 5643 u32 int_reg) 5644 { 5645 irqreturn_t rc = IRQ_HANDLED; 5646 u32 int_mask_reg; 5647 5648 int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg32); 5649 int_reg &= ~int_mask_reg; 5650 5651 /* If an interrupt on the adapter did not occur, ignore it. 5652 * Or in the case of SIS 64, check for a stage change interrupt. 5653 */ 5654 if ((int_reg & IPR_PCII_OPER_INTERRUPTS) == 0) { 5655 if (ioa_cfg->sis64) { 5656 int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg); 5657 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg; 5658 if (int_reg & IPR_PCII_IPL_STAGE_CHANGE) { 5659 5660 /* clear stage change */ 5661 writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.clr_interrupt_reg); 5662 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg; 5663 list_del(&ioa_cfg->reset_cmd->queue); 5664 del_timer(&ioa_cfg->reset_cmd->timer); 5665 ipr_reset_ioa_job(ioa_cfg->reset_cmd); 5666 return IRQ_HANDLED; 5667 } 5668 } 5669 5670 return IRQ_NONE; 5671 } 5672 5673 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) { 5674 /* Mask the interrupt */ 5675 writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.set_interrupt_mask_reg); 5676 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg); 5677 5678 list_del(&ioa_cfg->reset_cmd->queue); 5679 del_timer(&ioa_cfg->reset_cmd->timer); 5680 ipr_reset_ioa_job(ioa_cfg->reset_cmd); 5681 } else if ((int_reg & IPR_PCII_HRRQ_UPDATED) == int_reg) { 5682 if (ioa_cfg->clear_isr) { 5683 if (ipr_debug && printk_ratelimit()) 5684 dev_err(&ioa_cfg->pdev->dev, 5685 "Spurious interrupt detected. 
0x%08X\n", int_reg); 5686 writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg32); 5687 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32); 5688 return IRQ_NONE; 5689 } 5690 } else { 5691 if (int_reg & IPR_PCII_IOA_UNIT_CHECKED) 5692 ioa_cfg->ioa_unit_checked = 1; 5693 else if (int_reg & IPR_PCII_NO_HOST_RRQ) 5694 dev_err(&ioa_cfg->pdev->dev, 5695 "No Host RRQ. 0x%08X\n", int_reg); 5696 else 5697 dev_err(&ioa_cfg->pdev->dev, 5698 "Permanent IOA failure. 0x%08X\n", int_reg); 5699 5700 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state) 5701 ioa_cfg->sdt_state = GET_DUMP; 5702 5703 ipr_mask_and_clear_interrupts(ioa_cfg, ~0); 5704 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE); 5705 } 5706 5707 return rc; 5708 } 5709 5710 /** 5711 * ipr_isr_eh - Interrupt service routine error handler 5712 * @ioa_cfg: ioa config struct 5713 * @msg: message to log 5714 * @number: various meanings depending on the caller/message 5715 * 5716 * Return value: 5717 * none 5718 **/ 5719 static void ipr_isr_eh(struct ipr_ioa_cfg *ioa_cfg, char *msg, u16 number) 5720 { 5721 ioa_cfg->errors_logged++; 5722 dev_err(&ioa_cfg->pdev->dev, "%s %d\n", msg, number); 5723 5724 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state) 5725 ioa_cfg->sdt_state = GET_DUMP; 5726 5727 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE); 5728 } 5729 5730 static int ipr_process_hrrq(struct ipr_hrr_queue *hrr_queue, int budget, 5731 struct list_head *doneq) 5732 { 5733 u32 ioasc; 5734 u16 cmd_index; 5735 struct ipr_cmnd *ipr_cmd; 5736 struct ipr_ioa_cfg *ioa_cfg = hrr_queue->ioa_cfg; 5737 int num_hrrq = 0; 5738 5739 /* If interrupts are disabled, ignore the interrupt */ 5740 if (!hrr_queue->allow_interrupts) 5741 return 0; 5742 5743 while ((be32_to_cpu(*hrr_queue->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) == 5744 hrr_queue->toggle_bit) { 5745 5746 cmd_index = (be32_to_cpu(*hrr_queue->hrrq_curr) & 5747 IPR_HRRQ_REQ_RESP_HANDLE_MASK) >> 5748 IPR_HRRQ_REQ_RESP_HANDLE_SHIFT; 5749 5750 if (unlikely(cmd_index > hrr_queue->max_cmd_id || 5751 cmd_index < hrr_queue->min_cmd_id)) { 5752 ipr_isr_eh(ioa_cfg, 5753 "Invalid response handle from IOA: ", 5754 cmd_index); 5755 break; 5756 } 5757 5758 ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index]; 5759 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); 5760 5761 ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, ioasc); 5762 5763 list_move_tail(&ipr_cmd->queue, doneq); 5764 5765 if (hrr_queue->hrrq_curr < hrr_queue->hrrq_end) { 5766 hrr_queue->hrrq_curr++; 5767 } else { 5768 hrr_queue->hrrq_curr = hrr_queue->hrrq_start; 5769 hrr_queue->toggle_bit ^= 1u; 5770 } 5771 num_hrrq++; 5772 if (budget > 0 && num_hrrq >= budget) 5773 break; 5774 } 5775 5776 return num_hrrq; 5777 } 5778 5779 static int ipr_iopoll(struct irq_poll *iop, int budget) 5780 { 5781 struct ipr_hrr_queue *hrrq; 5782 struct ipr_cmnd *ipr_cmd, *temp; 5783 unsigned long hrrq_flags; 5784 int completed_ops; 5785 LIST_HEAD(doneq); 5786 5787 hrrq = container_of(iop, struct ipr_hrr_queue, iopoll); 5788 5789 spin_lock_irqsave(hrrq->lock, hrrq_flags); 5790 completed_ops = ipr_process_hrrq(hrrq, budget, &doneq); 5791 5792 if (completed_ops < budget) 5793 irq_poll_complete(iop); 5794 spin_unlock_irqrestore(hrrq->lock, hrrq_flags); 5795 5796 list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) { 5797 list_del(&ipr_cmd->queue); 5798 del_timer(&ipr_cmd->timer); 5799 ipr_cmd->fast_done(ipr_cmd); 5800 } 5801 5802 return completed_ops; 5803 } 5804 5805 /** 5806 * ipr_isr - Interrupt service routine 5807 * @irq: irq number 5808 * @devp: pointer to ioa config struct 5809 * 5810 * Return value: 
5811 * IRQ_NONE / IRQ_HANDLED 5812 **/ 5813 static irqreturn_t ipr_isr(int irq, void *devp) 5814 { 5815 struct ipr_hrr_queue *hrrq = (struct ipr_hrr_queue *)devp; 5816 struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg; 5817 unsigned long hrrq_flags = 0; 5818 u32 int_reg = 0; 5819 int num_hrrq = 0; 5820 int irq_none = 0; 5821 struct ipr_cmnd *ipr_cmd, *temp; 5822 irqreturn_t rc = IRQ_NONE; 5823 LIST_HEAD(doneq); 5824 5825 spin_lock_irqsave(hrrq->lock, hrrq_flags); 5826 /* If interrupts are disabled, ignore the interrupt */ 5827 if (!hrrq->allow_interrupts) { 5828 spin_unlock_irqrestore(hrrq->lock, hrrq_flags); 5829 return IRQ_NONE; 5830 } 5831 5832 while (1) { 5833 if (ipr_process_hrrq(hrrq, -1, &doneq)) { 5834 rc = IRQ_HANDLED; 5835 5836 if (!ioa_cfg->clear_isr) 5837 break; 5838 5839 /* Clear the PCI interrupt */ 5840 num_hrrq = 0; 5841 do { 5842 writel(IPR_PCII_HRRQ_UPDATED, 5843 ioa_cfg->regs.clr_interrupt_reg32); 5844 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32); 5845 } while (int_reg & IPR_PCII_HRRQ_UPDATED && 5846 num_hrrq++ < IPR_MAX_HRRQ_RETRIES); 5847 5848 } else if (rc == IRQ_NONE && irq_none == 0) { 5849 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32); 5850 irq_none++; 5851 } else if (num_hrrq == IPR_MAX_HRRQ_RETRIES && 5852 int_reg & IPR_PCII_HRRQ_UPDATED) { 5853 ipr_isr_eh(ioa_cfg, 5854 "Error clearing HRRQ: ", num_hrrq); 5855 rc = IRQ_HANDLED; 5856 break; 5857 } else 5858 break; 5859 } 5860 5861 if (unlikely(rc == IRQ_NONE)) 5862 rc = ipr_handle_other_interrupt(ioa_cfg, int_reg); 5863 5864 spin_unlock_irqrestore(hrrq->lock, hrrq_flags); 5865 list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) { 5866 list_del(&ipr_cmd->queue); 5867 del_timer(&ipr_cmd->timer); 5868 ipr_cmd->fast_done(ipr_cmd); 5869 } 5870 return rc; 5871 } 5872 5873 /** 5874 * ipr_isr_mhrrq - Interrupt service routine 5875 * @irq: irq number 5876 * @devp: pointer to ioa config struct 5877 * 5878 * Return value: 5879 * IRQ_NONE / IRQ_HANDLED 5880 **/ 5881 static irqreturn_t ipr_isr_mhrrq(int irq, void *devp) 5882 { 5883 struct ipr_hrr_queue *hrrq = (struct ipr_hrr_queue *)devp; 5884 struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg; 5885 unsigned long hrrq_flags = 0; 5886 struct ipr_cmnd *ipr_cmd, *temp; 5887 irqreturn_t rc = IRQ_NONE; 5888 LIST_HEAD(doneq); 5889 5890 spin_lock_irqsave(hrrq->lock, hrrq_flags); 5891 5892 /* If interrupts are disabled, ignore the interrupt */ 5893 if (!hrrq->allow_interrupts) { 5894 spin_unlock_irqrestore(hrrq->lock, hrrq_flags); 5895 return IRQ_NONE; 5896 } 5897 5898 if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) { 5899 if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) == 5900 hrrq->toggle_bit) { 5901 irq_poll_sched(&hrrq->iopoll); 5902 spin_unlock_irqrestore(hrrq->lock, hrrq_flags); 5903 return IRQ_HANDLED; 5904 } 5905 } else { 5906 if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) == 5907 hrrq->toggle_bit) 5908 5909 if (ipr_process_hrrq(hrrq, -1, &doneq)) 5910 rc = IRQ_HANDLED; 5911 } 5912 5913 spin_unlock_irqrestore(hrrq->lock, hrrq_flags); 5914 5915 list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) { 5916 list_del(&ipr_cmd->queue); 5917 del_timer(&ipr_cmd->timer); 5918 ipr_cmd->fast_done(ipr_cmd); 5919 } 5920 return rc; 5921 } 5922 5923 /** 5924 * ipr_build_ioadl64 - Build a scatter/gather list and map the buffer 5925 * @ioa_cfg: ioa config struct 5926 * @ipr_cmd: ipr command struct 5927 * 5928 * Return value: 5929 * 0 on success / -1 on failure 5930 **/ 5931 static int ipr_build_ioadl64(struct ipr_ioa_cfg *ioa_cfg, 5932 struct 
ipr_cmnd *ipr_cmd) 5933 { 5934 int i, nseg; 5935 struct scatterlist *sg; 5936 u32 length; 5937 u32 ioadl_flags = 0; 5938 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd; 5939 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; 5940 struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64; 5941 5942 length = scsi_bufflen(scsi_cmd); 5943 if (!length) 5944 return 0; 5945 5946 nseg = scsi_dma_map(scsi_cmd); 5947 if (nseg < 0) { 5948 if (printk_ratelimit()) 5949 dev_err(&ioa_cfg->pdev->dev, "scsi_dma_map failed!\n"); 5950 return -1; 5951 } 5952 5953 ipr_cmd->dma_use_sg = nseg; 5954 5955 ioarcb->data_transfer_length = cpu_to_be32(length); 5956 ioarcb->ioadl_len = 5957 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg); 5958 5959 if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) { 5960 ioadl_flags = IPR_IOADL_FLAGS_WRITE; 5961 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ; 5962 } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) 5963 ioadl_flags = IPR_IOADL_FLAGS_READ; 5964 5965 scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) { 5966 ioadl64[i].flags = cpu_to_be32(ioadl_flags); 5967 ioadl64[i].data_len = cpu_to_be32(sg_dma_len(sg)); 5968 ioadl64[i].address = cpu_to_be64(sg_dma_address(sg)); 5969 } 5970 5971 ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST); 5972 return 0; 5973 } 5974 5975 /** 5976 * ipr_build_ioadl - Build a scatter/gather list and map the buffer 5977 * @ioa_cfg: ioa config struct 5978 * @ipr_cmd: ipr command struct 5979 * 5980 * Return value: 5981 * 0 on success / -1 on failure 5982 **/ 5983 static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg, 5984 struct ipr_cmnd *ipr_cmd) 5985 { 5986 int i, nseg; 5987 struct scatterlist *sg; 5988 u32 length; 5989 u32 ioadl_flags = 0; 5990 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd; 5991 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; 5992 struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl; 5993 5994 length = scsi_bufflen(scsi_cmd); 5995 if (!length) 5996 return 0; 5997 5998 nseg = scsi_dma_map(scsi_cmd); 5999 if (nseg < 0) { 6000 dev_err(&ioa_cfg->pdev->dev, "scsi_dma_map failed!\n"); 6001 return -1; 6002 } 6003 6004 ipr_cmd->dma_use_sg = nseg; 6005 6006 if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) { 6007 ioadl_flags = IPR_IOADL_FLAGS_WRITE; 6008 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ; 6009 ioarcb->data_transfer_length = cpu_to_be32(length); 6010 ioarcb->ioadl_len = 6011 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg); 6012 } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) { 6013 ioadl_flags = IPR_IOADL_FLAGS_READ; 6014 ioarcb->read_data_transfer_length = cpu_to_be32(length); 6015 ioarcb->read_ioadl_len = 6016 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg); 6017 } 6018 6019 if (ipr_cmd->dma_use_sg <= ARRAY_SIZE(ioarcb->u.add_data.u.ioadl)) { 6020 ioadl = ioarcb->u.add_data.u.ioadl; 6021 ioarcb->write_ioadl_addr = cpu_to_be32((ipr_cmd->dma_addr) + 6022 offsetof(struct ipr_ioarcb, u.add_data)); 6023 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr; 6024 } 6025 6026 scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) { 6027 ioadl[i].flags_and_data_len = 6028 cpu_to_be32(ioadl_flags | sg_dma_len(sg)); 6029 ioadl[i].address = cpu_to_be32(sg_dma_address(sg)); 6030 } 6031 6032 ioadl[i-1].flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST); 6033 return 0; 6034 } 6035 6036 /** 6037 * __ipr_erp_done - Process completion of ERP for a device 6038 * @ipr_cmd: ipr command struct 6039 * 6040 * This function copies the sense buffer into the scsi_cmd 
6041 * struct and pushes the scsi_done function. 6042 * 6043 * Return value: 6044 * nothing 6045 **/ 6046 static void __ipr_erp_done(struct ipr_cmnd *ipr_cmd) 6047 { 6048 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd; 6049 struct ipr_resource_entry *res = scsi_cmd->device->hostdata; 6050 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); 6051 6052 if (IPR_IOASC_SENSE_KEY(ioasc) > 0) { 6053 scsi_cmd->result |= (DID_ERROR << 16); 6054 scmd_printk(KERN_ERR, scsi_cmd, 6055 "Request Sense failed with IOASC: 0x%08X\n", ioasc); 6056 } else { 6057 memcpy(scsi_cmd->sense_buffer, ipr_cmd->sense_buffer, 6058 SCSI_SENSE_BUFFERSIZE); 6059 } 6060 6061 if (res) { 6062 if (!ipr_is_naca_model(res)) 6063 res->needs_sync_complete = 1; 6064 res->in_erp = 0; 6065 } 6066 scsi_dma_unmap(ipr_cmd->scsi_cmd); 6067 scsi_done(scsi_cmd); 6068 if (ipr_cmd->eh_comp) 6069 complete(ipr_cmd->eh_comp); 6070 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); 6071 } 6072 6073 /** 6074 * ipr_erp_done - Process completion of ERP for a device 6075 * @ipr_cmd: ipr command struct 6076 * 6077 * This function copies the sense buffer into the scsi_cmd 6078 * struct and pushes the scsi_done function. 6079 * 6080 * Return value: 6081 * nothing 6082 **/ 6083 static void ipr_erp_done(struct ipr_cmnd *ipr_cmd) 6084 { 6085 struct ipr_hrr_queue *hrrq = ipr_cmd->hrrq; 6086 unsigned long hrrq_flags; 6087 6088 spin_lock_irqsave(&hrrq->_lock, hrrq_flags); 6089 __ipr_erp_done(ipr_cmd); 6090 spin_unlock_irqrestore(&hrrq->_lock, hrrq_flags); 6091 } 6092 6093 /** 6094 * ipr_reinit_ipr_cmnd_for_erp - Re-initialize a cmnd block to be used for ERP 6095 * @ipr_cmd: ipr command struct 6096 * 6097 * Return value: 6098 * none 6099 **/ 6100 static void ipr_reinit_ipr_cmnd_for_erp(struct ipr_cmnd *ipr_cmd) 6101 { 6102 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; 6103 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa; 6104 dma_addr_t dma_addr = ipr_cmd->dma_addr; 6105 6106 memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt)); 6107 ioarcb->data_transfer_length = 0; 6108 ioarcb->read_data_transfer_length = 0; 6109 ioarcb->ioadl_len = 0; 6110 ioarcb->read_ioadl_len = 0; 6111 ioasa->hdr.ioasc = 0; 6112 ioasa->hdr.residual_data_len = 0; 6113 6114 if (ipr_cmd->ioa_cfg->sis64) 6115 ioarcb->u.sis64_addr_data.data_ioadl_addr = 6116 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64)); 6117 else { 6118 ioarcb->write_ioadl_addr = 6119 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl)); 6120 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr; 6121 } 6122 } 6123 6124 /** 6125 * __ipr_erp_request_sense - Send request sense to a device 6126 * @ipr_cmd: ipr command struct 6127 * 6128 * This function sends a request sense to a device as a result 6129 * of a check condition. 
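 *
 * In outline, the code below re-initializes the failed command block with
 * ipr_reinit_ipr_cmnd_for_erp() and reuses it to send a 6-byte REQUEST SENSE,
 * with the allocation length (CDB byte 4) set to SCSI_SENSE_BUFFERSIZE and the
 * returned sense data DMA'd into the command's own sense buffer, roughly:
 *
 *   cdb[0] = REQUEST_SENSE;           (opcode 0x03)
 *   cdb[4] = SCSI_SENSE_BUFFERSIZE;   (allocation length)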
6130 * 6131 * Return value: 6132 * nothing 6133 **/ 6134 static void __ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd) 6135 { 6136 struct ipr_cmd_pkt *cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt; 6137 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); 6138 6139 if (IPR_IOASC_SENSE_KEY(ioasc) > 0) { 6140 __ipr_erp_done(ipr_cmd); 6141 return; 6142 } 6143 6144 ipr_reinit_ipr_cmnd_for_erp(ipr_cmd); 6145 6146 cmd_pkt->request_type = IPR_RQTYPE_SCSICDB; 6147 cmd_pkt->cdb[0] = REQUEST_SENSE; 6148 cmd_pkt->cdb[4] = SCSI_SENSE_BUFFERSIZE; 6149 cmd_pkt->flags_hi |= IPR_FLAGS_HI_SYNC_OVERRIDE; 6150 cmd_pkt->flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK; 6151 cmd_pkt->timeout = cpu_to_be16(IPR_REQUEST_SENSE_TIMEOUT / HZ); 6152 6153 ipr_init_ioadl(ipr_cmd, ipr_cmd->sense_buffer_dma, 6154 SCSI_SENSE_BUFFERSIZE, IPR_IOADL_FLAGS_READ_LAST); 6155 6156 ipr_do_req(ipr_cmd, ipr_erp_done, ipr_timeout, 6157 IPR_REQUEST_SENSE_TIMEOUT * 2); 6158 } 6159 6160 /** 6161 * ipr_erp_request_sense - Send request sense to a device 6162 * @ipr_cmd: ipr command struct 6163 * 6164 * This function sends a request sense to a device as a result 6165 * of a check condition. 6166 * 6167 * Return value: 6168 * nothing 6169 **/ 6170 static void ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd) 6171 { 6172 struct ipr_hrr_queue *hrrq = ipr_cmd->hrrq; 6173 unsigned long hrrq_flags; 6174 6175 spin_lock_irqsave(&hrrq->_lock, hrrq_flags); 6176 __ipr_erp_request_sense(ipr_cmd); 6177 spin_unlock_irqrestore(&hrrq->_lock, hrrq_flags); 6178 } 6179 6180 /** 6181 * ipr_erp_cancel_all - Send cancel all to a device 6182 * @ipr_cmd: ipr command struct 6183 * 6184 * This function sends a cancel all to a device to clear the 6185 * queue. If we are running TCQ on the device, QERR is set to 1, 6186 * which means all outstanding ops have been dropped on the floor. 6187 * Cancel all will return them to us. 6188 * 6189 * Return value: 6190 * nothing 6191 **/ 6192 static void ipr_erp_cancel_all(struct ipr_cmnd *ipr_cmd) 6193 { 6194 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd; 6195 struct ipr_resource_entry *res = scsi_cmd->device->hostdata; 6196 struct ipr_cmd_pkt *cmd_pkt; 6197 6198 res->in_erp = 1; 6199 6200 ipr_reinit_ipr_cmnd_for_erp(ipr_cmd); 6201 6202 if (!scsi_cmd->device->simple_tags) { 6203 __ipr_erp_request_sense(ipr_cmd); 6204 return; 6205 } 6206 6207 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt; 6208 cmd_pkt->request_type = IPR_RQTYPE_IOACMD; 6209 cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS; 6210 6211 ipr_do_req(ipr_cmd, ipr_erp_request_sense, ipr_timeout, 6212 IPR_CANCEL_ALL_TIMEOUT); 6213 } 6214 6215 /** 6216 * ipr_dump_ioasa - Dump contents of IOASA 6217 * @ioa_cfg: ioa config struct 6218 * @ipr_cmd: ipr command struct 6219 * @res: resource entry struct 6220 * 6221 * This function is invoked by the interrupt handler when ops 6222 * fail. It will log the IOASA if appropriate. Only called 6223 * for GPDD ops. 
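 *
 * Logging policy, as implemented below: nothing is dumped when the adapter
 * log level is below IPR_DEFAULT_LOG_LEVEL; below IPR_MAX_LOG_LEVEL the dump
 * is further restricted to errors the IOA has not already logged (ilid == 0)
 * on generic SCSI resources whose error table entry has log_ioasa set.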
6224 * 6225 * Return value: 6226 * none 6227 **/ 6228 static void ipr_dump_ioasa(struct ipr_ioa_cfg *ioa_cfg, 6229 struct ipr_cmnd *ipr_cmd, struct ipr_resource_entry *res) 6230 { 6231 int i; 6232 u16 data_len; 6233 u32 ioasc, fd_ioasc; 6234 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa; 6235 __be32 *ioasa_data = (__be32 *)ioasa; 6236 int error_index; 6237 6238 ioasc = be32_to_cpu(ioasa->hdr.ioasc) & IPR_IOASC_IOASC_MASK; 6239 fd_ioasc = be32_to_cpu(ioasa->hdr.fd_ioasc) & IPR_IOASC_IOASC_MASK; 6240 6241 if (0 == ioasc) 6242 return; 6243 6244 if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL) 6245 return; 6246 6247 if (ioasc == IPR_IOASC_BUS_WAS_RESET && fd_ioasc) 6248 error_index = ipr_get_error(fd_ioasc); 6249 else 6250 error_index = ipr_get_error(ioasc); 6251 6252 if (ioa_cfg->log_level < IPR_MAX_LOG_LEVEL) { 6253 /* Don't log an error if the IOA already logged one */ 6254 if (ioasa->hdr.ilid != 0) 6255 return; 6256 6257 if (!ipr_is_gscsi(res)) 6258 return; 6259 6260 if (ipr_error_table[error_index].log_ioasa == 0) 6261 return; 6262 } 6263 6264 ipr_res_err(ioa_cfg, res, "%s\n", ipr_error_table[error_index].error); 6265 6266 data_len = be16_to_cpu(ioasa->hdr.ret_stat_len); 6267 if (ioa_cfg->sis64 && sizeof(struct ipr_ioasa64) < data_len) 6268 data_len = sizeof(struct ipr_ioasa64); 6269 else if (!ioa_cfg->sis64 && sizeof(struct ipr_ioasa) < data_len) 6270 data_len = sizeof(struct ipr_ioasa); 6271 6272 ipr_err("IOASA Dump:\n"); 6273 6274 for (i = 0; i < data_len / 4; i += 4) { 6275 ipr_err("%08X: %08X %08X %08X %08X\n", i*4, 6276 be32_to_cpu(ioasa_data[i]), 6277 be32_to_cpu(ioasa_data[i+1]), 6278 be32_to_cpu(ioasa_data[i+2]), 6279 be32_to_cpu(ioasa_data[i+3])); 6280 } 6281 } 6282 6283 /** 6284 * ipr_gen_sense - Generate SCSI sense data from an IOASA 6285 * @ipr_cmd: ipr command struct 6286 * 6287 * Return value: 6288 * none 6289 **/ 6290 static void ipr_gen_sense(struct ipr_cmnd *ipr_cmd) 6291 { 6292 u32 failing_lba; 6293 u8 *sense_buf = ipr_cmd->scsi_cmd->sense_buffer; 6294 struct ipr_resource_entry *res = ipr_cmd->scsi_cmd->device->hostdata; 6295 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa; 6296 u32 ioasc = be32_to_cpu(ioasa->hdr.ioasc); 6297 6298 memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE); 6299 6300 if (ioasc >= IPR_FIRST_DRIVER_IOASC) 6301 return; 6302 6303 ipr_cmd->scsi_cmd->result = SAM_STAT_CHECK_CONDITION; 6304 6305 if (ipr_is_vset_device(res) && 6306 ioasc == IPR_IOASC_MED_DO_NOT_REALLOC && 6307 ioasa->u.vset.failing_lba_hi != 0) { 6308 sense_buf[0] = 0x72; 6309 sense_buf[1] = IPR_IOASC_SENSE_KEY(ioasc); 6310 sense_buf[2] = IPR_IOASC_SENSE_CODE(ioasc); 6311 sense_buf[3] = IPR_IOASC_SENSE_QUAL(ioasc); 6312 6313 sense_buf[7] = 12; 6314 sense_buf[8] = 0; 6315 sense_buf[9] = 0x0A; 6316 sense_buf[10] = 0x80; 6317 6318 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_hi); 6319 6320 sense_buf[12] = (failing_lba & 0xff000000) >> 24; 6321 sense_buf[13] = (failing_lba & 0x00ff0000) >> 16; 6322 sense_buf[14] = (failing_lba & 0x0000ff00) >> 8; 6323 sense_buf[15] = failing_lba & 0x000000ff; 6324 6325 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo); 6326 6327 sense_buf[16] = (failing_lba & 0xff000000) >> 24; 6328 sense_buf[17] = (failing_lba & 0x00ff0000) >> 16; 6329 sense_buf[18] = (failing_lba & 0x0000ff00) >> 8; 6330 sense_buf[19] = failing_lba & 0x000000ff; 6331 } else { 6332 sense_buf[0] = 0x70; 6333 sense_buf[2] = IPR_IOASC_SENSE_KEY(ioasc); 6334 sense_buf[12] = IPR_IOASC_SENSE_CODE(ioasc); 6335 sense_buf[13] = IPR_IOASC_SENSE_QUAL(ioasc); 6336 6337 /* Illegal request */ 6338 if 
((IPR_IOASC_SENSE_KEY(ioasc) == 0x05) && 6339 (be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_FIELD_POINTER_VALID)) { 6340 sense_buf[7] = 10; /* additional length */ 6341 6342 /* IOARCB was in error */ 6343 if (IPR_IOASC_SENSE_CODE(ioasc) == 0x24) 6344 sense_buf[15] = 0xC0; 6345 else /* Parameter data was invalid */ 6346 sense_buf[15] = 0x80; 6347 6348 sense_buf[16] = 6349 ((IPR_FIELD_POINTER_MASK & 6350 be32_to_cpu(ioasa->hdr.ioasc_specific)) >> 8) & 0xff; 6351 sense_buf[17] = 6352 (IPR_FIELD_POINTER_MASK & 6353 be32_to_cpu(ioasa->hdr.ioasc_specific)) & 0xff; 6354 } else { 6355 if (ioasc == IPR_IOASC_MED_DO_NOT_REALLOC) { 6356 if (ipr_is_vset_device(res)) 6357 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo); 6358 else 6359 failing_lba = be32_to_cpu(ioasa->u.dasd.failing_lba); 6360 6361 sense_buf[0] |= 0x80; /* Or in the Valid bit */ 6362 sense_buf[3] = (failing_lba & 0xff000000) >> 24; 6363 sense_buf[4] = (failing_lba & 0x00ff0000) >> 16; 6364 sense_buf[5] = (failing_lba & 0x0000ff00) >> 8; 6365 sense_buf[6] = failing_lba & 0x000000ff; 6366 } 6367 6368 sense_buf[7] = 6; /* additional length */ 6369 } 6370 } 6371 } 6372 6373 /** 6374 * ipr_get_autosense - Copy autosense data to sense buffer 6375 * @ipr_cmd: ipr command struct 6376 * 6377 * This function copies the autosense buffer to the buffer 6378 * in the scsi_cmd, if there is autosense available. 6379 * 6380 * Return value: 6381 * 1 if autosense was available / 0 if not 6382 **/ 6383 static int ipr_get_autosense(struct ipr_cmnd *ipr_cmd) 6384 { 6385 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa; 6386 struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64; 6387 6388 if ((be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_AUTOSENSE_VALID) == 0) 6389 return 0; 6390 6391 if (ipr_cmd->ioa_cfg->sis64) 6392 memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa64->auto_sense.data, 6393 min_t(u16, be16_to_cpu(ioasa64->auto_sense.auto_sense_len), 6394 SCSI_SENSE_BUFFERSIZE)); 6395 else 6396 memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa->auto_sense.data, 6397 min_t(u16, be16_to_cpu(ioasa->auto_sense.auto_sense_len), 6398 SCSI_SENSE_BUFFERSIZE)); 6399 return 1; 6400 } 6401 6402 /** 6403 * ipr_erp_start - Process an error response for a SCSI op 6404 * @ioa_cfg: ioa config struct 6405 * @ipr_cmd: ipr command struct 6406 * 6407 * This function determines whether or not to initiate ERP 6408 * on the affected device. 
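 *
 * In outline, the switch below maps the masked IOASC to a mid-layer result
 * (DID_IMM_RETRY, DID_NO_CONNECT, DID_ERROR, ...), sense data is generated
 * first for non-GSCSI devices, and a check condition without valid autosense
 * on a non-NACA device starts the cancel-all / request-sense ERP sequence
 * instead of completing the command immediately.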
6409 * 6410 * Return value: 6411 * nothing 6412 **/ 6413 static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg, 6414 struct ipr_cmnd *ipr_cmd) 6415 { 6416 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd; 6417 struct ipr_resource_entry *res = scsi_cmd->device->hostdata; 6418 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); 6419 u32 masked_ioasc = ioasc & IPR_IOASC_IOASC_MASK; 6420 6421 if (!res) { 6422 __ipr_scsi_eh_done(ipr_cmd); 6423 return; 6424 } 6425 6426 if (!ipr_is_gscsi(res) && masked_ioasc != IPR_IOASC_HW_DEV_BUS_STATUS) 6427 ipr_gen_sense(ipr_cmd); 6428 6429 ipr_dump_ioasa(ioa_cfg, ipr_cmd, res); 6430 6431 switch (masked_ioasc) { 6432 case IPR_IOASC_ABORTED_CMD_TERM_BY_HOST: 6433 if (ipr_is_naca_model(res)) 6434 scsi_cmd->result |= (DID_ABORT << 16); 6435 else 6436 scsi_cmd->result |= (DID_IMM_RETRY << 16); 6437 break; 6438 case IPR_IOASC_IR_RESOURCE_HANDLE: 6439 case IPR_IOASC_IR_NO_CMDS_TO_2ND_IOA: 6440 scsi_cmd->result |= (DID_NO_CONNECT << 16); 6441 break; 6442 case IPR_IOASC_HW_SEL_TIMEOUT: 6443 scsi_cmd->result |= (DID_NO_CONNECT << 16); 6444 if (!ipr_is_naca_model(res)) 6445 res->needs_sync_complete = 1; 6446 break; 6447 case IPR_IOASC_SYNC_REQUIRED: 6448 if (!res->in_erp) 6449 res->needs_sync_complete = 1; 6450 scsi_cmd->result |= (DID_IMM_RETRY << 16); 6451 break; 6452 case IPR_IOASC_MED_DO_NOT_REALLOC: /* prevent retries */ 6453 case IPR_IOASA_IR_DUAL_IOA_DISABLED: 6454 /* 6455 * exception: do not set DID_PASSTHROUGH on CHECK CONDITION 6456 * so SCSI mid-layer and upper layers handle it accordingly. 6457 */ 6458 if (scsi_cmd->result != SAM_STAT_CHECK_CONDITION) 6459 scsi_cmd->result |= (DID_PASSTHROUGH << 16); 6460 break; 6461 case IPR_IOASC_BUS_WAS_RESET: 6462 case IPR_IOASC_BUS_WAS_RESET_BY_OTHER: 6463 /* 6464 * Report the bus reset and ask for a retry. The device 6465 * will give CC/UA the next command. 
6466 */ 6467 if (!res->resetting_device) 6468 scsi_report_bus_reset(ioa_cfg->host, scsi_cmd->device->channel); 6469 scsi_cmd->result |= (DID_ERROR << 16); 6470 if (!ipr_is_naca_model(res)) 6471 res->needs_sync_complete = 1; 6472 break; 6473 case IPR_IOASC_HW_DEV_BUS_STATUS: 6474 scsi_cmd->result |= IPR_IOASC_SENSE_STATUS(ioasc); 6475 if (IPR_IOASC_SENSE_STATUS(ioasc) == SAM_STAT_CHECK_CONDITION) { 6476 if (!ipr_get_autosense(ipr_cmd)) { 6477 if (!ipr_is_naca_model(res)) { 6478 ipr_erp_cancel_all(ipr_cmd); 6479 return; 6480 } 6481 } 6482 } 6483 if (!ipr_is_naca_model(res)) 6484 res->needs_sync_complete = 1; 6485 break; 6486 case IPR_IOASC_NR_INIT_CMD_REQUIRED: 6487 break; 6488 case IPR_IOASC_IR_NON_OPTIMIZED: 6489 if (res->raw_mode) { 6490 res->raw_mode = 0; 6491 scsi_cmd->result |= (DID_IMM_RETRY << 16); 6492 } else 6493 scsi_cmd->result |= (DID_ERROR << 16); 6494 break; 6495 default: 6496 if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR) 6497 scsi_cmd->result |= (DID_ERROR << 16); 6498 if (!ipr_is_vset_device(res) && !ipr_is_naca_model(res)) 6499 res->needs_sync_complete = 1; 6500 break; 6501 } 6502 6503 scsi_dma_unmap(ipr_cmd->scsi_cmd); 6504 scsi_done(scsi_cmd); 6505 if (ipr_cmd->eh_comp) 6506 complete(ipr_cmd->eh_comp); 6507 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); 6508 } 6509 6510 /** 6511 * ipr_scsi_done - mid-layer done function 6512 * @ipr_cmd: ipr command struct 6513 * 6514 * This function is invoked by the interrupt handler for 6515 * ops generated by the SCSI mid-layer 6516 * 6517 * Return value: 6518 * none 6519 **/ 6520 static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd) 6521 { 6522 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 6523 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd; 6524 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); 6525 unsigned long lock_flags; 6526 6527 scsi_set_resid(scsi_cmd, be32_to_cpu(ipr_cmd->s.ioasa.hdr.residual_data_len)); 6528 6529 if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) { 6530 scsi_dma_unmap(scsi_cmd); 6531 6532 spin_lock_irqsave(ipr_cmd->hrrq->lock, lock_flags); 6533 scsi_done(scsi_cmd); 6534 if (ipr_cmd->eh_comp) 6535 complete(ipr_cmd->eh_comp); 6536 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); 6537 spin_unlock_irqrestore(ipr_cmd->hrrq->lock, lock_flags); 6538 } else { 6539 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 6540 spin_lock(&ipr_cmd->hrrq->_lock); 6541 ipr_erp_start(ioa_cfg, ipr_cmd); 6542 spin_unlock(&ipr_cmd->hrrq->_lock); 6543 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 6544 } 6545 } 6546 6547 /** 6548 * ipr_queuecommand - Queue a mid-layer request 6549 * @shost: scsi host struct 6550 * @scsi_cmd: scsi command struct 6551 * 6552 * This function queues a request generated by the mid-layer. 
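 *
 * In outline: GATA requests are handed straight to libata via
 * ata_sas_queuecmd(); for everything else a free command block is taken from
 * the selected HRRQ, the CDB is copied into the IOARCB, the IOADL
 * scatter/gather list is built (64-bit or 32-bit format depending on the
 * adapter), and the command is posted to the adapter with ipr_send_command().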
6553 * 6554 * Return value: 6555 * 0 on success 6556 * SCSI_MLQUEUE_DEVICE_BUSY if device is busy 6557 * SCSI_MLQUEUE_HOST_BUSY if host is busy 6558 **/ 6559 static int ipr_queuecommand(struct Scsi_Host *shost, 6560 struct scsi_cmnd *scsi_cmd) 6561 { 6562 struct ipr_ioa_cfg *ioa_cfg; 6563 struct ipr_resource_entry *res; 6564 struct ipr_ioarcb *ioarcb; 6565 struct ipr_cmnd *ipr_cmd; 6566 unsigned long hrrq_flags, lock_flags; 6567 int rc; 6568 struct ipr_hrr_queue *hrrq; 6569 int hrrq_id; 6570 6571 ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; 6572 6573 scsi_cmd->result = (DID_OK << 16); 6574 res = scsi_cmd->device->hostdata; 6575 6576 if (ipr_is_gata(res) && res->sata_port) { 6577 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 6578 rc = ata_sas_queuecmd(scsi_cmd, res->sata_port->ap); 6579 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 6580 return rc; 6581 } 6582 6583 hrrq_id = ipr_get_hrrq_index(ioa_cfg); 6584 hrrq = &ioa_cfg->hrrq[hrrq_id]; 6585 6586 spin_lock_irqsave(hrrq->lock, hrrq_flags); 6587 /* 6588 * We are currently blocking all devices due to a host reset 6589 * We have told the host to stop giving us new requests, but 6590 * ERP ops don't count. FIXME 6591 */ 6592 if (unlikely(!hrrq->allow_cmds && !hrrq->ioa_is_dead && !hrrq->removing_ioa)) { 6593 spin_unlock_irqrestore(hrrq->lock, hrrq_flags); 6594 return SCSI_MLQUEUE_HOST_BUSY; 6595 } 6596 6597 /* 6598 * FIXME - Create scsi_set_host_offline interface 6599 * and the ioa_is_dead check can be removed 6600 */ 6601 if (unlikely(hrrq->ioa_is_dead || hrrq->removing_ioa || !res)) { 6602 spin_unlock_irqrestore(hrrq->lock, hrrq_flags); 6603 goto err_nodev; 6604 } 6605 6606 ipr_cmd = __ipr_get_free_ipr_cmnd(hrrq); 6607 if (ipr_cmd == NULL) { 6608 spin_unlock_irqrestore(hrrq->lock, hrrq_flags); 6609 return SCSI_MLQUEUE_HOST_BUSY; 6610 } 6611 spin_unlock_irqrestore(hrrq->lock, hrrq_flags); 6612 6613 ipr_init_ipr_cmnd(ipr_cmd, ipr_scsi_done); 6614 ioarcb = &ipr_cmd->ioarcb; 6615 6616 memcpy(ioarcb->cmd_pkt.cdb, scsi_cmd->cmnd, scsi_cmd->cmd_len); 6617 ipr_cmd->scsi_cmd = scsi_cmd; 6618 ipr_cmd->done = ipr_scsi_eh_done; 6619 6620 if (ipr_is_gscsi(res)) { 6621 if (scsi_cmd->underflow == 0) 6622 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK; 6623 6624 if (res->reset_occurred) { 6625 res->reset_occurred = 0; 6626 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_DELAY_AFTER_RST; 6627 } 6628 } 6629 6630 if (ipr_is_gscsi(res) || ipr_is_vset_device(res)) { 6631 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC; 6632 6633 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_ALIGNED_BFR; 6634 if (scsi_cmd->flags & SCMD_TAGGED) 6635 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_SIMPLE_TASK; 6636 else 6637 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_UNTAGGED_TASK; 6638 } 6639 6640 if (scsi_cmd->cmnd[0] >= 0xC0 && 6641 (!ipr_is_gscsi(res) || scsi_cmd->cmnd[0] == IPR_QUERY_RSRC_STATE)) { 6642 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD; 6643 } 6644 if (res->raw_mode && ipr_is_af_dasd_device(res)) { 6645 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_PIPE; 6646 6647 if (scsi_cmd->underflow == 0) 6648 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK; 6649 } 6650 6651 if (ioa_cfg->sis64) 6652 rc = ipr_build_ioadl64(ioa_cfg, ipr_cmd); 6653 else 6654 rc = ipr_build_ioadl(ioa_cfg, ipr_cmd); 6655 6656 spin_lock_irqsave(hrrq->lock, hrrq_flags); 6657 if (unlikely(rc || (!hrrq->allow_cmds && !hrrq->ioa_is_dead))) { 6658 list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q); 6659 spin_unlock_irqrestore(hrrq->lock, hrrq_flags); 6660 if (!rc) 6661 
scsi_dma_unmap(scsi_cmd); 6662 return SCSI_MLQUEUE_HOST_BUSY; 6663 } 6664 6665 if (unlikely(hrrq->ioa_is_dead)) { 6666 list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q); 6667 spin_unlock_irqrestore(hrrq->lock, hrrq_flags); 6668 scsi_dma_unmap(scsi_cmd); 6669 goto err_nodev; 6670 } 6671 6672 ioarcb->res_handle = res->res_handle; 6673 if (res->needs_sync_complete) { 6674 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_SYNC_COMPLETE; 6675 res->needs_sync_complete = 0; 6676 } 6677 list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_pending_q); 6678 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res)); 6679 ipr_send_command(ipr_cmd); 6680 spin_unlock_irqrestore(hrrq->lock, hrrq_flags); 6681 return 0; 6682 6683 err_nodev: 6684 spin_lock_irqsave(hrrq->lock, hrrq_flags); 6685 memset(scsi_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE); 6686 scsi_cmd->result = (DID_NO_CONNECT << 16); 6687 scsi_done(scsi_cmd); 6688 spin_unlock_irqrestore(hrrq->lock, hrrq_flags); 6689 return 0; 6690 } 6691 6692 /** 6693 * ipr_ioctl - IOCTL handler 6694 * @sdev: scsi device struct 6695 * @cmd: IOCTL cmd 6696 * @arg: IOCTL arg 6697 * 6698 * Return value: 6699 * 0 on success / other on failure 6700 **/ 6701 static int ipr_ioctl(struct scsi_device *sdev, unsigned int cmd, 6702 void __user *arg) 6703 { 6704 struct ipr_resource_entry *res; 6705 6706 res = (struct ipr_resource_entry *)sdev->hostdata; 6707 if (res && ipr_is_gata(res)) { 6708 if (cmd == HDIO_GET_IDENTITY) 6709 return -ENOTTY; 6710 return ata_sas_scsi_ioctl(res->sata_port->ap, sdev, cmd, arg); 6711 } 6712 6713 return -EINVAL; 6714 } 6715 6716 /** 6717 * ipr_ioa_info - Get information about the card/driver 6718 * @host: scsi host struct 6719 * 6720 * Return value: 6721 * pointer to buffer with description string 6722 **/ 6723 static const char *ipr_ioa_info(struct Scsi_Host *host) 6724 { 6725 static char buffer[512]; 6726 struct ipr_ioa_cfg *ioa_cfg; 6727 unsigned long lock_flags = 0; 6728 6729 ioa_cfg = (struct ipr_ioa_cfg *) host->hostdata; 6730 6731 spin_lock_irqsave(host->host_lock, lock_flags); 6732 sprintf(buffer, "IBM %X Storage Adapter", ioa_cfg->type); 6733 spin_unlock_irqrestore(host->host_lock, lock_flags); 6734 6735 return buffer; 6736 } 6737 6738 static struct scsi_host_template driver_template = { 6739 .module = THIS_MODULE, 6740 .name = "IPR", 6741 .info = ipr_ioa_info, 6742 .ioctl = ipr_ioctl, 6743 #ifdef CONFIG_COMPAT 6744 .compat_ioctl = ipr_ioctl, 6745 #endif 6746 .queuecommand = ipr_queuecommand, 6747 .dma_need_drain = ata_scsi_dma_need_drain, 6748 .eh_abort_handler = ipr_eh_abort, 6749 .eh_device_reset_handler = ipr_eh_dev_reset, 6750 .eh_host_reset_handler = ipr_eh_host_reset, 6751 .slave_alloc = ipr_slave_alloc, 6752 .slave_configure = ipr_slave_configure, 6753 .slave_destroy = ipr_slave_destroy, 6754 .scan_finished = ipr_scan_finished, 6755 .target_alloc = ipr_target_alloc, 6756 .target_destroy = ipr_target_destroy, 6757 .change_queue_depth = ipr_change_queue_depth, 6758 .bios_param = ipr_biosparam, 6759 .can_queue = IPR_MAX_COMMANDS, 6760 .this_id = -1, 6761 .sg_tablesize = IPR_MAX_SGLIST, 6762 .max_sectors = IPR_IOA_MAX_SECTORS, 6763 .cmd_per_lun = IPR_MAX_CMD_PER_LUN, 6764 .shost_groups = ipr_ioa_groups, 6765 .sdev_groups = ipr_dev_groups, 6766 .proc_name = IPR_NAME, 6767 }; 6768 6769 /** 6770 * ipr_ata_phy_reset - libata phy_reset handler 6771 * @ap: ata port to reset 6772 * 6773 **/ 6774 static void ipr_ata_phy_reset(struct ata_port *ap) 6775 { 6776 unsigned long flags; 6777 struct ipr_sata_port *sata_port = ap->private_data; 6778 struct 
ipr_resource_entry *res = sata_port->res; 6779 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg; 6780 int rc; 6781 6782 ENTER; 6783 spin_lock_irqsave(ioa_cfg->host->host_lock, flags); 6784 while (ioa_cfg->in_reset_reload) { 6785 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags); 6786 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); 6787 spin_lock_irqsave(ioa_cfg->host->host_lock, flags); 6788 } 6789 6790 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) 6791 goto out_unlock; 6792 6793 rc = ipr_device_reset(ioa_cfg, res); 6794 6795 if (rc) { 6796 ap->link.device[0].class = ATA_DEV_NONE; 6797 goto out_unlock; 6798 } 6799 6800 ap->link.device[0].class = res->ata_class; 6801 if (ap->link.device[0].class == ATA_DEV_UNKNOWN) 6802 ap->link.device[0].class = ATA_DEV_NONE; 6803 6804 out_unlock: 6805 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags); 6806 LEAVE; 6807 } 6808 6809 /** 6810 * ipr_ata_post_internal - Cleanup after an internal command 6811 * @qc: ATA queued command 6812 * 6813 * Return value: 6814 * none 6815 **/ 6816 static void ipr_ata_post_internal(struct ata_queued_cmd *qc) 6817 { 6818 struct ipr_sata_port *sata_port = qc->ap->private_data; 6819 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg; 6820 struct ipr_cmnd *ipr_cmd; 6821 struct ipr_hrr_queue *hrrq; 6822 unsigned long flags; 6823 6824 spin_lock_irqsave(ioa_cfg->host->host_lock, flags); 6825 while (ioa_cfg->in_reset_reload) { 6826 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags); 6827 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); 6828 spin_lock_irqsave(ioa_cfg->host->host_lock, flags); 6829 } 6830 6831 for_each_hrrq(hrrq, ioa_cfg) { 6832 spin_lock(&hrrq->_lock); 6833 list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) { 6834 if (ipr_cmd->qc == qc) { 6835 ipr_device_reset(ioa_cfg, sata_port->res); 6836 break; 6837 } 6838 } 6839 spin_unlock(&hrrq->_lock); 6840 } 6841 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags); 6842 } 6843 6844 /** 6845 * ipr_copy_sata_tf - Copy a SATA taskfile to an IOA data structure 6846 * @regs: destination 6847 * @tf: source ATA taskfile 6848 * 6849 * Return value: 6850 * none 6851 **/ 6852 static void ipr_copy_sata_tf(struct ipr_ioarcb_ata_regs *regs, 6853 struct ata_taskfile *tf) 6854 { 6855 regs->feature = tf->feature; 6856 regs->nsect = tf->nsect; 6857 regs->lbal = tf->lbal; 6858 regs->lbam = tf->lbam; 6859 regs->lbah = tf->lbah; 6860 regs->device = tf->device; 6861 regs->command = tf->command; 6862 regs->hob_feature = tf->hob_feature; 6863 regs->hob_nsect = tf->hob_nsect; 6864 regs->hob_lbal = tf->hob_lbal; 6865 regs->hob_lbam = tf->hob_lbam; 6866 regs->hob_lbah = tf->hob_lbah; 6867 regs->ctl = tf->ctl; 6868 } 6869 6870 /** 6871 * ipr_sata_done - done function for SATA commands 6872 * @ipr_cmd: ipr command struct 6873 * 6874 * This function is invoked by the interrupt handler for 6875 * ops generated by the SCSI mid-layer to SATA devices 6876 * 6877 * Return value: 6878 * none 6879 **/ 6880 static void ipr_sata_done(struct ipr_cmnd *ipr_cmd) 6881 { 6882 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 6883 struct ata_queued_cmd *qc = ipr_cmd->qc; 6884 struct ipr_sata_port *sata_port = qc->ap->private_data; 6885 struct ipr_resource_entry *res = sata_port->res; 6886 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); 6887 6888 spin_lock(&ipr_cmd->hrrq->_lock); 6889 if (ipr_cmd->ioa_cfg->sis64) 6890 memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata, 6891 sizeof(struct ipr_ioasa_gata)); 6892 else 6893 memcpy(&sata_port->ioasa, 
&ipr_cmd->s.ioasa.u.gata, 6894 sizeof(struct ipr_ioasa_gata)); 6895 ipr_dump_ioasa(ioa_cfg, ipr_cmd, res); 6896 6897 if (be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc_specific) & IPR_ATA_DEVICE_WAS_RESET) 6898 scsi_report_device_reset(ioa_cfg->host, res->bus, res->target); 6899 6900 if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR) 6901 qc->err_mask |= __ac_err_mask(sata_port->ioasa.status); 6902 else 6903 qc->err_mask |= ac_err_mask(sata_port->ioasa.status); 6904 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); 6905 spin_unlock(&ipr_cmd->hrrq->_lock); 6906 ata_qc_complete(qc); 6907 } 6908 6909 /** 6910 * ipr_build_ata_ioadl64 - Build an ATA scatter/gather list 6911 * @ipr_cmd: ipr command struct 6912 * @qc: ATA queued command 6913 * 6914 **/ 6915 static void ipr_build_ata_ioadl64(struct ipr_cmnd *ipr_cmd, 6916 struct ata_queued_cmd *qc) 6917 { 6918 u32 ioadl_flags = 0; 6919 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; 6920 struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ata_ioadl.ioadl64; 6921 struct ipr_ioadl64_desc *last_ioadl64 = NULL; 6922 int len = qc->nbytes; 6923 struct scatterlist *sg; 6924 unsigned int si; 6925 dma_addr_t dma_addr = ipr_cmd->dma_addr; 6926 6927 if (len == 0) 6928 return; 6929 6930 if (qc->dma_dir == DMA_TO_DEVICE) { 6931 ioadl_flags = IPR_IOADL_FLAGS_WRITE; 6932 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ; 6933 } else if (qc->dma_dir == DMA_FROM_DEVICE) 6934 ioadl_flags = IPR_IOADL_FLAGS_READ; 6935 6936 ioarcb->data_transfer_length = cpu_to_be32(len); 6937 ioarcb->ioadl_len = 6938 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg); 6939 ioarcb->u.sis64_addr_data.data_ioadl_addr = 6940 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ata_ioadl.ioadl64)); 6941 6942 for_each_sg(qc->sg, sg, qc->n_elem, si) { 6943 ioadl64->flags = cpu_to_be32(ioadl_flags); 6944 ioadl64->data_len = cpu_to_be32(sg_dma_len(sg)); 6945 ioadl64->address = cpu_to_be64(sg_dma_address(sg)); 6946 6947 last_ioadl64 = ioadl64; 6948 ioadl64++; 6949 } 6950 6951 if (likely(last_ioadl64)) 6952 last_ioadl64->flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST); 6953 } 6954 6955 /** 6956 * ipr_build_ata_ioadl - Build an ATA scatter/gather list 6957 * @ipr_cmd: ipr command struct 6958 * @qc: ATA queued command 6959 * 6960 **/ 6961 static void ipr_build_ata_ioadl(struct ipr_cmnd *ipr_cmd, 6962 struct ata_queued_cmd *qc) 6963 { 6964 u32 ioadl_flags = 0; 6965 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; 6966 struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl; 6967 struct ipr_ioadl_desc *last_ioadl = NULL; 6968 int len = qc->nbytes; 6969 struct scatterlist *sg; 6970 unsigned int si; 6971 6972 if (len == 0) 6973 return; 6974 6975 if (qc->dma_dir == DMA_TO_DEVICE) { 6976 ioadl_flags = IPR_IOADL_FLAGS_WRITE; 6977 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ; 6978 ioarcb->data_transfer_length = cpu_to_be32(len); 6979 ioarcb->ioadl_len = 6980 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg); 6981 } else if (qc->dma_dir == DMA_FROM_DEVICE) { 6982 ioadl_flags = IPR_IOADL_FLAGS_READ; 6983 ioarcb->read_data_transfer_length = cpu_to_be32(len); 6984 ioarcb->read_ioadl_len = 6985 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg); 6986 } 6987 6988 for_each_sg(qc->sg, sg, qc->n_elem, si) { 6989 ioadl->flags_and_data_len = cpu_to_be32(ioadl_flags | sg_dma_len(sg)); 6990 ioadl->address = cpu_to_be32(sg_dma_address(sg)); 6991 6992 last_ioadl = ioadl; 6993 ioadl++; 6994 } 6995 6996 if (likely(last_ioadl)) 6997 last_ioadl->flags_and_data_len |= 
cpu_to_be32(IPR_IOADL_FLAGS_LAST); 6998 } 6999 7000 /** 7001 * ipr_qc_defer - Get a free ipr_cmd 7002 * @qc: queued command 7003 * 7004 * Return value: 7005 * 0 if success 7006 **/ 7007 static int ipr_qc_defer(struct ata_queued_cmd *qc) 7008 { 7009 struct ata_port *ap = qc->ap; 7010 struct ipr_sata_port *sata_port = ap->private_data; 7011 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg; 7012 struct ipr_cmnd *ipr_cmd; 7013 struct ipr_hrr_queue *hrrq; 7014 int hrrq_id; 7015 7016 hrrq_id = ipr_get_hrrq_index(ioa_cfg); 7017 hrrq = &ioa_cfg->hrrq[hrrq_id]; 7018 7019 qc->lldd_task = NULL; 7020 spin_lock(&hrrq->_lock); 7021 if (unlikely(hrrq->ioa_is_dead)) { 7022 spin_unlock(&hrrq->_lock); 7023 return 0; 7024 } 7025 7026 if (unlikely(!hrrq->allow_cmds)) { 7027 spin_unlock(&hrrq->_lock); 7028 return ATA_DEFER_LINK; 7029 } 7030 7031 ipr_cmd = __ipr_get_free_ipr_cmnd(hrrq); 7032 if (ipr_cmd == NULL) { 7033 spin_unlock(&hrrq->_lock); 7034 return ATA_DEFER_LINK; 7035 } 7036 7037 qc->lldd_task = ipr_cmd; 7038 spin_unlock(&hrrq->_lock); 7039 return 0; 7040 } 7041 7042 /** 7043 * ipr_qc_issue - Issue a SATA qc to a device 7044 * @qc: queued command 7045 * 7046 * Return value: 7047 * 0 if success 7048 **/ 7049 static unsigned int ipr_qc_issue(struct ata_queued_cmd *qc) 7050 { 7051 struct ata_port *ap = qc->ap; 7052 struct ipr_sata_port *sata_port = ap->private_data; 7053 struct ipr_resource_entry *res = sata_port->res; 7054 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg; 7055 struct ipr_cmnd *ipr_cmd; 7056 struct ipr_ioarcb *ioarcb; 7057 struct ipr_ioarcb_ata_regs *regs; 7058 7059 if (qc->lldd_task == NULL) 7060 ipr_qc_defer(qc); 7061 7062 ipr_cmd = qc->lldd_task; 7063 if (ipr_cmd == NULL) 7064 return AC_ERR_SYSTEM; 7065 7066 qc->lldd_task = NULL; 7067 spin_lock(&ipr_cmd->hrrq->_lock); 7068 if (unlikely(!ipr_cmd->hrrq->allow_cmds || 7069 ipr_cmd->hrrq->ioa_is_dead)) { 7070 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); 7071 spin_unlock(&ipr_cmd->hrrq->_lock); 7072 return AC_ERR_SYSTEM; 7073 } 7074 7075 ipr_init_ipr_cmnd(ipr_cmd, ipr_lock_and_done); 7076 ioarcb = &ipr_cmd->ioarcb; 7077 7078 if (ioa_cfg->sis64) { 7079 regs = &ipr_cmd->i.ata_ioadl.regs; 7080 ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb)); 7081 } else 7082 regs = &ioarcb->u.add_data.u.regs; 7083 7084 memset(regs, 0, sizeof(*regs)); 7085 ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(*regs)); 7086 7087 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q); 7088 ipr_cmd->qc = qc; 7089 ipr_cmd->done = ipr_sata_done; 7090 ipr_cmd->ioarcb.res_handle = res->res_handle; 7091 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_ATA_PASSTHRU; 7092 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC; 7093 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK; 7094 ipr_cmd->dma_use_sg = qc->n_elem; 7095 7096 if (ioa_cfg->sis64) 7097 ipr_build_ata_ioadl64(ipr_cmd, qc); 7098 else 7099 ipr_build_ata_ioadl(ipr_cmd, qc); 7100 7101 regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION; 7102 ipr_copy_sata_tf(regs, &qc->tf); 7103 memcpy(ioarcb->cmd_pkt.cdb, qc->cdb, IPR_MAX_CDB_LEN); 7104 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res)); 7105 7106 switch (qc->tf.protocol) { 7107 case ATA_PROT_NODATA: 7108 case ATA_PROT_PIO: 7109 break; 7110 7111 case ATA_PROT_DMA: 7112 regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA; 7113 break; 7114 7115 case ATAPI_PROT_PIO: 7116 case ATAPI_PROT_NODATA: 7117 regs->flags |= IPR_ATA_FLAG_PACKET_CMD; 7118 break; 7119 7120 case ATAPI_PROT_DMA: 7121 regs->flags |= IPR_ATA_FLAG_PACKET_CMD; 
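/* ATAPI DMA needs both the packet protocol flag and the DMA transfer type flag */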
7122 regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA; 7123 break; 7124 7125 default: 7126 WARN_ON(1); 7127 spin_unlock(&ipr_cmd->hrrq->_lock); 7128 return AC_ERR_INVALID; 7129 } 7130 7131 ipr_send_command(ipr_cmd); 7132 spin_unlock(&ipr_cmd->hrrq->_lock); 7133 7134 return 0; 7135 } 7136 7137 /** 7138 * ipr_qc_fill_rtf - Read result TF 7139 * @qc: ATA queued command 7140 **/ 7141 static void ipr_qc_fill_rtf(struct ata_queued_cmd *qc) 7142 { 7143 struct ipr_sata_port *sata_port = qc->ap->private_data; 7144 struct ipr_ioasa_gata *g = &sata_port->ioasa; 7145 struct ata_taskfile *tf = &qc->result_tf; 7146 7147 tf->feature = g->error; 7148 tf->nsect = g->nsect; 7149 tf->lbal = g->lbal; 7150 tf->lbam = g->lbam; 7151 tf->lbah = g->lbah; 7152 tf->device = g->device; 7153 tf->command = g->status; 7154 tf->hob_nsect = g->hob_nsect; 7155 tf->hob_lbal = g->hob_lbal; 7156 tf->hob_lbam = g->hob_lbam; 7157 tf->hob_lbah = g->hob_lbah; 7158 } 7159 7160 static struct ata_port_operations ipr_sata_ops = { 7161 .phy_reset = ipr_ata_phy_reset, 7162 .hardreset = ipr_sata_reset, 7163 .post_internal_cmd = ipr_ata_post_internal, 7164 .qc_prep = ata_noop_qc_prep, 7165 .qc_defer = ipr_qc_defer, 7166 .qc_issue = ipr_qc_issue, 7167 .qc_fill_rtf = ipr_qc_fill_rtf, 7168 .port_start = ata_sas_port_start, 7169 .port_stop = ata_sas_port_stop 7170 }; 7171 7172 static struct ata_port_info sata_port_info = { 7173 .flags = ATA_FLAG_SATA | ATA_FLAG_PIO_DMA | 7174 ATA_FLAG_SAS_HOST, 7175 .pio_mask = ATA_PIO4_ONLY, 7176 .mwdma_mask = ATA_MWDMA2, 7177 .udma_mask = ATA_UDMA6, 7178 .port_ops = &ipr_sata_ops 7179 }; 7180 7181 #ifdef CONFIG_PPC_PSERIES 7182 static const u16 ipr_blocked_processors[] = { 7183 PVR_NORTHSTAR, 7184 PVR_PULSAR, 7185 PVR_POWER4, 7186 PVR_ICESTAR, 7187 PVR_SSTAR, 7188 PVR_POWER4p, 7189 PVR_630, 7190 PVR_630p 7191 }; 7192 7193 /** 7194 * ipr_invalid_adapter - Determine if this adapter is supported on this hardware 7195 * @ioa_cfg: ioa cfg struct 7196 * 7197 * Adapters that use Gemstone revision < 3.1 do not work reliably on 7198 * certain pSeries hardware. This function determines if the given 7199 * adapter is in one of these configurations or not. 7200 * 7201 * Return value: 7202 * 1 if adapter is not supported / 0 if adapter is supported 7203 **/ 7204 static int ipr_invalid_adapter(struct ipr_ioa_cfg *ioa_cfg) 7205 { 7206 int i; 7207 7208 if ((ioa_cfg->type == 0x5702) && (ioa_cfg->pdev->revision < 4)) { 7209 for (i = 0; i < ARRAY_SIZE(ipr_blocked_processors); i++) { 7210 if (pvr_version_is(ipr_blocked_processors[i])) 7211 return 1; 7212 } 7213 } 7214 return 0; 7215 } 7216 #else 7217 #define ipr_invalid_adapter(ioa_cfg) 0 7218 #endif 7219 7220 /** 7221 * ipr_ioa_bringdown_done - IOA bring down completion. 7222 * @ipr_cmd: ipr command struct 7223 * 7224 * This function processes the completion of an adapter bring down. 7225 * It wakes any reset sleepers.
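 *
 * In outline: unless the adapter is being removed, the mid-layer is unblocked
 * and the work queue is kicked; in_reset_reload is cleared, every HRRQ is
 * marked ioa_is_dead, and anyone sleeping on reset_wait_q is woken.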
7226 * 7227 * Return value: 7228 * IPR_RC_JOB_RETURN 7229 **/ 7230 static int ipr_ioa_bringdown_done(struct ipr_cmnd *ipr_cmd) 7231 { 7232 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 7233 int i; 7234 7235 ENTER; 7236 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) { 7237 ipr_trace; 7238 ioa_cfg->scsi_unblock = 1; 7239 schedule_work(&ioa_cfg->work_q); 7240 } 7241 7242 ioa_cfg->in_reset_reload = 0; 7243 ioa_cfg->reset_retries = 0; 7244 for (i = 0; i < ioa_cfg->hrrq_num; i++) { 7245 spin_lock(&ioa_cfg->hrrq[i]._lock); 7246 ioa_cfg->hrrq[i].ioa_is_dead = 1; 7247 spin_unlock(&ioa_cfg->hrrq[i]._lock); 7248 } 7249 wmb(); 7250 7251 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); 7252 wake_up_all(&ioa_cfg->reset_wait_q); 7253 LEAVE; 7254 7255 return IPR_RC_JOB_RETURN; 7256 } 7257 7258 /** 7259 * ipr_ioa_reset_done - IOA reset completion. 7260 * @ipr_cmd: ipr command struct 7261 * 7262 * This function processes the completion of an adapter reset. 7263 * It schedules any necessary mid-layer add/removes and 7264 * wakes any reset sleepers. 7265 * 7266 * Return value: 7267 * IPR_RC_JOB_RETURN 7268 **/ 7269 static int ipr_ioa_reset_done(struct ipr_cmnd *ipr_cmd) 7270 { 7271 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 7272 struct ipr_resource_entry *res; 7273 int j; 7274 7275 ENTER; 7276 ioa_cfg->in_reset_reload = 0; 7277 for (j = 0; j < ioa_cfg->hrrq_num; j++) { 7278 spin_lock(&ioa_cfg->hrrq[j]._lock); 7279 ioa_cfg->hrrq[j].allow_cmds = 1; 7280 spin_unlock(&ioa_cfg->hrrq[j]._lock); 7281 } 7282 wmb(); 7283 ioa_cfg->reset_cmd = NULL; 7284 ioa_cfg->doorbell |= IPR_RUNTIME_RESET; 7285 7286 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) { 7287 if (res->add_to_ml || res->del_from_ml) { 7288 ipr_trace; 7289 break; 7290 } 7291 } 7292 schedule_work(&ioa_cfg->work_q); 7293 7294 for (j = 0; j < IPR_NUM_HCAMS; j++) { 7295 list_del_init(&ioa_cfg->hostrcb[j]->queue); 7296 if (j < IPR_NUM_LOG_HCAMS) 7297 ipr_send_hcam(ioa_cfg, 7298 IPR_HCAM_CDB_OP_CODE_LOG_DATA, 7299 ioa_cfg->hostrcb[j]); 7300 else 7301 ipr_send_hcam(ioa_cfg, 7302 IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, 7303 ioa_cfg->hostrcb[j]); 7304 } 7305 7306 scsi_report_bus_reset(ioa_cfg->host, IPR_VSET_BUS); 7307 dev_info(&ioa_cfg->pdev->dev, "IOA initialized.\n"); 7308 7309 ioa_cfg->reset_retries = 0; 7310 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); 7311 wake_up_all(&ioa_cfg->reset_wait_q); 7312 7313 ioa_cfg->scsi_unblock = 1; 7314 schedule_work(&ioa_cfg->work_q); 7315 LEAVE; 7316 return IPR_RC_JOB_RETURN; 7317 } 7318 7319 /** 7320 * ipr_set_sup_dev_dflt - Initialize a Set Supported Device buffer 7321 * @supported_dev: supported device struct 7322 * @vpids: vendor product id struct 7323 * 7324 * Return value: 7325 * none 7326 **/ 7327 static void ipr_set_sup_dev_dflt(struct ipr_supported_device *supported_dev, 7328 struct ipr_std_inq_vpids *vpids) 7329 { 7330 memset(supported_dev, 0, sizeof(struct ipr_supported_device)); 7331 memcpy(&supported_dev->vpids, vpids, sizeof(struct ipr_std_inq_vpids)); 7332 supported_dev->num_records = 1; 7333 supported_dev->data_length = 7334 cpu_to_be16(sizeof(struct ipr_supported_device)); 7335 supported_dev->reserved = 0; 7336 } 7337 7338 /** 7339 * ipr_set_supported_devs - Send Set Supported Devices for a device 7340 * @ipr_cmd: ipr command struct 7341 * 7342 * This function sends a Set Supported Devices to the adapter 7343 * 7344 * Return value: 7345 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN 7346 **/ 7347 static int ipr_set_supported_devs(struct ipr_cmnd *ipr_cmd) 7348 { 7349 struct 
ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 7350 struct ipr_supported_device *supp_dev = &ioa_cfg->vpd_cbs->supp_dev; 7351 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; 7352 struct ipr_resource_entry *res = ipr_cmd->u.res; 7353 7354 ipr_cmd->job_step = ipr_ioa_reset_done; 7355 7356 list_for_each_entry_continue(res, &ioa_cfg->used_res_q, queue) { 7357 if (!ipr_is_scsi_disk(res)) 7358 continue; 7359 7360 ipr_cmd->u.res = res; 7361 ipr_set_sup_dev_dflt(supp_dev, &res->std_inq_data.vpids); 7362 7363 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE); 7364 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ; 7365 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD; 7366 7367 ioarcb->cmd_pkt.cdb[0] = IPR_SET_SUPPORTED_DEVICES; 7368 ioarcb->cmd_pkt.cdb[1] = IPR_SET_ALL_SUPPORTED_DEVICES; 7369 ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_supported_device) >> 8) & 0xff; 7370 ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_supported_device) & 0xff; 7371 7372 ipr_init_ioadl(ipr_cmd, 7373 ioa_cfg->vpd_cbs_dma + 7374 offsetof(struct ipr_misc_cbs, supp_dev), 7375 sizeof(struct ipr_supported_device), 7376 IPR_IOADL_FLAGS_WRITE_LAST); 7377 7378 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, 7379 IPR_SET_SUP_DEVICE_TIMEOUT); 7380 7381 if (!ioa_cfg->sis64) 7382 ipr_cmd->job_step = ipr_set_supported_devs; 7383 LEAVE; 7384 return IPR_RC_JOB_RETURN; 7385 } 7386 7387 LEAVE; 7388 return IPR_RC_JOB_CONTINUE; 7389 } 7390 7391 /** 7392 * ipr_get_mode_page - Locate specified mode page 7393 * @mode_pages: mode page buffer 7394 * @page_code: page code to find 7395 * @len: minimum required length for mode page 7396 * 7397 * Return value: 7398 * pointer to mode page / NULL on failure 7399 **/ 7400 static void *ipr_get_mode_page(struct ipr_mode_pages *mode_pages, 7401 u32 page_code, u32 len) 7402 { 7403 struct ipr_mode_page_hdr *mode_hdr; 7404 u32 page_length; 7405 u32 length; 7406 7407 if (!mode_pages || (mode_pages->hdr.length == 0)) 7408 return NULL; 7409 7410 length = (mode_pages->hdr.length + 1) - 4 - mode_pages->hdr.block_desc_len; 7411 mode_hdr = (struct ipr_mode_page_hdr *) 7412 (mode_pages->data + mode_pages->hdr.block_desc_len); 7413 7414 while (length) { 7415 if (IPR_GET_MODE_PAGE_CODE(mode_hdr) == page_code) { 7416 if (mode_hdr->page_length >= (len - sizeof(struct ipr_mode_page_hdr))) 7417 return mode_hdr; 7418 break; 7419 } else { 7420 page_length = (sizeof(struct ipr_mode_page_hdr) + 7421 mode_hdr->page_length); 7422 length -= page_length; 7423 mode_hdr = (struct ipr_mode_page_hdr *) 7424 ((unsigned long)mode_hdr + page_length); 7425 } 7426 } 7427 return NULL; 7428 } 7429 7430 /** 7431 * ipr_check_term_power - Check for term power errors 7432 * @ioa_cfg: ioa config struct 7433 * @mode_pages: IOAFP mode pages buffer 7434 * 7435 * Check the IOAFP's mode page 28 for term power errors 7436 * 7437 * Return value: 7438 * nothing 7439 **/ 7440 static void ipr_check_term_power(struct ipr_ioa_cfg *ioa_cfg, 7441 struct ipr_mode_pages *mode_pages) 7442 { 7443 int i; 7444 int entry_length; 7445 struct ipr_dev_bus_entry *bus; 7446 struct ipr_mode_page28 *mode_page; 7447 7448 mode_page = ipr_get_mode_page(mode_pages, 0x28, 7449 sizeof(struct ipr_mode_page28)); 7450 7451 entry_length = mode_page->entry_length; 7452 7453 bus = mode_page->bus; 7454 7455 for (i = 0; i < mode_page->num_entries; i++) { 7456 if (bus->flags & IPR_SCSI_ATTR_NO_TERM_PWR) { 7457 dev_err(&ioa_cfg->pdev->dev, 7458 "Term power is absent on scsi bus %d\n", 7459 bus->res_addr.bus); 7460 } 7461 7462 bus = (struct ipr_dev_bus_entry *)((char *)bus + 
entry_length); 7463 } 7464 } 7465 7466 /** 7467 * ipr_scsi_bus_speed_limit - Limit the SCSI speed based on SES table 7468 * @ioa_cfg: ioa config struct 7469 * 7470 * Looks through the config table checking for SES devices. If 7471 * the SES device is in the SES table indicating a maximum SCSI 7472 * bus speed, the speed is limited for the bus. 7473 * 7474 * Return value: 7475 * none 7476 **/ 7477 static void ipr_scsi_bus_speed_limit(struct ipr_ioa_cfg *ioa_cfg) 7478 { 7479 u32 max_xfer_rate; 7480 int i; 7481 7482 for (i = 0; i < IPR_MAX_NUM_BUSES; i++) { 7483 max_xfer_rate = ipr_get_max_scsi_speed(ioa_cfg, i, 7484 ioa_cfg->bus_attr[i].bus_width); 7485 7486 if (max_xfer_rate < ioa_cfg->bus_attr[i].max_xfer_rate) 7487 ioa_cfg->bus_attr[i].max_xfer_rate = max_xfer_rate; 7488 } 7489 } 7490 7491 /** 7492 * ipr_modify_ioafp_mode_page_28 - Modify IOAFP Mode Page 28 7493 * @ioa_cfg: ioa config struct 7494 * @mode_pages: mode page 28 buffer 7495 * 7496 * Updates mode page 28 based on driver configuration 7497 * 7498 * Return value: 7499 * none 7500 **/ 7501 static void ipr_modify_ioafp_mode_page_28(struct ipr_ioa_cfg *ioa_cfg, 7502 struct ipr_mode_pages *mode_pages) 7503 { 7504 int i, entry_length; 7505 struct ipr_dev_bus_entry *bus; 7506 struct ipr_bus_attributes *bus_attr; 7507 struct ipr_mode_page28 *mode_page; 7508 7509 mode_page = ipr_get_mode_page(mode_pages, 0x28, 7510 sizeof(struct ipr_mode_page28)); 7511 7512 entry_length = mode_page->entry_length; 7513 7514 /* Loop for each device bus entry */ 7515 for (i = 0, bus = mode_page->bus; 7516 i < mode_page->num_entries; 7517 i++, bus = (struct ipr_dev_bus_entry *)((u8 *)bus + entry_length)) { 7518 if (bus->res_addr.bus > IPR_MAX_NUM_BUSES) { 7519 dev_err(&ioa_cfg->pdev->dev, 7520 "Invalid resource address reported: 0x%08X\n", 7521 IPR_GET_PHYS_LOC(bus->res_addr)); 7522 continue; 7523 } 7524 7525 bus_attr = &ioa_cfg->bus_attr[i]; 7526 bus->extended_reset_delay = IPR_EXTENDED_RESET_DELAY; 7527 bus->bus_width = bus_attr->bus_width; 7528 bus->max_xfer_rate = cpu_to_be32(bus_attr->max_xfer_rate); 7529 bus->flags &= ~IPR_SCSI_ATTR_QAS_MASK; 7530 if (bus_attr->qas_enabled) 7531 bus->flags |= IPR_SCSI_ATTR_ENABLE_QAS; 7532 else 7533 bus->flags |= IPR_SCSI_ATTR_DISABLE_QAS; 7534 } 7535 } 7536 7537 /** 7538 * ipr_build_mode_select - Build a mode select command 7539 * @ipr_cmd: ipr command struct 7540 * @res_handle: resource handle to send command to 7541 * @parm: Byte 2 of Mode Sense command 7542 * @dma_addr: DMA buffer address 7543 * @xfer_len: data transfer length 7544 * 7545 * Return value: 7546 * none 7547 **/ 7548 static void ipr_build_mode_select(struct ipr_cmnd *ipr_cmd, 7549 __be32 res_handle, u8 parm, 7550 dma_addr_t dma_addr, u8 xfer_len) 7551 { 7552 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; 7553 7554 ioarcb->res_handle = res_handle; 7555 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB; 7556 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ; 7557 ioarcb->cmd_pkt.cdb[0] = MODE_SELECT; 7558 ioarcb->cmd_pkt.cdb[1] = parm; 7559 ioarcb->cmd_pkt.cdb[4] = xfer_len; 7560 7561 ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_WRITE_LAST); 7562 } 7563 7564 /** 7565 * ipr_ioafp_mode_select_page28 - Issue Mode Select Page 28 to IOA 7566 * @ipr_cmd: ipr command struct 7567 * 7568 * This function sets up the SCSI bus attributes and sends 7569 * a Mode Select for Page 28 to activate them. 
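 * The sequence is: clamp each bus against the SES speed table, check
 * for missing term power, rewrite the page 28 device bus entries from
 * the driver's bus_attr settings, then issue the MODE SELECT (with the
 * mode page header length zeroed for the write) and advance job_step
 * to ipr_set_supported_devs.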
7570 * 7571 * Return value: 7572 * IPR_RC_JOB_RETURN 7573 **/ 7574 static int ipr_ioafp_mode_select_page28(struct ipr_cmnd *ipr_cmd) 7575 { 7576 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 7577 struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages; 7578 int length; 7579 7580 ENTER; 7581 ipr_scsi_bus_speed_limit(ioa_cfg); 7582 ipr_check_term_power(ioa_cfg, mode_pages); 7583 ipr_modify_ioafp_mode_page_28(ioa_cfg, mode_pages); 7584 length = mode_pages->hdr.length + 1; 7585 mode_pages->hdr.length = 0; 7586 7587 ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11, 7588 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages), 7589 length); 7590 7591 ipr_cmd->job_step = ipr_set_supported_devs; 7592 ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next, 7593 struct ipr_resource_entry, queue); 7594 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT); 7595 7596 LEAVE; 7597 return IPR_RC_JOB_RETURN; 7598 } 7599 7600 /** 7601 * ipr_build_mode_sense - Builds a mode sense command 7602 * @ipr_cmd: ipr command struct 7603 * @res_handle: resource entry struct 7604 * @parm: Byte 2 of mode sense command 7605 * @dma_addr: DMA address of mode sense buffer 7606 * @xfer_len: Size of DMA buffer 7607 * 7608 * Return value: 7609 * none 7610 **/ 7611 static void ipr_build_mode_sense(struct ipr_cmnd *ipr_cmd, 7612 __be32 res_handle, 7613 u8 parm, dma_addr_t dma_addr, u8 xfer_len) 7614 { 7615 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; 7616 7617 ioarcb->res_handle = res_handle; 7618 ioarcb->cmd_pkt.cdb[0] = MODE_SENSE; 7619 ioarcb->cmd_pkt.cdb[2] = parm; 7620 ioarcb->cmd_pkt.cdb[4] = xfer_len; 7621 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB; 7622 7623 ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST); 7624 } 7625 7626 /** 7627 * ipr_reset_cmd_failed - Handle failure of IOA reset command 7628 * @ipr_cmd: ipr command struct 7629 * 7630 * This function handles the failure of an IOA bringup command. 7631 * 7632 * Return value: 7633 * IPR_RC_JOB_RETURN 7634 **/ 7635 static int ipr_reset_cmd_failed(struct ipr_cmnd *ipr_cmd) 7636 { 7637 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 7638 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); 7639 7640 dev_err(&ioa_cfg->pdev->dev, 7641 "0x%02X failed with IOASC: 0x%08X\n", 7642 ipr_cmd->ioarcb.cmd_pkt.cdb[0], ioasc); 7643 7644 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE); 7645 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); 7646 return IPR_RC_JOB_RETURN; 7647 } 7648 7649 /** 7650 * ipr_reset_mode_sense_failed - Handle failure of IOAFP mode sense 7651 * @ipr_cmd: ipr command struct 7652 * 7653 * This function handles the failure of a Mode Sense to the IOAFP. 7654 * Some adapters do not handle all mode pages. 
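 * If the IOASC is IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT the page is
 * treated as unsupported and the reset job skips ahead to
 * ipr_set_supported_devs; any other failure is escalated through
 * ipr_reset_cmd_failed, which logs the IOASC and starts a fresh
 * adapter reset.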
7655 * 7656 * Return value: 7657 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN 7658 **/ 7659 static int ipr_reset_mode_sense_failed(struct ipr_cmnd *ipr_cmd) 7660 { 7661 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 7662 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); 7663 7664 if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) { 7665 ipr_cmd->job_step = ipr_set_supported_devs; 7666 ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next, 7667 struct ipr_resource_entry, queue); 7668 return IPR_RC_JOB_CONTINUE; 7669 } 7670 7671 return ipr_reset_cmd_failed(ipr_cmd); 7672 } 7673 7674 /** 7675 * ipr_ioafp_mode_sense_page28 - Issue Mode Sense Page 28 to IOA 7676 * @ipr_cmd: ipr command struct 7677 * 7678 * This function send a Page 28 mode sense to the IOA to 7679 * retrieve SCSI bus attributes. 7680 * 7681 * Return value: 7682 * IPR_RC_JOB_RETURN 7683 **/ 7684 static int ipr_ioafp_mode_sense_page28(struct ipr_cmnd *ipr_cmd) 7685 { 7686 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 7687 7688 ENTER; 7689 ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 7690 0x28, ioa_cfg->vpd_cbs_dma + 7691 offsetof(struct ipr_misc_cbs, mode_pages), 7692 sizeof(struct ipr_mode_pages)); 7693 7694 ipr_cmd->job_step = ipr_ioafp_mode_select_page28; 7695 ipr_cmd->job_step_failed = ipr_reset_mode_sense_failed; 7696 7697 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT); 7698 7699 LEAVE; 7700 return IPR_RC_JOB_RETURN; 7701 } 7702 7703 /** 7704 * ipr_ioafp_mode_select_page24 - Issue Mode Select to IOA 7705 * @ipr_cmd: ipr command struct 7706 * 7707 * This function enables dual IOA RAID support if possible. 7708 * 7709 * Return value: 7710 * IPR_RC_JOB_RETURN 7711 **/ 7712 static int ipr_ioafp_mode_select_page24(struct ipr_cmnd *ipr_cmd) 7713 { 7714 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 7715 struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages; 7716 struct ipr_mode_page24 *mode_page; 7717 int length; 7718 7719 ENTER; 7720 mode_page = ipr_get_mode_page(mode_pages, 0x24, 7721 sizeof(struct ipr_mode_page24)); 7722 7723 if (mode_page) 7724 mode_page->flags |= IPR_ENABLE_DUAL_IOA_AF; 7725 7726 length = mode_pages->hdr.length + 1; 7727 mode_pages->hdr.length = 0; 7728 7729 ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11, 7730 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages), 7731 length); 7732 7733 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28; 7734 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT); 7735 7736 LEAVE; 7737 return IPR_RC_JOB_RETURN; 7738 } 7739 7740 /** 7741 * ipr_reset_mode_sense_page24_failed - Handle failure of IOAFP mode sense 7742 * @ipr_cmd: ipr command struct 7743 * 7744 * This function handles the failure of a Mode Sense to the IOAFP. 7745 * Some adapters do not handle all mode pages. 7746 * 7747 * Return value: 7748 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN 7749 **/ 7750 static int ipr_reset_mode_sense_page24_failed(struct ipr_cmnd *ipr_cmd) 7751 { 7752 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); 7753 7754 if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) { 7755 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28; 7756 return IPR_RC_JOB_CONTINUE; 7757 } 7758 7759 return ipr_reset_cmd_failed(ipr_cmd); 7760 } 7761 7762 /** 7763 * ipr_ioafp_mode_sense_page24 - Issue Page 24 Mode Sense to IOA 7764 * @ipr_cmd: ipr command struct 7765 * 7766 * This function send a mode sense to the IOA to retrieve 7767 * the IOA Advanced Function Control mode page. 
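 * This step is only entered when dual IOA RAID is configured (see
 * ipr_init_res_table).  The follow-on ipr_ioafp_mode_select_page24
 * sets IPR_ENABLE_DUAL_IOA_AF in the page and writes it back, while an
 * unsupported-page failure simply falls through to the page 28 mode
 * sense.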
7768 * 7769 * Return value: 7770 * IPR_RC_JOB_RETURN 7771 **/ 7772 static int ipr_ioafp_mode_sense_page24(struct ipr_cmnd *ipr_cmd) 7773 { 7774 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 7775 7776 ENTER; 7777 ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 7778 0x24, ioa_cfg->vpd_cbs_dma + 7779 offsetof(struct ipr_misc_cbs, mode_pages), 7780 sizeof(struct ipr_mode_pages)); 7781 7782 ipr_cmd->job_step = ipr_ioafp_mode_select_page24; 7783 ipr_cmd->job_step_failed = ipr_reset_mode_sense_page24_failed; 7784 7785 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT); 7786 7787 LEAVE; 7788 return IPR_RC_JOB_RETURN; 7789 } 7790 7791 /** 7792 * ipr_init_res_table - Initialize the resource table 7793 * @ipr_cmd: ipr command struct 7794 * 7795 * This function looks through the existing resource table, comparing 7796 * it with the config table. This function will take care of old/new 7797 * devices and schedule adding/removing them from the mid-layer 7798 * as appropriate. 7799 * 7800 * Return value: 7801 * IPR_RC_JOB_CONTINUE 7802 **/ 7803 static int ipr_init_res_table(struct ipr_cmnd *ipr_cmd) 7804 { 7805 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 7806 struct ipr_resource_entry *res, *temp; 7807 struct ipr_config_table_entry_wrapper cfgtew; 7808 int entries, found, flag, i; 7809 LIST_HEAD(old_res); 7810 7811 ENTER; 7812 if (ioa_cfg->sis64) 7813 flag = ioa_cfg->u.cfg_table64->hdr64.flags; 7814 else 7815 flag = ioa_cfg->u.cfg_table->hdr.flags; 7816 7817 if (flag & IPR_UCODE_DOWNLOAD_REQ) 7818 dev_err(&ioa_cfg->pdev->dev, "Microcode download required\n"); 7819 7820 list_for_each_entry_safe(res, temp, &ioa_cfg->used_res_q, queue) 7821 list_move_tail(&res->queue, &old_res); 7822 7823 if (ioa_cfg->sis64) 7824 entries = be16_to_cpu(ioa_cfg->u.cfg_table64->hdr64.num_entries); 7825 else 7826 entries = ioa_cfg->u.cfg_table->hdr.num_entries; 7827 7828 for (i = 0; i < entries; i++) { 7829 if (ioa_cfg->sis64) 7830 cfgtew.u.cfgte64 = &ioa_cfg->u.cfg_table64->dev[i]; 7831 else 7832 cfgtew.u.cfgte = &ioa_cfg->u.cfg_table->dev[i]; 7833 found = 0; 7834 7835 list_for_each_entry_safe(res, temp, &old_res, queue) { 7836 if (ipr_is_same_device(res, &cfgtew)) { 7837 list_move_tail(&res->queue, &ioa_cfg->used_res_q); 7838 found = 1; 7839 break; 7840 } 7841 } 7842 7843 if (!found) { 7844 if (list_empty(&ioa_cfg->free_res_q)) { 7845 dev_err(&ioa_cfg->pdev->dev, "Too many devices attached\n"); 7846 break; 7847 } 7848 7849 found = 1; 7850 res = list_entry(ioa_cfg->free_res_q.next, 7851 struct ipr_resource_entry, queue); 7852 list_move_tail(&res->queue, &ioa_cfg->used_res_q); 7853 ipr_init_res_entry(res, &cfgtew); 7854 res->add_to_ml = 1; 7855 } else if (res->sdev && (ipr_is_vset_device(res) || ipr_is_scsi_disk(res))) 7856 res->sdev->allow_restart = 1; 7857 7858 if (found) 7859 ipr_update_res_entry(res, &cfgtew); 7860 } 7861 7862 list_for_each_entry_safe(res, temp, &old_res, queue) { 7863 if (res->sdev) { 7864 res->del_from_ml = 1; 7865 res->res_handle = IPR_INVALID_RES_HANDLE; 7866 list_move_tail(&res->queue, &ioa_cfg->used_res_q); 7867 } 7868 } 7869 7870 list_for_each_entry_safe(res, temp, &old_res, queue) { 7871 ipr_clear_res_target(res); 7872 list_move_tail(&res->queue, &ioa_cfg->free_res_q); 7873 } 7874 7875 if (ioa_cfg->dual_raid && ipr_dual_ioa_raid) 7876 ipr_cmd->job_step = ipr_ioafp_mode_sense_page24; 7877 else 7878 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28; 7879 7880 LEAVE; 7881 return IPR_RC_JOB_CONTINUE; 7882 } 7883 7884 /** 7885 * ipr_ioafp_query_ioa_cfg - Send a 
Query IOA Config to the adapter. 7886 * @ipr_cmd: ipr command struct 7887 * 7888 * This function sends a Query IOA Configuration command 7889 * to the adapter to retrieve the IOA configuration table. 7890 * 7891 * Return value: 7892 * IPR_RC_JOB_RETURN 7893 **/ 7894 static int ipr_ioafp_query_ioa_cfg(struct ipr_cmnd *ipr_cmd) 7895 { 7896 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 7897 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; 7898 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data; 7899 struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap; 7900 7901 ENTER; 7902 if (cap->cap & IPR_CAP_DUAL_IOA_RAID) 7903 ioa_cfg->dual_raid = 1; 7904 dev_info(&ioa_cfg->pdev->dev, "Adapter firmware version: %02X%02X%02X%02X\n", 7905 ucode_vpd->major_release, ucode_vpd->card_type, 7906 ucode_vpd->minor_release[0], ucode_vpd->minor_release[1]); 7907 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD; 7908 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE); 7909 7910 ioarcb->cmd_pkt.cdb[0] = IPR_QUERY_IOA_CONFIG; 7911 ioarcb->cmd_pkt.cdb[6] = (ioa_cfg->cfg_table_size >> 16) & 0xff; 7912 ioarcb->cmd_pkt.cdb[7] = (ioa_cfg->cfg_table_size >> 8) & 0xff; 7913 ioarcb->cmd_pkt.cdb[8] = ioa_cfg->cfg_table_size & 0xff; 7914 7915 ipr_init_ioadl(ipr_cmd, ioa_cfg->cfg_table_dma, ioa_cfg->cfg_table_size, 7916 IPR_IOADL_FLAGS_READ_LAST); 7917 7918 ipr_cmd->job_step = ipr_init_res_table; 7919 7920 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT); 7921 7922 LEAVE; 7923 return IPR_RC_JOB_RETURN; 7924 } 7925 7926 static int ipr_ioa_service_action_failed(struct ipr_cmnd *ipr_cmd) 7927 { 7928 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); 7929 7930 if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) 7931 return IPR_RC_JOB_CONTINUE; 7932 7933 return ipr_reset_cmd_failed(ipr_cmd); 7934 } 7935 7936 static void ipr_build_ioa_service_action(struct ipr_cmnd *ipr_cmd, 7937 __be32 res_handle, u8 sa_code) 7938 { 7939 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; 7940 7941 ioarcb->res_handle = res_handle; 7942 ioarcb->cmd_pkt.cdb[0] = IPR_IOA_SERVICE_ACTION; 7943 ioarcb->cmd_pkt.cdb[1] = sa_code; 7944 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD; 7945 } 7946 7947 /** 7948 * ipr_ioafp_set_caching_parameters - Issue Set Cache parameters service 7949 * action 7950 * @ipr_cmd: ipr command struct 7951 * 7952 * Return value: 7953 * none 7954 **/ 7955 static int ipr_ioafp_set_caching_parameters(struct ipr_cmnd *ipr_cmd) 7956 { 7957 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; 7958 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 7959 struct ipr_inquiry_pageC4 *pageC4 = &ioa_cfg->vpd_cbs->pageC4_data; 7960 7961 ENTER; 7962 7963 ipr_cmd->job_step = ipr_ioafp_query_ioa_cfg; 7964 7965 if (pageC4->cache_cap[0] & IPR_CAP_SYNC_CACHE) { 7966 ipr_build_ioa_service_action(ipr_cmd, 7967 cpu_to_be32(IPR_IOA_RES_HANDLE), 7968 IPR_IOA_SA_CHANGE_CACHE_PARAMS); 7969 7970 ioarcb->cmd_pkt.cdb[2] = 0x40; 7971 7972 ipr_cmd->job_step_failed = ipr_ioa_service_action_failed; 7973 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, 7974 IPR_SET_SUP_DEVICE_TIMEOUT); 7975 7976 LEAVE; 7977 return IPR_RC_JOB_RETURN; 7978 } 7979 7980 LEAVE; 7981 return IPR_RC_JOB_CONTINUE; 7982 } 7983 7984 /** 7985 * ipr_ioafp_inquiry - Send an Inquiry to the adapter. 7986 * @ipr_cmd: ipr command struct 7987 * @flags: flags to send 7988 * @page: page to inquire 7989 * @dma_addr: DMA address 7990 * @xfer_len: transfer data length 7991 * 7992 * This utility function sends an inquiry to the adapter. 
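 * The CDB is a plain 6-byte INQUIRY: byte 1 carries the EVPD flag,
 * byte 2 the page code and byte 4 the allocation length, with the
 * response DMA'd into the caller's buffer.  A typical caller looks
 * like (illustrative, matching the capabilities fetch below):
 *
 *	ipr_ioafp_inquiry(ipr_cmd, 1, 0xD0,
 *			  ioa_cfg->vpd_cbs_dma +
 *				offsetof(struct ipr_misc_cbs, cap),
 *			  sizeof(struct ipr_inquiry_cap));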
7993 * 7994 * Return value: 7995 * none 7996 **/ 7997 static void ipr_ioafp_inquiry(struct ipr_cmnd *ipr_cmd, u8 flags, u8 page, 7998 dma_addr_t dma_addr, u8 xfer_len) 7999 { 8000 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; 8001 8002 ENTER; 8003 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB; 8004 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE); 8005 8006 ioarcb->cmd_pkt.cdb[0] = INQUIRY; 8007 ioarcb->cmd_pkt.cdb[1] = flags; 8008 ioarcb->cmd_pkt.cdb[2] = page; 8009 ioarcb->cmd_pkt.cdb[4] = xfer_len; 8010 8011 ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST); 8012 8013 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT); 8014 LEAVE; 8015 } 8016 8017 /** 8018 * ipr_inquiry_page_supported - Is the given inquiry page supported 8019 * @page0: inquiry page 0 buffer 8020 * @page: page code. 8021 * 8022 * This function determines if the specified inquiry page is supported. 8023 * 8024 * Return value: 8025 * 1 if page is supported / 0 if not 8026 **/ 8027 static int ipr_inquiry_page_supported(struct ipr_inquiry_page0 *page0, u8 page) 8028 { 8029 int i; 8030 8031 for (i = 0; i < min_t(u8, page0->len, IPR_INQUIRY_PAGE0_ENTRIES); i++) 8032 if (page0->page[i] == page) 8033 return 1; 8034 8035 return 0; 8036 } 8037 8038 /** 8039 * ipr_ioafp_pageC4_inquiry - Send a Page 0xC4 Inquiry to the adapter. 8040 * @ipr_cmd: ipr command struct 8041 * 8042 * This function sends a Page 0xC4 inquiry to the adapter 8043 * to retrieve software VPD information. 8044 * 8045 * Return value: 8046 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN 8047 **/ 8048 static int ipr_ioafp_pageC4_inquiry(struct ipr_cmnd *ipr_cmd) 8049 { 8050 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 8051 struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data; 8052 struct ipr_inquiry_pageC4 *pageC4 = &ioa_cfg->vpd_cbs->pageC4_data; 8053 8054 ENTER; 8055 ipr_cmd->job_step = ipr_ioafp_set_caching_parameters; 8056 memset(pageC4, 0, sizeof(*pageC4)); 8057 8058 if (ipr_inquiry_page_supported(page0, 0xC4)) { 8059 ipr_ioafp_inquiry(ipr_cmd, 1, 0xC4, 8060 (ioa_cfg->vpd_cbs_dma 8061 + offsetof(struct ipr_misc_cbs, 8062 pageC4_data)), 8063 sizeof(struct ipr_inquiry_pageC4)); 8064 return IPR_RC_JOB_RETURN; 8065 } 8066 8067 LEAVE; 8068 return IPR_RC_JOB_CONTINUE; 8069 } 8070 8071 /** 8072 * ipr_ioafp_cap_inquiry - Send a Page 0xD0 Inquiry to the adapter. 8073 * @ipr_cmd: ipr command struct 8074 * 8075 * This function sends a Page 0xD0 inquiry to the adapter 8076 * to retrieve adapter capabilities. 8077 * 8078 * Return value: 8079 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN 8080 **/ 8081 static int ipr_ioafp_cap_inquiry(struct ipr_cmnd *ipr_cmd) 8082 { 8083 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 8084 struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data; 8085 struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap; 8086 8087 ENTER; 8088 ipr_cmd->job_step = ipr_ioafp_pageC4_inquiry; 8089 memset(cap, 0, sizeof(*cap)); 8090 8091 if (ipr_inquiry_page_supported(page0, 0xD0)) { 8092 ipr_ioafp_inquiry(ipr_cmd, 1, 0xD0, 8093 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, cap), 8094 sizeof(struct ipr_inquiry_cap)); 8095 return IPR_RC_JOB_RETURN; 8096 } 8097 8098 LEAVE; 8099 return IPR_RC_JOB_CONTINUE; 8100 } 8101 8102 /** 8103 * ipr_ioafp_page3_inquiry - Send a Page 3 Inquiry to the adapter. 8104 * @ipr_cmd: ipr command struct 8105 * 8106 * This function sends a Page 3 inquiry to the adapter 8107 * to retrieve software VPD information. 
8108 * 8109 * Return value: 8110 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN 8111 **/ 8112 static int ipr_ioafp_page3_inquiry(struct ipr_cmnd *ipr_cmd) 8113 { 8114 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 8115 8116 ENTER; 8117 8118 ipr_cmd->job_step = ipr_ioafp_cap_inquiry; 8119 8120 ipr_ioafp_inquiry(ipr_cmd, 1, 3, 8121 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page3_data), 8122 sizeof(struct ipr_inquiry_page3)); 8123 8124 LEAVE; 8125 return IPR_RC_JOB_RETURN; 8126 } 8127 8128 /** 8129 * ipr_ioafp_page0_inquiry - Send a Page 0 Inquiry to the adapter. 8130 * @ipr_cmd: ipr command struct 8131 * 8132 * This function sends a Page 0 inquiry to the adapter 8133 * to retrieve supported inquiry pages. 8134 * 8135 * Return value: 8136 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN 8137 **/ 8138 static int ipr_ioafp_page0_inquiry(struct ipr_cmnd *ipr_cmd) 8139 { 8140 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 8141 char type[5]; 8142 8143 ENTER; 8144 8145 /* Grab the type out of the VPD and store it away */ 8146 memcpy(type, ioa_cfg->vpd_cbs->ioa_vpd.std_inq_data.vpids.product_id, 4); 8147 type[4] = '\0'; 8148 ioa_cfg->type = simple_strtoul((char *)type, NULL, 16); 8149 8150 if (ipr_invalid_adapter(ioa_cfg)) { 8151 dev_err(&ioa_cfg->pdev->dev, 8152 "Adapter not supported in this hardware configuration.\n"); 8153 8154 if (!ipr_testmode) { 8155 ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES; 8156 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE); 8157 list_add_tail(&ipr_cmd->queue, 8158 &ioa_cfg->hrrq->hrrq_free_q); 8159 return IPR_RC_JOB_RETURN; 8160 } 8161 } 8162 8163 ipr_cmd->job_step = ipr_ioafp_page3_inquiry; 8164 8165 ipr_ioafp_inquiry(ipr_cmd, 1, 0, 8166 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page0_data), 8167 sizeof(struct ipr_inquiry_page0)); 8168 8169 LEAVE; 8170 return IPR_RC_JOB_RETURN; 8171 } 8172 8173 /** 8174 * ipr_ioafp_std_inquiry - Send a Standard Inquiry to the adapter. 8175 * @ipr_cmd: ipr command struct 8176 * 8177 * This function sends a standard inquiry to the adapter. 8178 * 8179 * Return value: 8180 * IPR_RC_JOB_RETURN 8181 **/ 8182 static int ipr_ioafp_std_inquiry(struct ipr_cmnd *ipr_cmd) 8183 { 8184 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 8185 8186 ENTER; 8187 ipr_cmd->job_step = ipr_ioafp_page0_inquiry; 8188 8189 ipr_ioafp_inquiry(ipr_cmd, 0, 0, 8190 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, ioa_vpd), 8191 sizeof(struct ipr_ioa_vpd)); 8192 8193 LEAVE; 8194 return IPR_RC_JOB_RETURN; 8195 } 8196 8197 /** 8198 * ipr_ioafp_identify_hrrq - Send Identify Host RRQ. 8199 * @ipr_cmd: ipr command struct 8200 * 8201 * This function send an Identify Host Request Response Queue 8202 * command to establish the HRRQ with the adapter. 
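 * The CDB describes one host RRQ per invocation: bytes 2-5 carry the
 * low 32 bits of the queue DMA address, bytes 10-13 the upper 32 bits
 * on SIS64, bytes 7-8 the queue length in bytes, and bytes 9 and 14
 * the queue index when multiple HRRQs are enabled.  The job step
 * re-arms itself until every HRRQ has been identified, then moves on
 * to the standard inquiry.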
8203 * 8204 * Return value: 8205 * IPR_RC_JOB_RETURN 8206 **/ 8207 static int ipr_ioafp_identify_hrrq(struct ipr_cmnd *ipr_cmd) 8208 { 8209 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 8210 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; 8211 struct ipr_hrr_queue *hrrq; 8212 8213 ENTER; 8214 ipr_cmd->job_step = ipr_ioafp_std_inquiry; 8215 if (ioa_cfg->identify_hrrq_index == 0) 8216 dev_info(&ioa_cfg->pdev->dev, "Starting IOA initialization sequence.\n"); 8217 8218 if (ioa_cfg->identify_hrrq_index < ioa_cfg->hrrq_num) { 8219 hrrq = &ioa_cfg->hrrq[ioa_cfg->identify_hrrq_index]; 8220 8221 ioarcb->cmd_pkt.cdb[0] = IPR_ID_HOST_RR_Q; 8222 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE); 8223 8224 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD; 8225 if (ioa_cfg->sis64) 8226 ioarcb->cmd_pkt.cdb[1] = 0x1; 8227 8228 if (ioa_cfg->nvectors == 1) 8229 ioarcb->cmd_pkt.cdb[1] &= ~IPR_ID_HRRQ_SELE_ENABLE; 8230 else 8231 ioarcb->cmd_pkt.cdb[1] |= IPR_ID_HRRQ_SELE_ENABLE; 8232 8233 ioarcb->cmd_pkt.cdb[2] = 8234 ((u64) hrrq->host_rrq_dma >> 24) & 0xff; 8235 ioarcb->cmd_pkt.cdb[3] = 8236 ((u64) hrrq->host_rrq_dma >> 16) & 0xff; 8237 ioarcb->cmd_pkt.cdb[4] = 8238 ((u64) hrrq->host_rrq_dma >> 8) & 0xff; 8239 ioarcb->cmd_pkt.cdb[5] = 8240 ((u64) hrrq->host_rrq_dma) & 0xff; 8241 ioarcb->cmd_pkt.cdb[7] = 8242 ((sizeof(u32) * hrrq->size) >> 8) & 0xff; 8243 ioarcb->cmd_pkt.cdb[8] = 8244 (sizeof(u32) * hrrq->size) & 0xff; 8245 8246 if (ioarcb->cmd_pkt.cdb[1] & IPR_ID_HRRQ_SELE_ENABLE) 8247 ioarcb->cmd_pkt.cdb[9] = 8248 ioa_cfg->identify_hrrq_index; 8249 8250 if (ioa_cfg->sis64) { 8251 ioarcb->cmd_pkt.cdb[10] = 8252 ((u64) hrrq->host_rrq_dma >> 56) & 0xff; 8253 ioarcb->cmd_pkt.cdb[11] = 8254 ((u64) hrrq->host_rrq_dma >> 48) & 0xff; 8255 ioarcb->cmd_pkt.cdb[12] = 8256 ((u64) hrrq->host_rrq_dma >> 40) & 0xff; 8257 ioarcb->cmd_pkt.cdb[13] = 8258 ((u64) hrrq->host_rrq_dma >> 32) & 0xff; 8259 } 8260 8261 if (ioarcb->cmd_pkt.cdb[1] & IPR_ID_HRRQ_SELE_ENABLE) 8262 ioarcb->cmd_pkt.cdb[14] = 8263 ioa_cfg->identify_hrrq_index; 8264 8265 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, 8266 IPR_INTERNAL_TIMEOUT); 8267 8268 if (++ioa_cfg->identify_hrrq_index < ioa_cfg->hrrq_num) 8269 ipr_cmd->job_step = ipr_ioafp_identify_hrrq; 8270 8271 LEAVE; 8272 return IPR_RC_JOB_RETURN; 8273 } 8274 8275 LEAVE; 8276 return IPR_RC_JOB_CONTINUE; 8277 } 8278 8279 /** 8280 * ipr_reset_timer_done - Adapter reset timer function 8281 * @t: Timer context used to fetch ipr command struct 8282 * 8283 * Description: This function is used in adapter reset processing 8284 * for timing events. If the reset_cmd pointer in the IOA 8285 * config struct is not this adapter's we are doing nested 8286 * resets and fail_all_ops will take care of freeing the 8287 * command block. 8288 * 8289 * Return value: 8290 * none 8291 **/ 8292 static void ipr_reset_timer_done(struct timer_list *t) 8293 { 8294 struct ipr_cmnd *ipr_cmd = from_timer(ipr_cmd, t, timer); 8295 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 8296 unsigned long lock_flags = 0; 8297 8298 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 8299 8300 if (ioa_cfg->reset_cmd == ipr_cmd) { 8301 list_del(&ipr_cmd->queue); 8302 ipr_cmd->done(ipr_cmd); 8303 } 8304 8305 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 8306 } 8307 8308 /** 8309 * ipr_reset_start_timer - Start a timer for adapter reset job 8310 * @ipr_cmd: ipr command struct 8311 * @timeout: timeout value 8312 * 8313 * Description: This function is used in adapter reset processing 8314 * for timing events. 
If the reset_cmd pointer in the IOA 8315 * config struct is not this adapter's we are doing nested 8316 * resets and fail_all_ops will take care of freeing the 8317 * command block. 8318 * 8319 * Return value: 8320 * none 8321 **/ 8322 static void ipr_reset_start_timer(struct ipr_cmnd *ipr_cmd, 8323 unsigned long timeout) 8324 { 8325 8326 ENTER; 8327 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q); 8328 ipr_cmd->done = ipr_reset_ioa_job; 8329 8330 ipr_cmd->timer.expires = jiffies + timeout; 8331 ipr_cmd->timer.function = ipr_reset_timer_done; 8332 add_timer(&ipr_cmd->timer); 8333 } 8334 8335 /** 8336 * ipr_init_ioa_mem - Initialize ioa_cfg control block 8337 * @ioa_cfg: ioa cfg struct 8338 * 8339 * Return value: 8340 * nothing 8341 **/ 8342 static void ipr_init_ioa_mem(struct ipr_ioa_cfg *ioa_cfg) 8343 { 8344 struct ipr_hrr_queue *hrrq; 8345 8346 for_each_hrrq(hrrq, ioa_cfg) { 8347 spin_lock(&hrrq->_lock); 8348 memset(hrrq->host_rrq, 0, sizeof(u32) * hrrq->size); 8349 8350 /* Initialize Host RRQ pointers */ 8351 hrrq->hrrq_start = hrrq->host_rrq; 8352 hrrq->hrrq_end = &hrrq->host_rrq[hrrq->size - 1]; 8353 hrrq->hrrq_curr = hrrq->hrrq_start; 8354 hrrq->toggle_bit = 1; 8355 spin_unlock(&hrrq->_lock); 8356 } 8357 wmb(); 8358 8359 ioa_cfg->identify_hrrq_index = 0; 8360 if (ioa_cfg->hrrq_num == 1) 8361 atomic_set(&ioa_cfg->hrrq_index, 0); 8362 else 8363 atomic_set(&ioa_cfg->hrrq_index, 1); 8364 8365 /* Zero out config table */ 8366 memset(ioa_cfg->u.cfg_table, 0, ioa_cfg->cfg_table_size); 8367 } 8368 8369 /** 8370 * ipr_reset_next_stage - Process IPL stage change based on feedback register. 8371 * @ipr_cmd: ipr command struct 8372 * 8373 * Return value: 8374 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN 8375 **/ 8376 static int ipr_reset_next_stage(struct ipr_cmnd *ipr_cmd) 8377 { 8378 unsigned long stage, stage_time; 8379 u32 feedback; 8380 volatile u32 int_reg; 8381 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 8382 u64 maskval = 0; 8383 8384 feedback = readl(ioa_cfg->regs.init_feedback_reg); 8385 stage = feedback & IPR_IPL_INIT_STAGE_MASK; 8386 stage_time = feedback & IPR_IPL_INIT_STAGE_TIME_MASK; 8387 8388 ipr_dbg("IPL stage = 0x%lx, IPL stage time = %ld\n", stage, stage_time); 8389 8390 /* sanity check the stage_time value */ 8391 if (stage_time == 0) 8392 stage_time = IPR_IPL_INIT_DEFAULT_STAGE_TIME; 8393 else if (stage_time < IPR_IPL_INIT_MIN_STAGE_TIME) 8394 stage_time = IPR_IPL_INIT_MIN_STAGE_TIME; 8395 else if (stage_time > IPR_LONG_OPERATIONAL_TIMEOUT) 8396 stage_time = IPR_LONG_OPERATIONAL_TIMEOUT; 8397 8398 if (stage == IPR_IPL_INIT_STAGE_UNKNOWN) { 8399 writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.set_interrupt_mask_reg); 8400 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg); 8401 stage_time = ioa_cfg->transop_timeout; 8402 ipr_cmd->job_step = ipr_ioafp_identify_hrrq; 8403 } else if (stage == IPR_IPL_INIT_STAGE_TRANSOP) { 8404 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32); 8405 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) { 8406 ipr_cmd->job_step = ipr_ioafp_identify_hrrq; 8407 maskval = IPR_PCII_IPL_STAGE_CHANGE; 8408 maskval = (maskval << 32) | IPR_PCII_IOA_TRANS_TO_OPER; 8409 writeq(maskval, ioa_cfg->regs.set_interrupt_mask_reg); 8410 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg); 8411 return IPR_RC_JOB_CONTINUE; 8412 } 8413 } 8414 8415 ipr_cmd->timer.expires = jiffies + stage_time * HZ; 8416 ipr_cmd->timer.function = ipr_oper_timeout; 8417 ipr_cmd->done = ipr_reset_ioa_job; 8418 add_timer(&ipr_cmd->timer); 8419 8420 
list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q); 8421 8422 return IPR_RC_JOB_RETURN; 8423 } 8424 8425 /** 8426 * ipr_reset_enable_ioa - Enable the IOA following a reset. 8427 * @ipr_cmd: ipr command struct 8428 * 8429 * This function reinitializes some control blocks and 8430 * enables destructive diagnostics on the adapter. 8431 * 8432 * Return value: 8433 * IPR_RC_JOB_RETURN 8434 **/ 8435 static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd) 8436 { 8437 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 8438 volatile u32 int_reg; 8439 volatile u64 maskval; 8440 int i; 8441 8442 ENTER; 8443 ipr_cmd->job_step = ipr_ioafp_identify_hrrq; 8444 ipr_init_ioa_mem(ioa_cfg); 8445 8446 for (i = 0; i < ioa_cfg->hrrq_num; i++) { 8447 spin_lock(&ioa_cfg->hrrq[i]._lock); 8448 ioa_cfg->hrrq[i].allow_interrupts = 1; 8449 spin_unlock(&ioa_cfg->hrrq[i]._lock); 8450 } 8451 if (ioa_cfg->sis64) { 8452 /* Set the adapter to the correct endian mode. */ 8453 writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg); 8454 int_reg = readl(ioa_cfg->regs.endian_swap_reg); 8455 } 8456 8457 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32); 8458 8459 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) { 8460 writel((IPR_PCII_ERROR_INTERRUPTS | IPR_PCII_HRRQ_UPDATED), 8461 ioa_cfg->regs.clr_interrupt_mask_reg32); 8462 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg); 8463 return IPR_RC_JOB_CONTINUE; 8464 } 8465 8466 /* Enable destructive diagnostics on IOA */ 8467 writel(ioa_cfg->doorbell, ioa_cfg->regs.set_uproc_interrupt_reg32); 8468 8469 if (ioa_cfg->sis64) { 8470 maskval = IPR_PCII_IPL_STAGE_CHANGE; 8471 maskval = (maskval << 32) | IPR_PCII_OPER_INTERRUPTS; 8472 writeq(maskval, ioa_cfg->regs.clr_interrupt_mask_reg); 8473 } else 8474 writel(IPR_PCII_OPER_INTERRUPTS, ioa_cfg->regs.clr_interrupt_mask_reg32); 8475 8476 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg); 8477 8478 dev_info(&ioa_cfg->pdev->dev, "Initializing IOA.\n"); 8479 8480 if (ioa_cfg->sis64) { 8481 ipr_cmd->job_step = ipr_reset_next_stage; 8482 return IPR_RC_JOB_CONTINUE; 8483 } 8484 8485 ipr_cmd->timer.expires = jiffies + (ioa_cfg->transop_timeout * HZ); 8486 ipr_cmd->timer.function = ipr_oper_timeout; 8487 ipr_cmd->done = ipr_reset_ioa_job; 8488 add_timer(&ipr_cmd->timer); 8489 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q); 8490 8491 LEAVE; 8492 return IPR_RC_JOB_RETURN; 8493 } 8494 8495 /** 8496 * ipr_reset_wait_for_dump - Wait for a dump to timeout. 8497 * @ipr_cmd: ipr command struct 8498 * 8499 * This function is invoked when an adapter dump has run out 8500 * of processing time. 8501 * 8502 * Return value: 8503 * IPR_RC_JOB_CONTINUE 8504 **/ 8505 static int ipr_reset_wait_for_dump(struct ipr_cmnd *ipr_cmd) 8506 { 8507 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 8508 8509 if (ioa_cfg->sdt_state == GET_DUMP) 8510 ioa_cfg->sdt_state = WAIT_FOR_DUMP; 8511 else if (ioa_cfg->sdt_state == READ_DUMP) 8512 ioa_cfg->sdt_state = ABORT_DUMP; 8513 8514 ioa_cfg->dump_timeout = 1; 8515 ipr_cmd->job_step = ipr_reset_alert; 8516 8517 return IPR_RC_JOB_CONTINUE; 8518 } 8519 8520 /** 8521 * ipr_unit_check_no_data - Log a unit check/no data error log 8522 * @ioa_cfg: ioa config struct 8523 * 8524 * Logs an error indicating the adapter unit checked, but for some 8525 * reason, we were unable to fetch the unit check buffer. 
8526 * 8527 * Return value: 8528 * nothing 8529 **/ 8530 static void ipr_unit_check_no_data(struct ipr_ioa_cfg *ioa_cfg) 8531 { 8532 ioa_cfg->errors_logged++; 8533 dev_err(&ioa_cfg->pdev->dev, "IOA unit check with no data\n"); 8534 } 8535 8536 /** 8537 * ipr_get_unit_check_buffer - Get the unit check buffer from the IOA 8538 * @ioa_cfg: ioa config struct 8539 * 8540 * Fetches the unit check buffer from the adapter by clocking the data 8541 * through the mailbox register. 8542 * 8543 * Return value: 8544 * nothing 8545 **/ 8546 static void ipr_get_unit_check_buffer(struct ipr_ioa_cfg *ioa_cfg) 8547 { 8548 unsigned long mailbox; 8549 struct ipr_hostrcb *hostrcb; 8550 struct ipr_uc_sdt sdt; 8551 int rc, length; 8552 u32 ioasc; 8553 8554 mailbox = readl(ioa_cfg->ioa_mailbox); 8555 8556 if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(mailbox)) { 8557 ipr_unit_check_no_data(ioa_cfg); 8558 return; 8559 } 8560 8561 memset(&sdt, 0, sizeof(struct ipr_uc_sdt)); 8562 rc = ipr_get_ldump_data_section(ioa_cfg, mailbox, (__be32 *) &sdt, 8563 (sizeof(struct ipr_uc_sdt)) / sizeof(__be32)); 8564 8565 if (rc || !(sdt.entry[0].flags & IPR_SDT_VALID_ENTRY) || 8566 ((be32_to_cpu(sdt.hdr.state) != IPR_FMT3_SDT_READY_TO_USE) && 8567 (be32_to_cpu(sdt.hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) { 8568 ipr_unit_check_no_data(ioa_cfg); 8569 return; 8570 } 8571 8572 /* Find length of the first sdt entry (UC buffer) */ 8573 if (be32_to_cpu(sdt.hdr.state) == IPR_FMT3_SDT_READY_TO_USE) 8574 length = be32_to_cpu(sdt.entry[0].end_token); 8575 else 8576 length = (be32_to_cpu(sdt.entry[0].end_token) - 8577 be32_to_cpu(sdt.entry[0].start_token)) & 8578 IPR_FMT2_MBX_ADDR_MASK; 8579 8580 hostrcb = list_entry(ioa_cfg->hostrcb_free_q.next, 8581 struct ipr_hostrcb, queue); 8582 list_del_init(&hostrcb->queue); 8583 memset(&hostrcb->hcam, 0, sizeof(hostrcb->hcam)); 8584 8585 rc = ipr_get_ldump_data_section(ioa_cfg, 8586 be32_to_cpu(sdt.entry[0].start_token), 8587 (__be32 *)&hostrcb->hcam, 8588 min(length, (int)sizeof(hostrcb->hcam)) / sizeof(__be32)); 8589 8590 if (!rc) { 8591 ipr_handle_log_data(ioa_cfg, hostrcb); 8592 ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc); 8593 if (ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED && 8594 ioa_cfg->sdt_state == GET_DUMP) 8595 ioa_cfg->sdt_state = WAIT_FOR_DUMP; 8596 } else 8597 ipr_unit_check_no_data(ioa_cfg); 8598 8599 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q); 8600 } 8601 8602 /** 8603 * ipr_reset_get_unit_check_job - Call to get the unit check buffer. 8604 * @ipr_cmd: ipr command struct 8605 * 8606 * Description: This function will call to get the unit check buffer. 
8607 * 8608 * Return value: 8609 * IPR_RC_JOB_RETURN 8610 **/ 8611 static int ipr_reset_get_unit_check_job(struct ipr_cmnd *ipr_cmd) 8612 { 8613 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 8614 8615 ENTER; 8616 ioa_cfg->ioa_unit_checked = 0; 8617 ipr_get_unit_check_buffer(ioa_cfg); 8618 ipr_cmd->job_step = ipr_reset_alert; 8619 ipr_reset_start_timer(ipr_cmd, 0); 8620 8621 LEAVE; 8622 return IPR_RC_JOB_RETURN; 8623 } 8624 8625 static int ipr_dump_mailbox_wait(struct ipr_cmnd *ipr_cmd) 8626 { 8627 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 8628 8629 ENTER; 8630 8631 if (ioa_cfg->sdt_state != GET_DUMP) 8632 return IPR_RC_JOB_RETURN; 8633 8634 if (!ioa_cfg->sis64 || !ipr_cmd->u.time_left || 8635 (readl(ioa_cfg->regs.sense_interrupt_reg) & 8636 IPR_PCII_MAILBOX_STABLE)) { 8637 8638 if (!ipr_cmd->u.time_left) 8639 dev_err(&ioa_cfg->pdev->dev, 8640 "Timed out waiting for Mailbox register.\n"); 8641 8642 ioa_cfg->sdt_state = READ_DUMP; 8643 ioa_cfg->dump_timeout = 0; 8644 if (ioa_cfg->sis64) 8645 ipr_reset_start_timer(ipr_cmd, IPR_SIS64_DUMP_TIMEOUT); 8646 else 8647 ipr_reset_start_timer(ipr_cmd, IPR_SIS32_DUMP_TIMEOUT); 8648 ipr_cmd->job_step = ipr_reset_wait_for_dump; 8649 schedule_work(&ioa_cfg->work_q); 8650 8651 } else { 8652 ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT; 8653 ipr_reset_start_timer(ipr_cmd, 8654 IPR_CHECK_FOR_RESET_TIMEOUT); 8655 } 8656 8657 LEAVE; 8658 return IPR_RC_JOB_RETURN; 8659 } 8660 8661 /** 8662 * ipr_reset_restore_cfg_space - Restore PCI config space. 8663 * @ipr_cmd: ipr command struct 8664 * 8665 * Description: This function restores the saved PCI config space of 8666 * the adapter, fails all outstanding ops back to the callers, and 8667 * fetches the dump/unit check if applicable to this reset. 8668 * 8669 * Return value: 8670 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN 8671 **/ 8672 static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd) 8673 { 8674 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 8675 8676 ENTER; 8677 ioa_cfg->pdev->state_saved = true; 8678 pci_restore_state(ioa_cfg->pdev); 8679 8680 if (ipr_set_pcix_cmd_reg(ioa_cfg)) { 8681 ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR); 8682 return IPR_RC_JOB_CONTINUE; 8683 } 8684 8685 ipr_fail_all_ops(ioa_cfg); 8686 8687 if (ioa_cfg->sis64) { 8688 /* Set the adapter to the correct endian mode. */ 8689 writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg); 8690 readl(ioa_cfg->regs.endian_swap_reg); 8691 } 8692 8693 if (ioa_cfg->ioa_unit_checked) { 8694 if (ioa_cfg->sis64) { 8695 ipr_cmd->job_step = ipr_reset_get_unit_check_job; 8696 ipr_reset_start_timer(ipr_cmd, IPR_DUMP_DELAY_TIMEOUT); 8697 return IPR_RC_JOB_RETURN; 8698 } else { 8699 ioa_cfg->ioa_unit_checked = 0; 8700 ipr_get_unit_check_buffer(ioa_cfg); 8701 ipr_cmd->job_step = ipr_reset_alert; 8702 ipr_reset_start_timer(ipr_cmd, 0); 8703 return IPR_RC_JOB_RETURN; 8704 } 8705 } 8706 8707 if (ioa_cfg->in_ioa_bringdown) { 8708 ipr_cmd->job_step = ipr_ioa_bringdown_done; 8709 } else if (ioa_cfg->sdt_state == GET_DUMP) { 8710 ipr_cmd->job_step = ipr_dump_mailbox_wait; 8711 ipr_cmd->u.time_left = IPR_WAIT_FOR_MAILBOX; 8712 } else { 8713 ipr_cmd->job_step = ipr_reset_enable_ioa; 8714 } 8715 8716 LEAVE; 8717 return IPR_RC_JOB_CONTINUE; 8718 } 8719 8720 /** 8721 * ipr_reset_bist_done - BIST has completed on the adapter. 8722 * @ipr_cmd: ipr command struct 8723 * 8724 * Description: Unblock config space and resume the reset process. 
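 * Config space access was locked by ipr_reset_block_config_access_wait
 * before BIST or the slot reset was started; once it is unlocked the
 * job continues with ipr_reset_restore_cfg_space.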
8725 * 8726 * Return value: 8727 * IPR_RC_JOB_CONTINUE 8728 **/ 8729 static int ipr_reset_bist_done(struct ipr_cmnd *ipr_cmd) 8730 { 8731 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 8732 8733 ENTER; 8734 if (ioa_cfg->cfg_locked) 8735 pci_cfg_access_unlock(ioa_cfg->pdev); 8736 ioa_cfg->cfg_locked = 0; 8737 ipr_cmd->job_step = ipr_reset_restore_cfg_space; 8738 LEAVE; 8739 return IPR_RC_JOB_CONTINUE; 8740 } 8741 8742 /** 8743 * ipr_reset_start_bist - Run BIST on the adapter. 8744 * @ipr_cmd: ipr command struct 8745 * 8746 * Description: This function runs BIST on the adapter, then delays 2 seconds. 8747 * 8748 * Return value: 8749 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN 8750 **/ 8751 static int ipr_reset_start_bist(struct ipr_cmnd *ipr_cmd) 8752 { 8753 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 8754 int rc = PCIBIOS_SUCCESSFUL; 8755 8756 ENTER; 8757 if (ioa_cfg->ipr_chip->bist_method == IPR_MMIO) 8758 writel(IPR_UPROCI_SIS64_START_BIST, 8759 ioa_cfg->regs.set_uproc_interrupt_reg32); 8760 else 8761 rc = pci_write_config_byte(ioa_cfg->pdev, PCI_BIST, PCI_BIST_START); 8762 8763 if (rc == PCIBIOS_SUCCESSFUL) { 8764 ipr_cmd->job_step = ipr_reset_bist_done; 8765 ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT); 8766 rc = IPR_RC_JOB_RETURN; 8767 } else { 8768 if (ioa_cfg->cfg_locked) 8769 pci_cfg_access_unlock(ipr_cmd->ioa_cfg->pdev); 8770 ioa_cfg->cfg_locked = 0; 8771 ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR); 8772 rc = IPR_RC_JOB_CONTINUE; 8773 } 8774 8775 LEAVE; 8776 return rc; 8777 } 8778 8779 /** 8780 * ipr_reset_slot_reset_done - Clear PCI reset to the adapter 8781 * @ipr_cmd: ipr command struct 8782 * 8783 * Description: This clears PCI reset to the adapter and delays two seconds. 8784 * 8785 * Return value: 8786 * IPR_RC_JOB_RETURN 8787 **/ 8788 static int ipr_reset_slot_reset_done(struct ipr_cmnd *ipr_cmd) 8789 { 8790 ENTER; 8791 ipr_cmd->job_step = ipr_reset_bist_done; 8792 ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT); 8793 LEAVE; 8794 return IPR_RC_JOB_RETURN; 8795 } 8796 8797 /** 8798 * ipr_reset_reset_work - Pulse a PCIe fundamental reset 8799 * @work: work struct 8800 * 8801 * Description: This pulses warm reset to a slot. 8802 * 8803 **/ 8804 static void ipr_reset_reset_work(struct work_struct *work) 8805 { 8806 struct ipr_cmnd *ipr_cmd = container_of(work, struct ipr_cmnd, work); 8807 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 8808 struct pci_dev *pdev = ioa_cfg->pdev; 8809 unsigned long lock_flags = 0; 8810 8811 ENTER; 8812 pci_set_pcie_reset_state(pdev, pcie_warm_reset); 8813 msleep(jiffies_to_msecs(IPR_PCI_RESET_TIMEOUT)); 8814 pci_set_pcie_reset_state(pdev, pcie_deassert_reset); 8815 8816 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 8817 if (ioa_cfg->reset_cmd == ipr_cmd) 8818 ipr_reset_ioa_job(ipr_cmd); 8819 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 8820 LEAVE; 8821 } 8822 8823 /** 8824 * ipr_reset_slot_reset - Reset the PCI slot of the adapter. 8825 * @ipr_cmd: ipr command struct 8826 * 8827 * Description: This asserts PCI reset to the adapter. 
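 * The reset pulse itself is deferred to ipr_reset_reset_work on
 * reset_work_q, since that routine sleeps (msleep) between asserting
 * and deasserting pcie_warm_reset and so cannot run in the reset
 * job's context; the work routine resumes the job only if reset_cmd
 * still points at this command.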
8828 * 8829 * Return value: 8830 * IPR_RC_JOB_RETURN 8831 **/ 8832 static int ipr_reset_slot_reset(struct ipr_cmnd *ipr_cmd) 8833 { 8834 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 8835 8836 ENTER; 8837 INIT_WORK(&ipr_cmd->work, ipr_reset_reset_work); 8838 queue_work(ioa_cfg->reset_work_q, &ipr_cmd->work); 8839 ipr_cmd->job_step = ipr_reset_slot_reset_done; 8840 LEAVE; 8841 return IPR_RC_JOB_RETURN; 8842 } 8843 8844 /** 8845 * ipr_reset_block_config_access_wait - Wait for permission to block config access 8846 * @ipr_cmd: ipr command struct 8847 * 8848 * Description: This attempts to block config access to the IOA. 8849 * 8850 * Return value: 8851 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN 8852 **/ 8853 static int ipr_reset_block_config_access_wait(struct ipr_cmnd *ipr_cmd) 8854 { 8855 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 8856 int rc = IPR_RC_JOB_CONTINUE; 8857 8858 if (pci_cfg_access_trylock(ioa_cfg->pdev)) { 8859 ioa_cfg->cfg_locked = 1; 8860 ipr_cmd->job_step = ioa_cfg->reset; 8861 } else { 8862 if (ipr_cmd->u.time_left) { 8863 rc = IPR_RC_JOB_RETURN; 8864 ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT; 8865 ipr_reset_start_timer(ipr_cmd, 8866 IPR_CHECK_FOR_RESET_TIMEOUT); 8867 } else { 8868 ipr_cmd->job_step = ioa_cfg->reset; 8869 dev_err(&ioa_cfg->pdev->dev, 8870 "Timed out waiting to lock config access. Resetting anyway.\n"); 8871 } 8872 } 8873 8874 return rc; 8875 } 8876 8877 /** 8878 * ipr_reset_block_config_access - Block config access to the IOA 8879 * @ipr_cmd: ipr command struct 8880 * 8881 * Description: This attempts to block config access to the IOA 8882 * 8883 * Return value: 8884 * IPR_RC_JOB_CONTINUE 8885 **/ 8886 static int ipr_reset_block_config_access(struct ipr_cmnd *ipr_cmd) 8887 { 8888 ipr_cmd->ioa_cfg->cfg_locked = 0; 8889 ipr_cmd->job_step = ipr_reset_block_config_access_wait; 8890 ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT; 8891 return IPR_RC_JOB_CONTINUE; 8892 } 8893 8894 /** 8895 * ipr_reset_allowed - Query whether or not IOA can be reset 8896 * @ioa_cfg: ioa config struct 8897 * 8898 * Return value: 8899 * 0 if reset not allowed / non-zero if reset is allowed 8900 **/ 8901 static int ipr_reset_allowed(struct ipr_ioa_cfg *ioa_cfg) 8902 { 8903 volatile u32 temp_reg; 8904 8905 temp_reg = readl(ioa_cfg->regs.sense_interrupt_reg); 8906 return ((temp_reg & IPR_PCII_CRITICAL_OPERATION) == 0); 8907 } 8908 8909 /** 8910 * ipr_reset_wait_to_start_bist - Wait for permission to reset IOA. 8911 * @ipr_cmd: ipr command struct 8912 * 8913 * Description: This function waits for adapter permission to run BIST, 8914 * then runs BIST. If the adapter does not give permission after a 8915 * reasonable time, we will reset the adapter anyway. The impact of 8916 * resetting the adapter without warning the adapter is the risk of 8917 * losing the persistent error log on the adapter. If the adapter is 8918 * reset while it is writing to the flash on the adapter, the flash 8919 * segment will have bad ECC and be zeroed. 
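 * Permission is polled by checking IPR_PCII_CRITICAL_OPERATION in the
 * sense interrupt register every IPR_CHECK_FOR_RESET_TIMEOUT until the
 * budget in ipr_cmd->u.time_left (set by ipr_reset_alert) is used up.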
8920 * 8921 * Return value: 8922 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN 8923 **/ 8924 static int ipr_reset_wait_to_start_bist(struct ipr_cmnd *ipr_cmd) 8925 { 8926 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 8927 int rc = IPR_RC_JOB_RETURN; 8928 8929 if (!ipr_reset_allowed(ioa_cfg) && ipr_cmd->u.time_left) { 8930 ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT; 8931 ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT); 8932 } else { 8933 ipr_cmd->job_step = ipr_reset_block_config_access; 8934 rc = IPR_RC_JOB_CONTINUE; 8935 } 8936 8937 return rc; 8938 } 8939 8940 /** 8941 * ipr_reset_alert - Alert the adapter of a pending reset 8942 * @ipr_cmd: ipr command struct 8943 * 8944 * Description: This function alerts the adapter that it will be reset. 8945 * If memory space is not currently enabled, proceed directly 8946 * to running BIST on the adapter. The timer must always be started 8947 * so we guarantee we do not run BIST from ipr_isr. 8948 * 8949 * Return value: 8950 * IPR_RC_JOB_RETURN 8951 **/ 8952 static int ipr_reset_alert(struct ipr_cmnd *ipr_cmd) 8953 { 8954 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 8955 u16 cmd_reg; 8956 int rc; 8957 8958 ENTER; 8959 rc = pci_read_config_word(ioa_cfg->pdev, PCI_COMMAND, &cmd_reg); 8960 8961 if ((rc == PCIBIOS_SUCCESSFUL) && (cmd_reg & PCI_COMMAND_MEMORY)) { 8962 ipr_mask_and_clear_interrupts(ioa_cfg, ~0); 8963 writel(IPR_UPROCI_RESET_ALERT, ioa_cfg->regs.set_uproc_interrupt_reg32); 8964 ipr_cmd->job_step = ipr_reset_wait_to_start_bist; 8965 } else { 8966 ipr_cmd->job_step = ipr_reset_block_config_access; 8967 } 8968 8969 ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT; 8970 ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT); 8971 8972 LEAVE; 8973 return IPR_RC_JOB_RETURN; 8974 } 8975 8976 /** 8977 * ipr_reset_quiesce_done - Complete IOA disconnect 8978 * @ipr_cmd: ipr command struct 8979 * 8980 * Description: Freeze the adapter to complete quiesce processing 8981 * 8982 * Return value: 8983 * IPR_RC_JOB_CONTINUE 8984 **/ 8985 static int ipr_reset_quiesce_done(struct ipr_cmnd *ipr_cmd) 8986 { 8987 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 8988 8989 ENTER; 8990 ipr_cmd->job_step = ipr_ioa_bringdown_done; 8991 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER); 8992 LEAVE; 8993 return IPR_RC_JOB_CONTINUE; 8994 } 8995 8996 /** 8997 * ipr_reset_cancel_hcam_done - Check for outstanding commands 8998 * @ipr_cmd: ipr command struct 8999 * 9000 * Description: Ensure nothing is outstanding to the IOA and 9001 * proceed with IOA disconnect. Otherwise reset the IOA. 
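 * Each HRRQ pending queue is scanned under its _lock; a single
 * outstanding command is enough to abandon the quiesce and fall back
 * to a full adapter reset.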
9002 * 9003 * Return value: 9004 * IPR_RC_JOB_RETURN / IPR_RC_JOB_CONTINUE 9005 **/ 9006 static int ipr_reset_cancel_hcam_done(struct ipr_cmnd *ipr_cmd) 9007 { 9008 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 9009 struct ipr_cmnd *loop_cmd; 9010 struct ipr_hrr_queue *hrrq; 9011 int rc = IPR_RC_JOB_CONTINUE; 9012 int count = 0; 9013 9014 ENTER; 9015 ipr_cmd->job_step = ipr_reset_quiesce_done; 9016 9017 for_each_hrrq(hrrq, ioa_cfg) { 9018 spin_lock(&hrrq->_lock); 9019 list_for_each_entry(loop_cmd, &hrrq->hrrq_pending_q, queue) { 9020 count++; 9021 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE); 9022 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); 9023 rc = IPR_RC_JOB_RETURN; 9024 break; 9025 } 9026 spin_unlock(&hrrq->_lock); 9027 9028 if (count) 9029 break; 9030 } 9031 9032 LEAVE; 9033 return rc; 9034 } 9035 9036 /** 9037 * ipr_reset_cancel_hcam - Cancel outstanding HCAMs 9038 * @ipr_cmd: ipr command struct 9039 * 9040 * Description: Cancel any outstanding HCAMs to the IOA. 9041 * 9042 * Return value: 9043 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN 9044 **/ 9045 static int ipr_reset_cancel_hcam(struct ipr_cmnd *ipr_cmd) 9046 { 9047 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 9048 int rc = IPR_RC_JOB_CONTINUE; 9049 struct ipr_cmd_pkt *cmd_pkt; 9050 struct ipr_cmnd *hcam_cmd; 9051 struct ipr_hrr_queue *hrrq = &ioa_cfg->hrrq[IPR_INIT_HRRQ]; 9052 9053 ENTER; 9054 ipr_cmd->job_step = ipr_reset_cancel_hcam_done; 9055 9056 if (!hrrq->ioa_is_dead) { 9057 if (!list_empty(&ioa_cfg->hostrcb_pending_q)) { 9058 list_for_each_entry(hcam_cmd, &hrrq->hrrq_pending_q, queue) { 9059 if (hcam_cmd->ioarcb.cmd_pkt.cdb[0] != IPR_HOST_CONTROLLED_ASYNC) 9060 continue; 9061 9062 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE); 9063 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD; 9064 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt; 9065 cmd_pkt->request_type = IPR_RQTYPE_IOACMD; 9066 cmd_pkt->cdb[0] = IPR_CANCEL_REQUEST; 9067 cmd_pkt->cdb[1] = IPR_CANCEL_64BIT_IOARCB; 9068 cmd_pkt->cdb[10] = ((u64) hcam_cmd->dma_addr >> 56) & 0xff; 9069 cmd_pkt->cdb[11] = ((u64) hcam_cmd->dma_addr >> 48) & 0xff; 9070 cmd_pkt->cdb[12] = ((u64) hcam_cmd->dma_addr >> 40) & 0xff; 9071 cmd_pkt->cdb[13] = ((u64) hcam_cmd->dma_addr >> 32) & 0xff; 9072 cmd_pkt->cdb[2] = ((u64) hcam_cmd->dma_addr >> 24) & 0xff; 9073 cmd_pkt->cdb[3] = ((u64) hcam_cmd->dma_addr >> 16) & 0xff; 9074 cmd_pkt->cdb[4] = ((u64) hcam_cmd->dma_addr >> 8) & 0xff; 9075 cmd_pkt->cdb[5] = ((u64) hcam_cmd->dma_addr) & 0xff; 9076 9077 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, 9078 IPR_CANCEL_TIMEOUT); 9079 9080 rc = IPR_RC_JOB_RETURN; 9081 ipr_cmd->job_step = ipr_reset_cancel_hcam; 9082 break; 9083 } 9084 } 9085 } else 9086 ipr_cmd->job_step = ipr_reset_alert; 9087 9088 LEAVE; 9089 return rc; 9090 } 9091 9092 /** 9093 * ipr_reset_ucode_download_done - Microcode download completion 9094 * @ipr_cmd: ipr command struct 9095 * 9096 * Description: This function unmaps the microcode download buffer.
9097 * 9098 * Return value: 9099 * IPR_RC_JOB_CONTINUE 9100 **/ 9101 static int ipr_reset_ucode_download_done(struct ipr_cmnd *ipr_cmd) 9102 { 9103 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 9104 struct ipr_sglist *sglist = ioa_cfg->ucode_sglist; 9105 9106 dma_unmap_sg(&ioa_cfg->pdev->dev, sglist->scatterlist, 9107 sglist->num_sg, DMA_TO_DEVICE); 9108 9109 ipr_cmd->job_step = ipr_reset_alert; 9110 return IPR_RC_JOB_CONTINUE; 9111 } 9112 9113 /** 9114 * ipr_reset_ucode_download - Download microcode to the adapter 9115 * @ipr_cmd: ipr command struct 9116 * 9117 * Description: This function checks to see if there is microcode 9118 * to download to the adapter. If there is, a download is performed. 9119 * 9120 * Return value: 9121 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN 9122 **/ 9123 static int ipr_reset_ucode_download(struct ipr_cmnd *ipr_cmd) 9124 { 9125 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 9126 struct ipr_sglist *sglist = ioa_cfg->ucode_sglist; 9127 9128 ENTER; 9129 ipr_cmd->job_step = ipr_reset_alert; 9130 9131 if (!sglist) 9132 return IPR_RC_JOB_CONTINUE; 9133 9134 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE); 9135 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_SCSICDB; 9136 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = WRITE_BUFFER; 9137 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_WR_BUF_DOWNLOAD_AND_SAVE; 9138 ipr_cmd->ioarcb.cmd_pkt.cdb[6] = (sglist->buffer_len & 0xff0000) >> 16; 9139 ipr_cmd->ioarcb.cmd_pkt.cdb[7] = (sglist->buffer_len & 0x00ff00) >> 8; 9140 ipr_cmd->ioarcb.cmd_pkt.cdb[8] = sglist->buffer_len & 0x0000ff; 9141 9142 if (ioa_cfg->sis64) 9143 ipr_build_ucode_ioadl64(ipr_cmd, sglist); 9144 else 9145 ipr_build_ucode_ioadl(ipr_cmd, sglist); 9146 ipr_cmd->job_step = ipr_reset_ucode_download_done; 9147 9148 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, 9149 IPR_WRITE_BUFFER_TIMEOUT); 9150 9151 LEAVE; 9152 return IPR_RC_JOB_RETURN; 9153 } 9154 9155 /** 9156 * ipr_reset_shutdown_ioa - Shutdown the adapter 9157 * @ipr_cmd: ipr command struct 9158 * 9159 * Description: This function issues an adapter shutdown of the 9160 * specified type to the specified adapter as part of the 9161 * adapter reset job.
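 * The shutdown type is placed in byte 1 of the IPR_IOA_SHUTDOWN CDB
 * and also selects the timeout (normal, prepare-for-normal, dual-IOA
 * abbreviated, or abbreviated); on completion the job moves on to the
 * microcode download step.  IPR_SHUTDOWN_QUIESCE instead routes
 * through HCAM cancellation, and a dead IOA (or IPR_SHUTDOWN_NONE)
 * skips straight to ipr_reset_alert.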
9162 * 9163 * Return value: 9164 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN 9165 **/ 9166 static int ipr_reset_shutdown_ioa(struct ipr_cmnd *ipr_cmd) 9167 { 9168 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 9169 enum ipr_shutdown_type shutdown_type = ipr_cmd->u.shutdown_type; 9170 unsigned long timeout; 9171 int rc = IPR_RC_JOB_CONTINUE; 9172 9173 ENTER; 9174 if (shutdown_type == IPR_SHUTDOWN_QUIESCE) 9175 ipr_cmd->job_step = ipr_reset_cancel_hcam; 9176 else if (shutdown_type != IPR_SHUTDOWN_NONE && 9177 !ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) { 9178 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE); 9179 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD; 9180 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN; 9181 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = shutdown_type; 9182 9183 if (shutdown_type == IPR_SHUTDOWN_NORMAL) 9184 timeout = IPR_SHUTDOWN_TIMEOUT; 9185 else if (shutdown_type == IPR_SHUTDOWN_PREPARE_FOR_NORMAL) 9186 timeout = IPR_INTERNAL_TIMEOUT; 9187 else if (ioa_cfg->dual_raid && ipr_dual_ioa_raid) 9188 timeout = IPR_DUAL_IOA_ABBR_SHUTDOWN_TO; 9189 else 9190 timeout = IPR_ABBREV_SHUTDOWN_TIMEOUT; 9191 9192 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, timeout); 9193 9194 rc = IPR_RC_JOB_RETURN; 9195 ipr_cmd->job_step = ipr_reset_ucode_download; 9196 } else 9197 ipr_cmd->job_step = ipr_reset_alert; 9198 9199 LEAVE; 9200 return rc; 9201 } 9202 9203 /** 9204 * ipr_reset_ioa_job - Adapter reset job 9205 * @ipr_cmd: ipr command struct 9206 * 9207 * Description: This function is the job router for the adapter reset job. 9208 * 9209 * Return value: 9210 * none 9211 **/ 9212 static void ipr_reset_ioa_job(struct ipr_cmnd *ipr_cmd) 9213 { 9214 u32 rc, ioasc; 9215 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 9216 9217 do { 9218 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); 9219 9220 if (ioa_cfg->reset_cmd != ipr_cmd) { 9221 /* 9222 * We are doing nested adapter resets and this is 9223 * not the current reset job. 9224 */ 9225 list_add_tail(&ipr_cmd->queue, 9226 &ipr_cmd->hrrq->hrrq_free_q); 9227 return; 9228 } 9229 9230 if (IPR_IOASC_SENSE_KEY(ioasc)) { 9231 rc = ipr_cmd->job_step_failed(ipr_cmd); 9232 if (rc == IPR_RC_JOB_RETURN) 9233 return; 9234 } 9235 9236 ipr_reinit_ipr_cmnd(ipr_cmd); 9237 ipr_cmd->job_step_failed = ipr_reset_cmd_failed; 9238 rc = ipr_cmd->job_step(ipr_cmd); 9239 } while (rc == IPR_RC_JOB_CONTINUE); 9240 } 9241 9242 /** 9243 * _ipr_initiate_ioa_reset - Initiate an adapter reset 9244 * @ioa_cfg: ioa config struct 9245 * @job_step: first job step of reset job 9246 * @shutdown_type: shutdown type 9247 * 9248 * Description: This function will initiate the reset of the given adapter 9249 * starting at the selected job step. 9250 * If the caller needs to wait on the completion of the reset, 9251 * the caller must sleep on the reset_wait_q. 
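 * A typical synchronous caller (illustrative sketch; the host lock and
 * wait condition follow the pattern used elsewhere in this driver)
 * looks like:
 *
 *	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
 *	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
 *	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
 *	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);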
9252 * 9253 * Return value: 9254 * none 9255 **/ 9256 static void _ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg, 9257 int (*job_step) (struct ipr_cmnd *), 9258 enum ipr_shutdown_type shutdown_type) 9259 { 9260 struct ipr_cmnd *ipr_cmd; 9261 int i; 9262 9263 ioa_cfg->in_reset_reload = 1; 9264 for (i = 0; i < ioa_cfg->hrrq_num; i++) { 9265 spin_lock(&ioa_cfg->hrrq[i]._lock); 9266 ioa_cfg->hrrq[i].allow_cmds = 0; 9267 spin_unlock(&ioa_cfg->hrrq[i]._lock); 9268 } 9269 wmb(); 9270 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) { 9271 ioa_cfg->scsi_unblock = 0; 9272 ioa_cfg->scsi_blocked = 1; 9273 scsi_block_requests(ioa_cfg->host); 9274 } 9275 9276 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg); 9277 ioa_cfg->reset_cmd = ipr_cmd; 9278 ipr_cmd->job_step = job_step; 9279 ipr_cmd->u.shutdown_type = shutdown_type; 9280 9281 ipr_reset_ioa_job(ipr_cmd); 9282 } 9283 9284 /** 9285 * ipr_initiate_ioa_reset - Initiate an adapter reset 9286 * @ioa_cfg: ioa config struct 9287 * @shutdown_type: shutdown type 9288 * 9289 * Description: This function will initiate the reset of the given adapter. 9290 * If the caller needs to wait on the completion of the reset, 9291 * the caller must sleep on the reset_wait_q. 9292 * 9293 * Return value: 9294 * none 9295 **/ 9296 static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg, 9297 enum ipr_shutdown_type shutdown_type) 9298 { 9299 int i; 9300 9301 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) 9302 return; 9303 9304 if (ioa_cfg->in_reset_reload) { 9305 if (ioa_cfg->sdt_state == GET_DUMP) 9306 ioa_cfg->sdt_state = WAIT_FOR_DUMP; 9307 else if (ioa_cfg->sdt_state == READ_DUMP) 9308 ioa_cfg->sdt_state = ABORT_DUMP; 9309 } 9310 9311 if (ioa_cfg->reset_retries++ >= IPR_NUM_RESET_RELOAD_RETRIES) { 9312 dev_err(&ioa_cfg->pdev->dev, 9313 "IOA taken offline - error recovery failed\n"); 9314 9315 ioa_cfg->reset_retries = 0; 9316 for (i = 0; i < ioa_cfg->hrrq_num; i++) { 9317 spin_lock(&ioa_cfg->hrrq[i]._lock); 9318 ioa_cfg->hrrq[i].ioa_is_dead = 1; 9319 spin_unlock(&ioa_cfg->hrrq[i]._lock); 9320 } 9321 wmb(); 9322 9323 if (ioa_cfg->in_ioa_bringdown) { 9324 ioa_cfg->reset_cmd = NULL; 9325 ioa_cfg->in_reset_reload = 0; 9326 ipr_fail_all_ops(ioa_cfg); 9327 wake_up_all(&ioa_cfg->reset_wait_q); 9328 9329 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) { 9330 ioa_cfg->scsi_unblock = 1; 9331 schedule_work(&ioa_cfg->work_q); 9332 } 9333 return; 9334 } else { 9335 ioa_cfg->in_ioa_bringdown = 1; 9336 shutdown_type = IPR_SHUTDOWN_NONE; 9337 } 9338 } 9339 9340 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_shutdown_ioa, 9341 shutdown_type); 9342 } 9343 9344 /** 9345 * ipr_reset_freeze - Hold off all I/O activity 9346 * @ipr_cmd: ipr command struct 9347 * 9348 * Description: If the PCI slot is frozen, hold off all I/O 9349 * activity; then, as soon as the slot is available again, 9350 * initiate an adapter reset. 
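 * Interrupt delivery is disabled on every HRRQ and the command is
 * parked on the pending queue with no timer; progress resumes when the
 * PCI error recovery callbacks (ipr_pci_slot_reset) kick off the
 * follow-on reset once the slot is usable again.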
9351 */ 9352 static int ipr_reset_freeze(struct ipr_cmnd *ipr_cmd) 9353 { 9354 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 9355 int i; 9356 9357 /* Disallow new interrupts, avoid loop */ 9358 for (i = 0; i < ioa_cfg->hrrq_num; i++) { 9359 spin_lock(&ioa_cfg->hrrq[i]._lock); 9360 ioa_cfg->hrrq[i].allow_interrupts = 0; 9361 spin_unlock(&ioa_cfg->hrrq[i]._lock); 9362 } 9363 wmb(); 9364 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q); 9365 ipr_cmd->done = ipr_reset_ioa_job; 9366 return IPR_RC_JOB_RETURN; 9367 } 9368 9369 /** 9370 * ipr_pci_mmio_enabled - Called when MMIO has been re-enabled 9371 * @pdev: PCI device struct 9372 * 9373 * Description: This routine is called to tell us that the MMIO 9374 * access to the IOA has been restored 9375 */ 9376 static pci_ers_result_t ipr_pci_mmio_enabled(struct pci_dev *pdev) 9377 { 9378 unsigned long flags = 0; 9379 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev); 9380 9381 spin_lock_irqsave(ioa_cfg->host->host_lock, flags); 9382 if (!ioa_cfg->probe_done) 9383 pci_save_state(pdev); 9384 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags); 9385 return PCI_ERS_RESULT_NEED_RESET; 9386 } 9387 9388 /** 9389 * ipr_pci_frozen - Called when slot has experienced a PCI bus error. 9390 * @pdev: PCI device struct 9391 * 9392 * Description: This routine is called to tell us that the PCI bus 9393 * is down. Can't do anything here, except put the device driver 9394 * into a holding pattern, waiting for the PCI bus to come back. 9395 */ 9396 static void ipr_pci_frozen(struct pci_dev *pdev) 9397 { 9398 unsigned long flags = 0; 9399 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev); 9400 9401 spin_lock_irqsave(ioa_cfg->host->host_lock, flags); 9402 if (ioa_cfg->probe_done) 9403 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_freeze, IPR_SHUTDOWN_NONE); 9404 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags); 9405 } 9406 9407 /** 9408 * ipr_pci_slot_reset - Called when PCI slot has been reset. 9409 * @pdev: PCI device struct 9410 * 9411 * Description: This routine is called by the pci error recovery 9412 * code after the PCI slot has been reset, just before we 9413 * should resume normal operations. 9414 */ 9415 static pci_ers_result_t ipr_pci_slot_reset(struct pci_dev *pdev) 9416 { 9417 unsigned long flags = 0; 9418 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev); 9419 9420 spin_lock_irqsave(ioa_cfg->host->host_lock, flags); 9421 if (ioa_cfg->probe_done) { 9422 if (ioa_cfg->needs_warm_reset) 9423 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE); 9424 else 9425 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_restore_cfg_space, 9426 IPR_SHUTDOWN_NONE); 9427 } else 9428 wake_up_all(&ioa_cfg->eeh_wait_q); 9429 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags); 9430 return PCI_ERS_RESULT_RECOVERED; 9431 } 9432 9433 /** 9434 * ipr_pci_perm_failure - Called when PCI slot is dead for good. 9435 * @pdev: PCI device struct 9436 * 9437 * Description: This routine is called when the PCI bus has 9438 * permanently failed. 
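 *
 * The handling below is deliberately terminal: reset_retries is primed to
 * IPR_NUM_RESET_RELOAD_RETRIES - 1 and in_ioa_bringdown is set, so the
 * reset initiated here is effectively the final attempt and any further
 * retry takes the IOA offline.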
9439 */ 9440 static void ipr_pci_perm_failure(struct pci_dev *pdev) 9441 { 9442 unsigned long flags = 0; 9443 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev); 9444 int i; 9445 9446 spin_lock_irqsave(ioa_cfg->host->host_lock, flags); 9447 if (ioa_cfg->probe_done) { 9448 if (ioa_cfg->sdt_state == WAIT_FOR_DUMP) 9449 ioa_cfg->sdt_state = ABORT_DUMP; 9450 ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES - 1; 9451 ioa_cfg->in_ioa_bringdown = 1; 9452 for (i = 0; i < ioa_cfg->hrrq_num; i++) { 9453 spin_lock(&ioa_cfg->hrrq[i]._lock); 9454 ioa_cfg->hrrq[i].allow_cmds = 0; 9455 spin_unlock(&ioa_cfg->hrrq[i]._lock); 9456 } 9457 wmb(); 9458 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE); 9459 } else 9460 wake_up_all(&ioa_cfg->eeh_wait_q); 9461 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags); 9462 } 9463 9464 /** 9465 * ipr_pci_error_detected - Called when a PCI error is detected. 9466 * @pdev: PCI device struct 9467 * @state: PCI channel state 9468 * 9469 * Description: Called when a PCI error is detected. 9470 * 9471 * Return value: 9472 * PCI_ERS_RESULT_CAN_RECOVER / PCI_ERS_RESULT_NEED_RESET / PCI_ERS_RESULT_DISCONNECT 9473 */ 9474 static pci_ers_result_t ipr_pci_error_detected(struct pci_dev *pdev, 9475 pci_channel_state_t state) 9476 { 9477 switch (state) { 9478 case pci_channel_io_frozen: 9479 ipr_pci_frozen(pdev); 9480 return PCI_ERS_RESULT_CAN_RECOVER; 9481 case pci_channel_io_perm_failure: 9482 ipr_pci_perm_failure(pdev); 9483 return PCI_ERS_RESULT_DISCONNECT; 9484 default: 9485 break; 9486 } 9487 return PCI_ERS_RESULT_NEED_RESET; 9488 } 9489 9490 /** 9491 * ipr_probe_ioa_part2 - Initializes IOAs found in ipr_probe_ioa(..) 9492 * @ioa_cfg: ioa cfg struct 9493 * 9494 * Description: This is the second phase of adapter initialization. 9495 * This function takes care of initializing the adapter to the point 9496 * where it can accept new commands.
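 *
 * The bring-up itself runs asynchronously as a reset job: a full adapter
 * reset is used when needs_hard_reset is set, otherwise the job starts at
 * ipr_reset_enable_ioa, and completion is observed on reset_wait_q as
 * described for _ipr_initiate_ioa_reset() above.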
9497 * Return value: 9498 * 0 on success / -EIO on failure 9499 **/ 9500 static int ipr_probe_ioa_part2(struct ipr_ioa_cfg *ioa_cfg) 9501 { 9502 int rc = 0; 9503 unsigned long host_lock_flags = 0; 9504 9505 ENTER; 9506 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags); 9507 dev_dbg(&ioa_cfg->pdev->dev, "ioa_cfg adx: 0x%p\n", ioa_cfg); 9508 ioa_cfg->probe_done = 1; 9509 if (ioa_cfg->needs_hard_reset) { 9510 ioa_cfg->needs_hard_reset = 0; 9511 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE); 9512 } else 9513 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_enable_ioa, 9514 IPR_SHUTDOWN_NONE); 9515 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags); 9516 9517 LEAVE; 9518 return rc; 9519 } 9520 9521 /** 9522 * ipr_free_cmd_blks - Frees command blocks allocated for an adapter 9523 * @ioa_cfg: ioa config struct 9524 * 9525 * Return value: 9526 * none 9527 **/ 9528 static void ipr_free_cmd_blks(struct ipr_ioa_cfg *ioa_cfg) 9529 { 9530 int i; 9531 9532 if (ioa_cfg->ipr_cmnd_list) { 9533 for (i = 0; i < IPR_NUM_CMD_BLKS; i++) { 9534 if (ioa_cfg->ipr_cmnd_list[i]) 9535 dma_pool_free(ioa_cfg->ipr_cmd_pool, 9536 ioa_cfg->ipr_cmnd_list[i], 9537 ioa_cfg->ipr_cmnd_list_dma[i]); 9538 9539 ioa_cfg->ipr_cmnd_list[i] = NULL; 9540 } 9541 } 9542 9543 dma_pool_destroy(ioa_cfg->ipr_cmd_pool); 9544 9545 kfree(ioa_cfg->ipr_cmnd_list); 9546 kfree(ioa_cfg->ipr_cmnd_list_dma); 9547 ioa_cfg->ipr_cmnd_list = NULL; 9548 ioa_cfg->ipr_cmnd_list_dma = NULL; 9549 ioa_cfg->ipr_cmd_pool = NULL; 9550 } 9551 9552 /** 9553 * ipr_free_mem - Frees memory allocated for an adapter 9554 * @ioa_cfg: ioa cfg struct 9555 * 9556 * Return value: 9557 * nothing 9558 **/ 9559 static void ipr_free_mem(struct ipr_ioa_cfg *ioa_cfg) 9560 { 9561 int i; 9562 9563 kfree(ioa_cfg->res_entries); 9564 dma_free_coherent(&ioa_cfg->pdev->dev, sizeof(struct ipr_misc_cbs), 9565 ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma); 9566 ipr_free_cmd_blks(ioa_cfg); 9567 9568 for (i = 0; i < ioa_cfg->hrrq_num; i++) 9569 dma_free_coherent(&ioa_cfg->pdev->dev, 9570 sizeof(u32) * ioa_cfg->hrrq[i].size, 9571 ioa_cfg->hrrq[i].host_rrq, 9572 ioa_cfg->hrrq[i].host_rrq_dma); 9573 9574 dma_free_coherent(&ioa_cfg->pdev->dev, ioa_cfg->cfg_table_size, 9575 ioa_cfg->u.cfg_table, ioa_cfg->cfg_table_dma); 9576 9577 for (i = 0; i < IPR_MAX_HCAMS; i++) { 9578 dma_free_coherent(&ioa_cfg->pdev->dev, 9579 sizeof(struct ipr_hostrcb), 9580 ioa_cfg->hostrcb[i], 9581 ioa_cfg->hostrcb_dma[i]); 9582 } 9583 9584 ipr_free_dump(ioa_cfg); 9585 kfree(ioa_cfg->trace); 9586 } 9587 9588 /** 9589 * ipr_free_irqs - Free all allocated IRQs for the adapter. 9590 * @ioa_cfg: ipr cfg struct 9591 * 9592 * This function frees all allocated IRQs for the 9593 * specified adapter. 9594 * 9595 * Return value: 9596 * none 9597 **/ 9598 static void ipr_free_irqs(struct ipr_ioa_cfg *ioa_cfg) 9599 { 9600 struct pci_dev *pdev = ioa_cfg->pdev; 9601 int i; 9602 9603 for (i = 0; i < ioa_cfg->nvectors; i++) 9604 free_irq(pci_irq_vector(pdev, i), &ioa_cfg->hrrq[i]); 9605 pci_free_irq_vectors(pdev); 9606 } 9607 9608 /** 9609 * ipr_free_all_resources - Free all allocated resources for an adapter. 9610 * @ioa_cfg: ioa config struct 9611 * 9612 * This function frees all allocated resources for the 9613 * specified adapter. 
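 *
 * The teardown below roughly mirrors ipr_probe_ioa() in reverse: IRQs and
 * the reset workqueue go first, then the register mapping and PCI regions,
 * then the DMA and command block memory, and finally the SCSI host
 * reference and the PCI device itself.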
9614 * 9615 * Return value: 9616 * none 9617 **/ 9618 static void ipr_free_all_resources(struct ipr_ioa_cfg *ioa_cfg) 9619 { 9620 struct pci_dev *pdev = ioa_cfg->pdev; 9621 9622 ENTER; 9623 ipr_free_irqs(ioa_cfg); 9624 if (ioa_cfg->reset_work_q) 9625 destroy_workqueue(ioa_cfg->reset_work_q); 9626 iounmap(ioa_cfg->hdw_dma_regs); 9627 pci_release_regions(pdev); 9628 ipr_free_mem(ioa_cfg); 9629 scsi_host_put(ioa_cfg->host); 9630 pci_disable_device(pdev); 9631 LEAVE; 9632 } 9633 9634 /** 9635 * ipr_alloc_cmd_blks - Allocate command blocks for an adapter 9636 * @ioa_cfg: ioa config struct 9637 * 9638 * Return value: 9639 * 0 on success / -ENOMEM on allocation failure 9640 **/ 9641 static int ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg) 9642 { 9643 struct ipr_cmnd *ipr_cmd; 9644 struct ipr_ioarcb *ioarcb; 9645 dma_addr_t dma_addr; 9646 int i, entries_each_hrrq, hrrq_id = 0; 9647 9648 ioa_cfg->ipr_cmd_pool = dma_pool_create(IPR_NAME, &ioa_cfg->pdev->dev, 9649 sizeof(struct ipr_cmnd), 512, 0); 9650 9651 if (!ioa_cfg->ipr_cmd_pool) 9652 return -ENOMEM; 9653 9654 ioa_cfg->ipr_cmnd_list = kcalloc(IPR_NUM_CMD_BLKS, sizeof(struct ipr_cmnd *), GFP_KERNEL); 9655 ioa_cfg->ipr_cmnd_list_dma = kcalloc(IPR_NUM_CMD_BLKS, sizeof(dma_addr_t), GFP_KERNEL); 9656 9657 if (!ioa_cfg->ipr_cmnd_list || !ioa_cfg->ipr_cmnd_list_dma) { 9658 ipr_free_cmd_blks(ioa_cfg); 9659 return -ENOMEM; 9660 } 9661 9662 for (i = 0; i < ioa_cfg->hrrq_num; i++) { 9663 if (ioa_cfg->hrrq_num > 1) { 9664 if (i == 0) { 9665 entries_each_hrrq = IPR_NUM_INTERNAL_CMD_BLKS; 9666 ioa_cfg->hrrq[i].min_cmd_id = 0; 9667 ioa_cfg->hrrq[i].max_cmd_id = 9668 (entries_each_hrrq - 1); 9669 } else { 9670 entries_each_hrrq = 9671 IPR_NUM_BASE_CMD_BLKS/ 9672 (ioa_cfg->hrrq_num - 1); 9673 ioa_cfg->hrrq[i].min_cmd_id = 9674 IPR_NUM_INTERNAL_CMD_BLKS + 9675 (i - 1) * entries_each_hrrq; 9676 ioa_cfg->hrrq[i].max_cmd_id = 9677 (IPR_NUM_INTERNAL_CMD_BLKS + 9678 i * entries_each_hrrq - 1); 9679 } 9680 } else { 9681 entries_each_hrrq = IPR_NUM_CMD_BLKS; 9682 ioa_cfg->hrrq[i].min_cmd_id = 0; 9683 ioa_cfg->hrrq[i].max_cmd_id = (entries_each_hrrq - 1); 9684 } 9685 ioa_cfg->hrrq[i].size = entries_each_hrrq; 9686 } 9687 9688 BUG_ON(ioa_cfg->hrrq_num == 0); 9689 9690 i = IPR_NUM_CMD_BLKS - 9691 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].max_cmd_id - 1; 9692 if (i > 0) { 9693 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].size += i; 9694 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].max_cmd_id += i; 9695 } 9696 9697 for (i = 0; i < IPR_NUM_CMD_BLKS; i++) { 9698 ipr_cmd = dma_pool_zalloc(ioa_cfg->ipr_cmd_pool, 9699 GFP_KERNEL, &dma_addr); 9700 9701 if (!ipr_cmd) { 9702 ipr_free_cmd_blks(ioa_cfg); 9703 return -ENOMEM; 9704 } 9705 9706 ioa_cfg->ipr_cmnd_list[i] = ipr_cmd; 9707 ioa_cfg->ipr_cmnd_list_dma[i] = dma_addr; 9708 9709 ioarcb = &ipr_cmd->ioarcb; 9710 ipr_cmd->dma_addr = dma_addr; 9711 if (ioa_cfg->sis64) 9712 ioarcb->a.ioarcb_host_pci_addr64 = cpu_to_be64(dma_addr); 9713 else 9714 ioarcb->a.ioarcb_host_pci_addr = cpu_to_be32(dma_addr); 9715 9716 ioarcb->host_response_handle = cpu_to_be32(i << 2); 9717 if (ioa_cfg->sis64) { 9718 ioarcb->u.sis64_addr_data.data_ioadl_addr = 9719 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64)); 9720 ioarcb->u.sis64_addr_data.ioasa_host_pci_addr = 9721 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, s.ioasa64)); 9722 } else { 9723 ioarcb->write_ioadl_addr = 9724 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl)); 9725 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr; 9726 ioarcb->ioasa_host_pci_addr = 9727 cpu_to_be32(dma_addr + 
offsetof(struct ipr_cmnd, s.ioasa)); 9728 } 9729 ioarcb->ioasa_len = cpu_to_be16(sizeof(struct ipr_ioasa)); 9730 ipr_cmd->cmd_index = i; 9731 ipr_cmd->ioa_cfg = ioa_cfg; 9732 ipr_cmd->sense_buffer_dma = dma_addr + 9733 offsetof(struct ipr_cmnd, sense_buffer); 9734 9735 ipr_cmd->ioarcb.cmd_pkt.hrrq_id = hrrq_id; 9736 ipr_cmd->hrrq = &ioa_cfg->hrrq[hrrq_id]; 9737 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); 9738 if (i >= ioa_cfg->hrrq[hrrq_id].max_cmd_id) 9739 hrrq_id++; 9740 } 9741 9742 return 0; 9743 } 9744 9745 /** 9746 * ipr_alloc_mem - Allocate memory for an adapter 9747 * @ioa_cfg: ioa config struct 9748 * 9749 * Return value: 9750 * 0 on success / non-zero for error 9751 **/ 9752 static int ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg) 9753 { 9754 struct pci_dev *pdev = ioa_cfg->pdev; 9755 int i, rc = -ENOMEM; 9756 9757 ENTER; 9758 ioa_cfg->res_entries = kcalloc(ioa_cfg->max_devs_supported, 9759 sizeof(struct ipr_resource_entry), 9760 GFP_KERNEL); 9761 9762 if (!ioa_cfg->res_entries) 9763 goto out; 9764 9765 for (i = 0; i < ioa_cfg->max_devs_supported; i++) { 9766 list_add_tail(&ioa_cfg->res_entries[i].queue, &ioa_cfg->free_res_q); 9767 ioa_cfg->res_entries[i].ioa_cfg = ioa_cfg; 9768 } 9769 9770 ioa_cfg->vpd_cbs = dma_alloc_coherent(&pdev->dev, 9771 sizeof(struct ipr_misc_cbs), 9772 &ioa_cfg->vpd_cbs_dma, 9773 GFP_KERNEL); 9774 9775 if (!ioa_cfg->vpd_cbs) 9776 goto out_free_res_entries; 9777 9778 if (ipr_alloc_cmd_blks(ioa_cfg)) 9779 goto out_free_vpd_cbs; 9780 9781 for (i = 0; i < ioa_cfg->hrrq_num; i++) { 9782 ioa_cfg->hrrq[i].host_rrq = dma_alloc_coherent(&pdev->dev, 9783 sizeof(u32) * ioa_cfg->hrrq[i].size, 9784 &ioa_cfg->hrrq[i].host_rrq_dma, 9785 GFP_KERNEL); 9786 9787 if (!ioa_cfg->hrrq[i].host_rrq) { 9788 while (--i >= 0) 9789 dma_free_coherent(&pdev->dev, 9790 sizeof(u32) * ioa_cfg->hrrq[i].size, 9791 ioa_cfg->hrrq[i].host_rrq, 9792 ioa_cfg->hrrq[i].host_rrq_dma); 9793 goto out_ipr_free_cmd_blocks; 9794 } 9795 ioa_cfg->hrrq[i].ioa_cfg = ioa_cfg; 9796 } 9797 9798 ioa_cfg->u.cfg_table = dma_alloc_coherent(&pdev->dev, 9799 ioa_cfg->cfg_table_size, 9800 &ioa_cfg->cfg_table_dma, 9801 GFP_KERNEL); 9802 9803 if (!ioa_cfg->u.cfg_table) 9804 goto out_free_host_rrq; 9805 9806 for (i = 0; i < IPR_MAX_HCAMS; i++) { 9807 ioa_cfg->hostrcb[i] = dma_alloc_coherent(&pdev->dev, 9808 sizeof(struct ipr_hostrcb), 9809 &ioa_cfg->hostrcb_dma[i], 9810 GFP_KERNEL); 9811 9812 if (!ioa_cfg->hostrcb[i]) 9813 goto out_free_hostrcb_dma; 9814 9815 ioa_cfg->hostrcb[i]->hostrcb_dma = 9816 ioa_cfg->hostrcb_dma[i] + offsetof(struct ipr_hostrcb, hcam); 9817 ioa_cfg->hostrcb[i]->ioa_cfg = ioa_cfg; 9818 list_add_tail(&ioa_cfg->hostrcb[i]->queue, &ioa_cfg->hostrcb_free_q); 9819 } 9820 9821 ioa_cfg->trace = kcalloc(IPR_NUM_TRACE_ENTRIES, 9822 sizeof(struct ipr_trace_entry), 9823 GFP_KERNEL); 9824 9825 if (!ioa_cfg->trace) 9826 goto out_free_hostrcb_dma; 9827 9828 rc = 0; 9829 out: 9830 LEAVE; 9831 return rc; 9832 9833 out_free_hostrcb_dma: 9834 while (i-- > 0) { 9835 dma_free_coherent(&pdev->dev, sizeof(struct ipr_hostrcb), 9836 ioa_cfg->hostrcb[i], 9837 ioa_cfg->hostrcb_dma[i]); 9838 } 9839 dma_free_coherent(&pdev->dev, ioa_cfg->cfg_table_size, 9840 ioa_cfg->u.cfg_table, ioa_cfg->cfg_table_dma); 9841 out_free_host_rrq: 9842 for (i = 0; i < ioa_cfg->hrrq_num; i++) { 9843 dma_free_coherent(&pdev->dev, 9844 sizeof(u32) * ioa_cfg->hrrq[i].size, 9845 ioa_cfg->hrrq[i].host_rrq, 9846 ioa_cfg->hrrq[i].host_rrq_dma); 9847 } 9848 out_ipr_free_cmd_blocks: 9849 ipr_free_cmd_blks(ioa_cfg); 9850 out_free_vpd_cbs: 
9851 dma_free_coherent(&pdev->dev, sizeof(struct ipr_misc_cbs), 9852 ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma); 9853 out_free_res_entries: 9854 kfree(ioa_cfg->res_entries); 9855 goto out; 9856 } 9857 9858 /** 9859 * ipr_initialize_bus_attr - Initialize SCSI bus attributes to default values 9860 * @ioa_cfg: ioa config struct 9861 * 9862 * Return value: 9863 * none 9864 **/ 9865 static void ipr_initialize_bus_attr(struct ipr_ioa_cfg *ioa_cfg) 9866 { 9867 int i; 9868 9869 for (i = 0; i < IPR_MAX_NUM_BUSES; i++) { 9870 ioa_cfg->bus_attr[i].bus = i; 9871 ioa_cfg->bus_attr[i].qas_enabled = 0; 9872 ioa_cfg->bus_attr[i].bus_width = IPR_DEFAULT_BUS_WIDTH; 9873 if (ipr_max_speed < ARRAY_SIZE(ipr_max_bus_speeds)) 9874 ioa_cfg->bus_attr[i].max_xfer_rate = ipr_max_bus_speeds[ipr_max_speed]; 9875 else 9876 ioa_cfg->bus_attr[i].max_xfer_rate = IPR_U160_SCSI_RATE; 9877 } 9878 } 9879 9880 /** 9881 * ipr_init_regs - Initialize IOA registers 9882 * @ioa_cfg: ioa config struct 9883 * 9884 * Return value: 9885 * none 9886 **/ 9887 static void ipr_init_regs(struct ipr_ioa_cfg *ioa_cfg) 9888 { 9889 const struct ipr_interrupt_offsets *p; 9890 struct ipr_interrupts *t; 9891 void __iomem *base; 9892 9893 p = &ioa_cfg->chip_cfg->regs; 9894 t = &ioa_cfg->regs; 9895 base = ioa_cfg->hdw_dma_regs; 9896 9897 t->set_interrupt_mask_reg = base + p->set_interrupt_mask_reg; 9898 t->clr_interrupt_mask_reg = base + p->clr_interrupt_mask_reg; 9899 t->clr_interrupt_mask_reg32 = base + p->clr_interrupt_mask_reg32; 9900 t->sense_interrupt_mask_reg = base + p->sense_interrupt_mask_reg; 9901 t->sense_interrupt_mask_reg32 = base + p->sense_interrupt_mask_reg32; 9902 t->clr_interrupt_reg = base + p->clr_interrupt_reg; 9903 t->clr_interrupt_reg32 = base + p->clr_interrupt_reg32; 9904 t->sense_interrupt_reg = base + p->sense_interrupt_reg; 9905 t->sense_interrupt_reg32 = base + p->sense_interrupt_reg32; 9906 t->ioarrin_reg = base + p->ioarrin_reg; 9907 t->sense_uproc_interrupt_reg = base + p->sense_uproc_interrupt_reg; 9908 t->sense_uproc_interrupt_reg32 = base + p->sense_uproc_interrupt_reg32; 9909 t->set_uproc_interrupt_reg = base + p->set_uproc_interrupt_reg; 9910 t->set_uproc_interrupt_reg32 = base + p->set_uproc_interrupt_reg32; 9911 t->clr_uproc_interrupt_reg = base + p->clr_uproc_interrupt_reg; 9912 t->clr_uproc_interrupt_reg32 = base + p->clr_uproc_interrupt_reg32; 9913 9914 if (ioa_cfg->sis64) { 9915 t->init_feedback_reg = base + p->init_feedback_reg; 9916 t->dump_addr_reg = base + p->dump_addr_reg; 9917 t->dump_data_reg = base + p->dump_data_reg; 9918 t->endian_swap_reg = base + p->endian_swap_reg; 9919 } 9920 } 9921 9922 /** 9923 * ipr_init_ioa_cfg - Initialize IOA config struct 9924 * @ioa_cfg: ioa config struct 9925 * @host: scsi host struct 9926 * @pdev: PCI dev struct 9927 * 9928 * Return value: 9929 * none 9930 **/ 9931 static void ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg, 9932 struct Scsi_Host *host, struct pci_dev *pdev) 9933 { 9934 int i; 9935 9936 ioa_cfg->host = host; 9937 ioa_cfg->pdev = pdev; 9938 ioa_cfg->log_level = ipr_log_level; 9939 ioa_cfg->doorbell = IPR_DOORBELL; 9940 sprintf(ioa_cfg->eye_catcher, IPR_EYECATCHER); 9941 sprintf(ioa_cfg->trace_start, IPR_TRACE_START_LABEL); 9942 sprintf(ioa_cfg->cfg_table_start, IPR_CFG_TBL_START); 9943 sprintf(ioa_cfg->resource_table_label, IPR_RES_TABLE_LABEL); 9944 sprintf(ioa_cfg->ipr_hcam_label, IPR_HCAM_LABEL); 9945 sprintf(ioa_cfg->ipr_cmd_label, IPR_CMD_LABEL); 9946 9947 INIT_LIST_HEAD(&ioa_cfg->hostrcb_free_q); 9948 INIT_LIST_HEAD(&ioa_cfg->hostrcb_pending_q); 9949 
INIT_LIST_HEAD(&ioa_cfg->hostrcb_report_q); 9950 INIT_LIST_HEAD(&ioa_cfg->free_res_q); 9951 INIT_LIST_HEAD(&ioa_cfg->used_res_q); 9952 INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread); 9953 INIT_WORK(&ioa_cfg->scsi_add_work_q, ipr_add_remove_thread); 9954 init_waitqueue_head(&ioa_cfg->reset_wait_q); 9955 init_waitqueue_head(&ioa_cfg->msi_wait_q); 9956 init_waitqueue_head(&ioa_cfg->eeh_wait_q); 9957 ioa_cfg->sdt_state = INACTIVE; 9958 9959 ipr_initialize_bus_attr(ioa_cfg); 9960 ioa_cfg->max_devs_supported = ipr_max_devs; 9961 9962 if (ioa_cfg->sis64) { 9963 host->max_channel = IPR_MAX_SIS64_BUSES; 9964 host->max_id = IPR_MAX_SIS64_TARGETS_PER_BUS; 9965 host->max_lun = IPR_MAX_SIS64_LUNS_PER_TARGET; 9966 if (ipr_max_devs > IPR_MAX_SIS64_DEVS) 9967 ioa_cfg->max_devs_supported = IPR_MAX_SIS64_DEVS; 9968 ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr64) 9969 + ((sizeof(struct ipr_config_table_entry64) 9970 * ioa_cfg->max_devs_supported))); 9971 } else { 9972 host->max_channel = IPR_VSET_BUS; 9973 host->max_id = IPR_MAX_NUM_TARGETS_PER_BUS; 9974 host->max_lun = IPR_MAX_NUM_LUNS_PER_TARGET; 9975 if (ipr_max_devs > IPR_MAX_PHYSICAL_DEVS) 9976 ioa_cfg->max_devs_supported = IPR_MAX_PHYSICAL_DEVS; 9977 ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr) 9978 + ((sizeof(struct ipr_config_table_entry) 9979 * ioa_cfg->max_devs_supported))); 9980 } 9981 9982 host->unique_id = host->host_no; 9983 host->max_cmd_len = IPR_MAX_CDB_LEN; 9984 host->can_queue = ioa_cfg->max_cmds; 9985 pci_set_drvdata(pdev, ioa_cfg); 9986 9987 for (i = 0; i < ARRAY_SIZE(ioa_cfg->hrrq); i++) { 9988 INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_free_q); 9989 INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_pending_q); 9990 spin_lock_init(&ioa_cfg->hrrq[i]._lock); 9991 if (i == 0) 9992 ioa_cfg->hrrq[i].lock = ioa_cfg->host->host_lock; 9993 else 9994 ioa_cfg->hrrq[i].lock = &ioa_cfg->hrrq[i]._lock; 9995 } 9996 } 9997 9998 /** 9999 * ipr_get_chip_info - Find adapter chip information 10000 * @dev_id: PCI device id struct 10001 * 10002 * Return value: 10003 * ptr to chip information on success / NULL on failure 10004 **/ 10005 static const struct ipr_chip_t * 10006 ipr_get_chip_info(const struct pci_device_id *dev_id) 10007 { 10008 int i; 10009 10010 for (i = 0; i < ARRAY_SIZE(ipr_chip); i++) 10011 if (ipr_chip[i].vendor == dev_id->vendor && 10012 ipr_chip[i].device == dev_id->device) 10013 return &ipr_chip[i]; 10014 return NULL; 10015 } 10016 10017 /** 10018 * ipr_wait_for_pci_err_recovery - Wait for any PCI error recovery to complete 10019 * during probe time 10020 * @ioa_cfg: ioa config struct 10021 * 10022 * Return value: 10023 * None 10024 **/ 10025 static void ipr_wait_for_pci_err_recovery(struct ipr_ioa_cfg *ioa_cfg) 10026 { 10027 struct pci_dev *pdev = ioa_cfg->pdev; 10028 10029 if (pci_channel_offline(pdev)) { 10030 wait_event_timeout(ioa_cfg->eeh_wait_q, 10031 !pci_channel_offline(pdev), 10032 IPR_PCI_ERROR_RECOVERY_TIMEOUT); 10033 pci_restore_state(pdev); 10034 } 10035 } 10036 10037 static void name_msi_vectors(struct ipr_ioa_cfg *ioa_cfg) 10038 { 10039 int vec_idx, n = sizeof(ioa_cfg->vectors_info[0].desc) - 1; 10040 10041 for (vec_idx = 0; vec_idx < ioa_cfg->nvectors; vec_idx++) { 10042 snprintf(ioa_cfg->vectors_info[vec_idx].desc, n, 10043 "host%d-%d", ioa_cfg->host->host_no, vec_idx); 10044 ioa_cfg->vectors_info[vec_idx]. 
10045 desc[strlen(ioa_cfg->vectors_info[vec_idx].desc)] = 0; 10046 } 10047 } 10048 10049 static int ipr_request_other_msi_irqs(struct ipr_ioa_cfg *ioa_cfg, 10050 struct pci_dev *pdev) 10051 { 10052 int i, rc; 10053 10054 for (i = 1; i < ioa_cfg->nvectors; i++) { 10055 rc = request_irq(pci_irq_vector(pdev, i), 10056 ipr_isr_mhrrq, 10057 0, 10058 ioa_cfg->vectors_info[i].desc, 10059 &ioa_cfg->hrrq[i]); 10060 if (rc) { 10061 while (--i > 0) 10062 free_irq(pci_irq_vector(pdev, i), 10063 &ioa_cfg->hrrq[i]); 10064 return rc; 10065 } 10066 } 10067 return 0; 10068 } 10069 10070 /** 10071 * ipr_test_intr - Handle the interrupt generated in ipr_test_msi(). 10072 * @devp: PCI device struct 10073 * @irq: IRQ number 10074 * 10075 * Description: Simply set the msi_received flag to 1 indicating that 10076 * Message Signaled Interrupts are supported. 10077 * 10078 * Return value: 10079 * IRQ_HANDLED 10080 **/ 10081 static irqreturn_t ipr_test_intr(int irq, void *devp) 10082 { 10083 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp; 10084 unsigned long lock_flags = 0; 10085 10086 dev_info(&ioa_cfg->pdev->dev, "Received IRQ : %d\n", irq); 10087 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 10088 10089 ioa_cfg->msi_received = 1; 10090 wake_up(&ioa_cfg->msi_wait_q); 10091 10092 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 10093 return IRQ_HANDLED; 10094 } 10095 10096 /** 10097 * ipr_test_msi - Test for Message Signaled Interrupt (MSI) support. 10098 * @ioa_cfg: ioa config struct 10099 * @pdev: PCI device struct 10100 * 10101 * Description: This routine sets up and initiates a test interrupt to determine 10102 * if the interrupt is received via the ipr_test_intr() service routine. 10103 * If the test fails, the driver will fall back to LSI. 10104 * 10105 * Return value: 10106 * 0 on success / non-zero on failure 10107 **/ 10108 static int ipr_test_msi(struct ipr_ioa_cfg *ioa_cfg, struct pci_dev *pdev) 10109 { 10110 int rc; 10111 unsigned long lock_flags = 0; 10112 int irq = pci_irq_vector(pdev, 0); 10113 10114 ENTER; 10115 10116 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 10117 init_waitqueue_head(&ioa_cfg->msi_wait_q); 10118 ioa_cfg->msi_received = 0; 10119 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER); 10120 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.clr_interrupt_mask_reg32); 10121 readl(ioa_cfg->regs.sense_interrupt_mask_reg); 10122 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 10123 10124 rc = request_irq(irq, ipr_test_intr, 0, IPR_NAME, ioa_cfg); 10125 if (rc) { 10126 dev_err(&pdev->dev, "Can not assign irq %d\n", irq); 10127 return rc; 10128 } else if (ipr_debug) 10129 dev_info(&pdev->dev, "IRQ assigned: %d\n", irq); 10130 10131 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.sense_interrupt_reg32); 10132 readl(ioa_cfg->regs.sense_interrupt_reg); 10133 wait_event_timeout(ioa_cfg->msi_wait_q, ioa_cfg->msi_received, HZ); 10134 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 10135 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER); 10136 10137 if (!ioa_cfg->msi_received) { 10138 /* MSI test failed */ 10139 dev_info(&pdev->dev, "MSI test failed.
Falling back to LSI.\n"); 10140 rc = -EOPNOTSUPP; 10141 } else if (ipr_debug) 10142 dev_info(&pdev->dev, "MSI test succeeded.\n"); 10143 10144 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 10145 10146 free_irq(irq, ioa_cfg); 10147 10148 LEAVE; 10149 10150 return rc; 10151 } 10152 10153 /* ipr_probe_ioa - Allocates memory and does first stage of initialization 10154 * @pdev: PCI device struct 10155 * @dev_id: PCI device id struct 10156 * 10157 * Return value: 10158 * 0 on success / non-zero on failure 10159 **/ 10160 static int ipr_probe_ioa(struct pci_dev *pdev, 10161 const struct pci_device_id *dev_id) 10162 { 10163 struct ipr_ioa_cfg *ioa_cfg; 10164 struct Scsi_Host *host; 10165 unsigned long ipr_regs_pci; 10166 void __iomem *ipr_regs; 10167 int rc = PCIBIOS_SUCCESSFUL; 10168 volatile u32 mask, uproc, interrupts; 10169 unsigned long lock_flags, driver_lock_flags; 10170 unsigned int irq_flag; 10171 10172 ENTER; 10173 10174 dev_info(&pdev->dev, "Found IOA with IRQ: %d\n", pdev->irq); 10175 host = scsi_host_alloc(&driver_template, sizeof(*ioa_cfg)); 10176 10177 if (!host) { 10178 dev_err(&pdev->dev, "call to scsi_host_alloc failed!\n"); 10179 rc = -ENOMEM; 10180 goto out; 10181 } 10182 10183 ioa_cfg = (struct ipr_ioa_cfg *)host->hostdata; 10184 memset(ioa_cfg, 0, sizeof(struct ipr_ioa_cfg)); 10185 ata_host_init(&ioa_cfg->ata_host, &pdev->dev, &ipr_sata_ops); 10186 10187 ioa_cfg->ipr_chip = ipr_get_chip_info(dev_id); 10188 10189 if (!ioa_cfg->ipr_chip) { 10190 dev_err(&pdev->dev, "Unknown adapter chipset 0x%04X 0x%04X\n", 10191 dev_id->vendor, dev_id->device); 10192 goto out_scsi_host_put; 10193 } 10194 10195 /* set SIS 32 or SIS 64 */ 10196 ioa_cfg->sis64 = ioa_cfg->ipr_chip->sis_type == IPR_SIS64 ? 1 : 0; 10197 ioa_cfg->chip_cfg = ioa_cfg->ipr_chip->cfg; 10198 ioa_cfg->clear_isr = ioa_cfg->chip_cfg->clear_isr; 10199 ioa_cfg->max_cmds = ioa_cfg->chip_cfg->max_cmds; 10200 10201 if (ipr_transop_timeout) 10202 ioa_cfg->transop_timeout = ipr_transop_timeout; 10203 else if (dev_id->driver_data & IPR_USE_LONG_TRANSOP_TIMEOUT) 10204 ioa_cfg->transop_timeout = IPR_LONG_OPERATIONAL_TIMEOUT; 10205 else 10206 ioa_cfg->transop_timeout = IPR_OPERATIONAL_TIMEOUT; 10207 10208 ioa_cfg->revid = pdev->revision; 10209 10210 ipr_init_ioa_cfg(ioa_cfg, host, pdev); 10211 10212 ipr_regs_pci = pci_resource_start(pdev, 0); 10213 10214 rc = pci_request_regions(pdev, IPR_NAME); 10215 if (rc < 0) { 10216 dev_err(&pdev->dev, 10217 "Couldn't register memory range of registers\n"); 10218 goto out_scsi_host_put; 10219 } 10220 10221 rc = pci_enable_device(pdev); 10222 10223 if (rc || pci_channel_offline(pdev)) { 10224 if (pci_channel_offline(pdev)) { 10225 ipr_wait_for_pci_err_recovery(ioa_cfg); 10226 rc = pci_enable_device(pdev); 10227 } 10228 10229 if (rc) { 10230 dev_err(&pdev->dev, "Cannot enable adapter\n"); 10231 ipr_wait_for_pci_err_recovery(ioa_cfg); 10232 goto out_release_regions; 10233 } 10234 } 10235 10236 ipr_regs = pci_ioremap_bar(pdev, 0); 10237 10238 if (!ipr_regs) { 10239 dev_err(&pdev->dev, 10240 "Couldn't map memory range of registers\n"); 10241 rc = -ENOMEM; 10242 goto out_disable; 10243 } 10244 10245 ioa_cfg->hdw_dma_regs = ipr_regs; 10246 ioa_cfg->hdw_dma_regs_pci = ipr_regs_pci; 10247 ioa_cfg->ioa_mailbox = ioa_cfg->chip_cfg->mailbox + ipr_regs; 10248 10249 ipr_init_regs(ioa_cfg); 10250 10251 if (ioa_cfg->sis64) { 10252 rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); 10253 if (rc < 0) { 10254 dev_dbg(&pdev->dev, "Failed to set 64 bit DMA mask\n"); 10255 rc = 
dma_set_mask_and_coherent(&pdev->dev, 10256 DMA_BIT_MASK(32)); 10257 } 10258 } else 10259 rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); 10260 10261 if (rc < 0) { 10262 dev_err(&pdev->dev, "Failed to set DMA mask\n"); 10263 goto cleanup_nomem; 10264 } 10265 10266 rc = pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, 10267 ioa_cfg->chip_cfg->cache_line_size); 10268 10269 if (rc != PCIBIOS_SUCCESSFUL) { 10270 dev_err(&pdev->dev, "Write of cache line size failed\n"); 10271 ipr_wait_for_pci_err_recovery(ioa_cfg); 10272 rc = -EIO; 10273 goto cleanup_nomem; 10274 } 10275 10276 /* Issue MMIO read to ensure card is not in EEH */ 10277 interrupts = readl(ioa_cfg->regs.sense_interrupt_reg); 10278 ipr_wait_for_pci_err_recovery(ioa_cfg); 10279 10280 if (ipr_number_of_msix > IPR_MAX_MSIX_VECTORS) { 10281 dev_err(&pdev->dev, "The max number of MSIX is %d\n", 10282 IPR_MAX_MSIX_VECTORS); 10283 ipr_number_of_msix = IPR_MAX_MSIX_VECTORS; 10284 } 10285 10286 irq_flag = PCI_IRQ_LEGACY; 10287 if (ioa_cfg->ipr_chip->has_msi) 10288 irq_flag |= PCI_IRQ_MSI | PCI_IRQ_MSIX; 10289 rc = pci_alloc_irq_vectors(pdev, 1, ipr_number_of_msix, irq_flag); 10290 if (rc < 0) { 10291 ipr_wait_for_pci_err_recovery(ioa_cfg); 10292 goto cleanup_nomem; 10293 } 10294 ioa_cfg->nvectors = rc; 10295 10296 if (!pdev->msi_enabled && !pdev->msix_enabled) 10297 ioa_cfg->clear_isr = 1; 10298 10299 pci_set_master(pdev); 10300 10301 if (pci_channel_offline(pdev)) { 10302 ipr_wait_for_pci_err_recovery(ioa_cfg); 10303 pci_set_master(pdev); 10304 if (pci_channel_offline(pdev)) { 10305 rc = -EIO; 10306 goto out_msi_disable; 10307 } 10308 } 10309 10310 if (pdev->msi_enabled || pdev->msix_enabled) { 10311 rc = ipr_test_msi(ioa_cfg, pdev); 10312 switch (rc) { 10313 case 0: 10314 dev_info(&pdev->dev, 10315 "Request for %d MSI%ss succeeded.", ioa_cfg->nvectors, 10316 pdev->msix_enabled ? 
"-X" : ""); 10317 break; 10318 case -EOPNOTSUPP: 10319 ipr_wait_for_pci_err_recovery(ioa_cfg); 10320 pci_free_irq_vectors(pdev); 10321 10322 ioa_cfg->nvectors = 1; 10323 ioa_cfg->clear_isr = 1; 10324 break; 10325 default: 10326 goto out_msi_disable; 10327 } 10328 } 10329 10330 ioa_cfg->hrrq_num = min3(ioa_cfg->nvectors, 10331 (unsigned int)num_online_cpus(), 10332 (unsigned int)IPR_MAX_HRRQ_NUM); 10333 10334 if ((rc = ipr_save_pcix_cmd_reg(ioa_cfg))) 10335 goto out_msi_disable; 10336 10337 if ((rc = ipr_set_pcix_cmd_reg(ioa_cfg))) 10338 goto out_msi_disable; 10339 10340 rc = ipr_alloc_mem(ioa_cfg); 10341 if (rc < 0) { 10342 dev_err(&pdev->dev, 10343 "Couldn't allocate enough memory for device driver!\n"); 10344 goto out_msi_disable; 10345 } 10346 10347 /* Save away PCI config space for use following IOA reset */ 10348 rc = pci_save_state(pdev); 10349 10350 if (rc != PCIBIOS_SUCCESSFUL) { 10351 dev_err(&pdev->dev, "Failed to save PCI config space\n"); 10352 rc = -EIO; 10353 goto cleanup_nolog; 10354 } 10355 10356 /* 10357 * If HRRQ updated interrupt is not masked, or reset alert is set, 10358 * the card is in an unknown state and needs a hard reset 10359 */ 10360 mask = readl(ioa_cfg->regs.sense_interrupt_mask_reg32); 10361 interrupts = readl(ioa_cfg->regs.sense_interrupt_reg32); 10362 uproc = readl(ioa_cfg->regs.sense_uproc_interrupt_reg32); 10363 if ((mask & IPR_PCII_HRRQ_UPDATED) == 0 || (uproc & IPR_UPROCI_RESET_ALERT)) 10364 ioa_cfg->needs_hard_reset = 1; 10365 if ((interrupts & IPR_PCII_ERROR_INTERRUPTS) || reset_devices) 10366 ioa_cfg->needs_hard_reset = 1; 10367 if (interrupts & IPR_PCII_IOA_UNIT_CHECKED) 10368 ioa_cfg->ioa_unit_checked = 1; 10369 10370 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 10371 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER); 10372 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 10373 10374 if (pdev->msi_enabled || pdev->msix_enabled) { 10375 name_msi_vectors(ioa_cfg); 10376 rc = request_irq(pci_irq_vector(pdev, 0), ipr_isr, 0, 10377 ioa_cfg->vectors_info[0].desc, 10378 &ioa_cfg->hrrq[0]); 10379 if (!rc) 10380 rc = ipr_request_other_msi_irqs(ioa_cfg, pdev); 10381 } else { 10382 rc = request_irq(pdev->irq, ipr_isr, 10383 IRQF_SHARED, 10384 IPR_NAME, &ioa_cfg->hrrq[0]); 10385 } 10386 if (rc) { 10387 dev_err(&pdev->dev, "Couldn't register IRQ %d! 
rc=%d\n", 10388 pdev->irq, rc); 10389 goto cleanup_nolog; 10390 } 10391 10392 if ((dev_id->driver_data & IPR_USE_PCI_WARM_RESET) || 10393 (dev_id->device == PCI_DEVICE_ID_IBM_OBSIDIAN_E && !ioa_cfg->revid)) { 10394 ioa_cfg->needs_warm_reset = 1; 10395 ioa_cfg->reset = ipr_reset_slot_reset; 10396 10397 ioa_cfg->reset_work_q = alloc_ordered_workqueue("ipr_reset_%d", 10398 WQ_MEM_RECLAIM, host->host_no); 10399 10400 if (!ioa_cfg->reset_work_q) { 10401 dev_err(&pdev->dev, "Couldn't register reset workqueue\n"); 10402 rc = -ENOMEM; 10403 goto out_free_irq; 10404 } 10405 } else 10406 ioa_cfg->reset = ipr_reset_start_bist; 10407 10408 spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags); 10409 list_add_tail(&ioa_cfg->queue, &ipr_ioa_head); 10410 spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags); 10411 10412 LEAVE; 10413 out: 10414 return rc; 10415 10416 out_free_irq: 10417 ipr_free_irqs(ioa_cfg); 10418 cleanup_nolog: 10419 ipr_free_mem(ioa_cfg); 10420 out_msi_disable: 10421 ipr_wait_for_pci_err_recovery(ioa_cfg); 10422 pci_free_irq_vectors(pdev); 10423 cleanup_nomem: 10424 iounmap(ipr_regs); 10425 out_disable: 10426 pci_disable_device(pdev); 10427 out_release_regions: 10428 pci_release_regions(pdev); 10429 out_scsi_host_put: 10430 scsi_host_put(host); 10431 goto out; 10432 } 10433 10434 /** 10435 * ipr_initiate_ioa_bringdown - Bring down an adapter 10436 * @ioa_cfg: ioa config struct 10437 * @shutdown_type: shutdown type 10438 * 10439 * Description: This function will initiate bringing down the adapter. 10440 * This consists of issuing an IOA shutdown to the adapter 10441 * to flush the cache, and running BIST. 10442 * If the caller needs to wait on the completion of the reset, 10443 * the caller must sleep on the reset_wait_q. 10444 * 10445 * Return value: 10446 * none 10447 **/ 10448 static void ipr_initiate_ioa_bringdown(struct ipr_ioa_cfg *ioa_cfg, 10449 enum ipr_shutdown_type shutdown_type) 10450 { 10451 ENTER; 10452 if (ioa_cfg->sdt_state == WAIT_FOR_DUMP) 10453 ioa_cfg->sdt_state = ABORT_DUMP; 10454 ioa_cfg->reset_retries = 0; 10455 ioa_cfg->in_ioa_bringdown = 1; 10456 ipr_initiate_ioa_reset(ioa_cfg, shutdown_type); 10457 LEAVE; 10458 } 10459 10460 /** 10461 * __ipr_remove - Remove a single adapter 10462 * @pdev: pci device struct 10463 * 10464 * Adapter hot plug remove entry point. 
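 *
 * It waits out any reset in progress, flags every HRRQ with removing_ioa,
 * performs a normal shutdown bringdown, and only after reset_wait_q is
 * signalled does it release the adapter's resources.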
10465 * 10466 * Return value: 10467 * none 10468 **/ 10469 static void __ipr_remove(struct pci_dev *pdev) 10470 { 10471 unsigned long host_lock_flags = 0; 10472 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev); 10473 int i; 10474 unsigned long driver_lock_flags; 10475 ENTER; 10476 10477 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags); 10478 while (ioa_cfg->in_reset_reload) { 10479 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags); 10480 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); 10481 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags); 10482 } 10483 10484 for (i = 0; i < ioa_cfg->hrrq_num; i++) { 10485 spin_lock(&ioa_cfg->hrrq[i]._lock); 10486 ioa_cfg->hrrq[i].removing_ioa = 1; 10487 spin_unlock(&ioa_cfg->hrrq[i]._lock); 10488 } 10489 wmb(); 10490 ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL); 10491 10492 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags); 10493 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); 10494 flush_work(&ioa_cfg->work_q); 10495 if (ioa_cfg->reset_work_q) 10496 flush_workqueue(ioa_cfg->reset_work_q); 10497 INIT_LIST_HEAD(&ioa_cfg->used_res_q); 10498 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags); 10499 10500 spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags); 10501 list_del(&ioa_cfg->queue); 10502 spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags); 10503 10504 if (ioa_cfg->sdt_state == ABORT_DUMP) 10505 ioa_cfg->sdt_state = WAIT_FOR_DUMP; 10506 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags); 10507 10508 ipr_free_all_resources(ioa_cfg); 10509 10510 LEAVE; 10511 } 10512 10513 /** 10514 * ipr_remove - IOA hot plug remove entry point 10515 * @pdev: pci device struct 10516 * 10517 * Adapter hot plug remove entry point. 
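 *
 * Unlike __ipr_remove(), this entry point also removes the sysfs trace,
 * dump and async error log attributes and the Scsi_Host before bringing
 * the adapter down.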
10518 * 10519 * Return value: 10520 * none 10521 **/ 10522 static void ipr_remove(struct pci_dev *pdev) 10523 { 10524 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev); 10525 10526 ENTER; 10527 10528 ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj, 10529 &ipr_trace_attr); 10530 ipr_remove_dump_file(&ioa_cfg->host->shost_dev.kobj, 10531 &ipr_dump_attr); 10532 sysfs_remove_bin_file(&ioa_cfg->host->shost_dev.kobj, 10533 &ipr_ioa_async_err_log); 10534 scsi_remove_host(ioa_cfg->host); 10535 10536 __ipr_remove(pdev); 10537 10538 LEAVE; 10539 } 10540 10541 /** 10542 * ipr_probe - Adapter hot plug add entry point 10543 * @pdev: pci device struct 10544 * @dev_id: pci device ID 10545 * 10546 * Return value: 10547 * 0 on success / non-zero on failure 10548 **/ 10549 static int ipr_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id) 10550 { 10551 struct ipr_ioa_cfg *ioa_cfg; 10552 unsigned long flags; 10553 int rc, i; 10554 10555 rc = ipr_probe_ioa(pdev, dev_id); 10556 10557 if (rc) 10558 return rc; 10559 10560 ioa_cfg = pci_get_drvdata(pdev); 10561 rc = ipr_probe_ioa_part2(ioa_cfg); 10562 10563 if (rc) { 10564 __ipr_remove(pdev); 10565 return rc; 10566 } 10567 10568 rc = scsi_add_host(ioa_cfg->host, &pdev->dev); 10569 10570 if (rc) { 10571 __ipr_remove(pdev); 10572 return rc; 10573 } 10574 10575 rc = ipr_create_trace_file(&ioa_cfg->host->shost_dev.kobj, 10576 &ipr_trace_attr); 10577 10578 if (rc) { 10579 scsi_remove_host(ioa_cfg->host); 10580 __ipr_remove(pdev); 10581 return rc; 10582 } 10583 10584 rc = sysfs_create_bin_file(&ioa_cfg->host->shost_dev.kobj, 10585 &ipr_ioa_async_err_log); 10586 10587 if (rc) { 10588 ipr_remove_dump_file(&ioa_cfg->host->shost_dev.kobj, 10589 &ipr_dump_attr); 10590 ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj, 10591 &ipr_trace_attr); 10592 scsi_remove_host(ioa_cfg->host); 10593 __ipr_remove(pdev); 10594 return rc; 10595 } 10596 10597 rc = ipr_create_dump_file(&ioa_cfg->host->shost_dev.kobj, 10598 &ipr_dump_attr); 10599 10600 if (rc) { 10601 sysfs_remove_bin_file(&ioa_cfg->host->shost_dev.kobj, 10602 &ipr_ioa_async_err_log); 10603 ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj, 10604 &ipr_trace_attr); 10605 scsi_remove_host(ioa_cfg->host); 10606 __ipr_remove(pdev); 10607 return rc; 10608 } 10609 spin_lock_irqsave(ioa_cfg->host->host_lock, flags); 10610 ioa_cfg->scan_enabled = 1; 10611 schedule_work(&ioa_cfg->work_q); 10612 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags); 10613 10614 ioa_cfg->iopoll_weight = ioa_cfg->chip_cfg->iopoll_weight; 10615 10616 if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) { 10617 for (i = 1; i < ioa_cfg->hrrq_num; i++) { 10618 irq_poll_init(&ioa_cfg->hrrq[i].iopoll, 10619 ioa_cfg->iopoll_weight, ipr_iopoll); 10620 } 10621 } 10622 10623 scsi_scan_host(ioa_cfg->host); 10624 10625 return 0; 10626 } 10627 10628 /** 10629 * ipr_shutdown - Shutdown handler. 10630 * @pdev: pci device struct 10631 * 10632 * This function is invoked upon system shutdown/reboot. It will issue 10633 * an adapter shutdown to the adapter to flush the write cache. 
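 *
 * As a special case, when ipr_fast_reboot is set and the system is
 * restarting, SIS-64 adapters are only quiesced (IPR_SHUTDOWN_QUIESCE)
 * rather than fully shut down, and their IRQs are freed and the PCI
 * device disabled so the reboot can proceed quickly.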
10634 * 10635 * Return value: 10636 * none 10637 **/ 10638 static void ipr_shutdown(struct pci_dev *pdev) 10639 { 10640 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev); 10641 unsigned long lock_flags = 0; 10642 enum ipr_shutdown_type shutdown_type = IPR_SHUTDOWN_NORMAL; 10643 int i; 10644 10645 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 10646 if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) { 10647 ioa_cfg->iopoll_weight = 0; 10648 for (i = 1; i < ioa_cfg->hrrq_num; i++) 10649 irq_poll_disable(&ioa_cfg->hrrq[i].iopoll); 10650 } 10651 10652 while (ioa_cfg->in_reset_reload) { 10653 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 10654 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); 10655 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 10656 } 10657 10658 if (ipr_fast_reboot && system_state == SYSTEM_RESTART && ioa_cfg->sis64) 10659 shutdown_type = IPR_SHUTDOWN_QUIESCE; 10660 10661 ipr_initiate_ioa_bringdown(ioa_cfg, shutdown_type); 10662 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 10663 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); 10664 if (ipr_fast_reboot && system_state == SYSTEM_RESTART && ioa_cfg->sis64) { 10665 ipr_free_irqs(ioa_cfg); 10666 pci_disable_device(ioa_cfg->pdev); 10667 } 10668 } 10669 10670 static struct pci_device_id ipr_pci_table[] = { 10671 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, 10672 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5702, 0, 0, 0 }, 10673 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, 10674 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5703, 0, 0, 0 }, 10675 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, 10676 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573D, 0, 0, 0 }, 10677 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, 10678 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573E, 0, 0, 0 }, 10679 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, 10680 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571B, 0, 0, 0 }, 10681 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, 10682 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572E, 0, 0, 0 }, 10683 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, 10684 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571A, 0, 0, 0 }, 10685 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, 10686 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575B, 0, 0, 10687 IPR_USE_LONG_TRANSOP_TIMEOUT }, 10688 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, 10689 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 }, 10690 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, 10691 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0, 10692 IPR_USE_LONG_TRANSOP_TIMEOUT }, 10693 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, 10694 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0, 10695 IPR_USE_LONG_TRANSOP_TIMEOUT }, 10696 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, 10697 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 }, 10698 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, 10699 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0, 10700 IPR_USE_LONG_TRANSOP_TIMEOUT}, 10701 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, 10702 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0, 10703 IPR_USE_LONG_TRANSOP_TIMEOUT }, 10704 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, 10705 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574E, 0, 0, 10706 IPR_USE_LONG_TRANSOP_TIMEOUT }, 10707 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, 10708 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B3, 0, 0, 0 }, 10709 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, 10710 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CC, 0, 0, 0 }, 10711 { 
PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, 10712 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B7, 0, 0, 10713 IPR_USE_LONG_TRANSOP_TIMEOUT | IPR_USE_PCI_WARM_RESET }, 10714 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, 10715 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2780, 0, 0, 0 }, 10716 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, 10717 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571E, 0, 0, 0 }, 10718 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, 10719 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571F, 0, 0, 10720 IPR_USE_LONG_TRANSOP_TIMEOUT }, 10721 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, 10722 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572F, 0, 0, 10723 IPR_USE_LONG_TRANSOP_TIMEOUT }, 10724 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2, 10725 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B5, 0, 0, 0 }, 10726 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2, 10727 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574D, 0, 0, 0 }, 10728 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2, 10729 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B2, 0, 0, 0 }, 10730 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2, 10731 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C0, 0, 0, 0 }, 10732 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2, 10733 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C3, 0, 0, 0 }, 10734 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2, 10735 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C4, 0, 0, 0 }, 10736 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, 10737 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B4, 0, 0, 0 }, 10738 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, 10739 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B1, 0, 0, 0 }, 10740 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, 10741 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C6, 0, 0, 0 }, 10742 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, 10743 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C8, 0, 0, 0 }, 10744 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, 10745 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CE, 0, 0, 0 }, 10746 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, 10747 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D5, 0, 0, 0 }, 10748 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, 10749 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D6, 0, 0, 0 }, 10750 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, 10751 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D7, 0, 0, 0 }, 10752 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, 10753 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D8, 0, 0, 0 }, 10754 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, 10755 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D9, 0, 0, 0 }, 10756 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, 10757 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57DA, 0, 0, 0 }, 10758 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, 10759 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EB, 0, 0, 0 }, 10760 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, 10761 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EC, 0, 0, 0 }, 10762 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, 10763 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57ED, 0, 0, 0 }, 10764 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, 10765 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EE, 0, 0, 0 }, 10766 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, 10767 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EF, 0, 0, 0 }, 10768 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, 10769 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57F0, 0, 0, 0 }, 10770 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, 10771 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CCA, 0, 0, 0 }, 10772 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, 10773 PCI_VENDOR_ID_IBM, 
IPR_SUBS_DEV_ID_2CD2, 0, 0, 0 }, 10774 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, 10775 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CCD, 0, 0, 0 }, 10776 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_RATTLESNAKE, 10777 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_580A, 0, 0, 0 }, 10778 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_RATTLESNAKE, 10779 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_580B, 0, 0, 0 }, 10780 { } 10781 }; 10782 MODULE_DEVICE_TABLE(pci, ipr_pci_table); 10783 10784 static const struct pci_error_handlers ipr_err_handler = { 10785 .error_detected = ipr_pci_error_detected, 10786 .mmio_enabled = ipr_pci_mmio_enabled, 10787 .slot_reset = ipr_pci_slot_reset, 10788 }; 10789 10790 static struct pci_driver ipr_driver = { 10791 .name = IPR_NAME, 10792 .id_table = ipr_pci_table, 10793 .probe = ipr_probe, 10794 .remove = ipr_remove, 10795 .shutdown = ipr_shutdown, 10796 .err_handler = &ipr_err_handler, 10797 }; 10798 10799 /** 10800 * ipr_halt_done - Shutdown prepare completion 10801 * @ipr_cmd: ipr command struct 10802 * 10803 * Return value: 10804 * none 10805 **/ 10806 static void ipr_halt_done(struct ipr_cmnd *ipr_cmd) 10807 { 10808 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); 10809 } 10810 10811 /** 10812 * ipr_halt - Issue shutdown prepare to all adapters 10813 * @nb: Notifier block 10814 * @event: Notifier event 10815 * @buf: Notifier data (unused) 10816 * 10817 * Return value: 10818 * NOTIFY_OK on success / NOTIFY_DONE on failure 10819 **/ 10820 static int ipr_halt(struct notifier_block *nb, ulong event, void *buf) 10821 { 10822 struct ipr_cmnd *ipr_cmd; 10823 struct ipr_ioa_cfg *ioa_cfg; 10824 unsigned long flags = 0, driver_lock_flags; 10825 10826 if (event != SYS_RESTART && event != SYS_HALT && event != SYS_POWER_OFF) 10827 return NOTIFY_DONE; 10828 10829 spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags); 10830 10831 list_for_each_entry(ioa_cfg, &ipr_ioa_head, queue) { 10832 spin_lock_irqsave(ioa_cfg->host->host_lock, flags); 10833 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds || 10834 (ipr_fast_reboot && event == SYS_RESTART && ioa_cfg->sis64)) { 10835 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags); 10836 continue; 10837 } 10838 10839 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg); 10840 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE); 10841 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD; 10842 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN; 10843 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_SHUTDOWN_PREPARE_FOR_NORMAL; 10844 10845 ipr_do_req(ipr_cmd, ipr_halt_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT); 10846 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags); 10847 } 10848 spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags); 10849 10850 return NOTIFY_OK; 10851 } 10852 10853 static struct notifier_block ipr_notifier = { 10854 ipr_halt, NULL, 0 10855 }; 10856 10857 /** 10858 * ipr_init - Module entry point 10859 * 10860 * Return value: 10861 * 0 on success / negative value on failure 10862 **/ 10863 static int __init ipr_init(void) 10864 { 10865 int rc; 10866 10867 ipr_info("IBM Power RAID SCSI Device Driver version: %s %s\n", 10868 IPR_DRIVER_VERSION, IPR_DRIVER_DATE); 10869 10870 register_reboot_notifier(&ipr_notifier); 10871 rc = pci_register_driver(&ipr_driver); 10872 if (rc) { 10873 unregister_reboot_notifier(&ipr_notifier); 10874 return rc; 10875 } 10876 10877 return 0; 10878 } 10879 10880 /** 10881 * ipr_exit - Module unload 10882 * 10883 * Module unload entry point. 
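 *
 * It undoes ipr_init(): the reboot notifier is unregistered before the
 * PCI driver itself.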
10884 * 10885 * Return value: 10886 * none 10887 **/ 10888 static void __exit ipr_exit(void) 10889 { 10890 unregister_reboot_notifier(&ipr_notifier); 10891 pci_unregister_driver(&ipr_driver); 10892 } 10893 10894 module_init(ipr_init); 10895 module_exit(ipr_exit); 10896