1 /* 2 * ipr.c -- driver for IBM Power Linux RAID adapters 3 * 4 * Written By: Brian King <brking@us.ibm.com>, IBM Corporation 5 * 6 * Copyright (C) 2003, 2004 IBM Corporation 7 * 8 * This program is free software; you can redistribute it and/or modify 9 * it under the terms of the GNU General Public License as published by 10 * the Free Software Foundation; either version 2 of the License, or 11 * (at your option) any later version. 12 * 13 * This program is distributed in the hope that it will be useful, 14 * but WITHOUT ANY WARRANTY; without even the implied warranty of 15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 16 * GNU General Public License for more details. 17 * 18 * You should have received a copy of the GNU General Public License 19 * along with this program; if not, write to the Free Software 20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 21 * 22 */ 23 24 /* 25 * Notes: 26 * 27 * This driver is used to control the following SCSI adapters: 28 * 29 * IBM iSeries: 5702, 5703, 2780, 5709, 570A, 570B 30 * 31 * IBM pSeries: PCI-X Dual Channel Ultra 320 SCSI RAID Adapter 32 * PCI-X Dual Channel Ultra 320 SCSI Adapter 33 * PCI-X Dual Channel Ultra 320 SCSI RAID Enablement Card 34 * Embedded SCSI adapter on p615 and p655 systems 35 * 36 * Supported Hardware Features: 37 * - Ultra 320 SCSI controller 38 * - PCI-X host interface 39 * - Embedded PowerPC RISC Processor and Hardware XOR DMA Engine 40 * - Non-Volatile Write Cache 41 * - Supports attachment of non-RAID disks, tape, and optical devices 42 * - RAID Levels 0, 5, 10 43 * - Hot spare 44 * - Background Parity Checking 45 * - Background Data Scrubbing 46 * - Ability to increase the capacity of an existing RAID 5 disk array 47 * by adding disks 48 * 49 * Driver Features: 50 * - Tagged command queuing 51 * - Adapter microcode download 52 * - PCI hot plug 53 * - SCSI device hot plug 54 * 55 */ 56 57 #include <linux/fs.h> 58 #include <linux/init.h> 59 #include <linux/types.h> 60 #include <linux/errno.h> 61 #include <linux/kernel.h> 62 #include <linux/slab.h> 63 #include <linux/vmalloc.h> 64 #include <linux/ioport.h> 65 #include <linux/delay.h> 66 #include <linux/pci.h> 67 #include <linux/wait.h> 68 #include <linux/spinlock.h> 69 #include <linux/sched.h> 70 #include <linux/interrupt.h> 71 #include <linux/blkdev.h> 72 #include <linux/firmware.h> 73 #include <linux/module.h> 74 #include <linux/moduleparam.h> 75 #include <linux/libata.h> 76 #include <linux/hdreg.h> 77 #include <linux/reboot.h> 78 #include <linux/stringify.h> 79 #include <asm/io.h> 80 #include <asm/irq.h> 81 #include <asm/processor.h> 82 #include <scsi/scsi.h> 83 #include <scsi/scsi_host.h> 84 #include <scsi/scsi_tcq.h> 85 #include <scsi/scsi_eh.h> 86 #include <scsi/scsi_cmnd.h> 87 #include "ipr.h" 88 89 /* 90 * Global Data 91 */ 92 static LIST_HEAD(ipr_ioa_head); 93 static unsigned int ipr_log_level = IPR_DEFAULT_LOG_LEVEL; 94 static unsigned int ipr_max_speed = 1; 95 static int ipr_testmode = 0; 96 static unsigned int ipr_fastfail = 0; 97 static unsigned int ipr_transop_timeout = 0; 98 static unsigned int ipr_debug = 0; 99 static unsigned int ipr_max_devs = IPR_DEFAULT_SIS64_DEVS; 100 static unsigned int ipr_dual_ioa_raid = 1; 101 static unsigned int ipr_number_of_msix = 2; 102 static unsigned int ipr_fast_reboot; 103 static DEFINE_SPINLOCK(ipr_driver_lock); 104 105 /* This table describes the differences between DMA controller chips */ 106 static const struct ipr_chip_cfg_t ipr_chip_cfg[] = { 107 { /* Gemstone, Citrine, Obsidian, and Obsidian-E */ 108 .mailbox = 0x0042C, 109 .max_cmds = 100, 110 .cache_line_size = 0x20, 111 .clear_isr = 1, 112 .iopoll_weight = 0, 113 { 114 .set_interrupt_mask_reg = 0x0022C, 115 .clr_interrupt_mask_reg = 0x00230, 116 .clr_interrupt_mask_reg32 = 0x00230, 117 .sense_interrupt_mask_reg = 0x0022C, 118 .sense_interrupt_mask_reg32 = 0x0022C, 119 .clr_interrupt_reg = 0x00228, 120 .clr_interrupt_reg32 = 0x00228, 121 .sense_interrupt_reg = 0x00224, 122 .sense_interrupt_reg32 = 0x00224, 123 .ioarrin_reg = 0x00404, 124 .sense_uproc_interrupt_reg = 0x00214, 125 .sense_uproc_interrupt_reg32 = 0x00214, 126 .set_uproc_interrupt_reg = 0x00214, 127 .set_uproc_interrupt_reg32 = 0x00214, 128 .clr_uproc_interrupt_reg = 0x00218, 129 .clr_uproc_interrupt_reg32 = 0x00218 130 } 131 }, 132 { /* Snipe and Scamp */ 133 .mailbox = 0x0052C, 134 .max_cmds = 100, 135 .cache_line_size = 0x20, 136 .clear_isr = 1, 137 .iopoll_weight = 0, 138 { 139 .set_interrupt_mask_reg = 0x00288, 140 .clr_interrupt_mask_reg = 0x0028C, 141 .clr_interrupt_mask_reg32 = 0x0028C, 142 .sense_interrupt_mask_reg = 0x00288, 143 .sense_interrupt_mask_reg32 = 0x00288, 144 .clr_interrupt_reg = 0x00284, 145 .clr_interrupt_reg32 = 0x00284, 146 .sense_interrupt_reg = 0x00280, 147 .sense_interrupt_reg32 = 0x00280, 148 .ioarrin_reg = 0x00504, 149 .sense_uproc_interrupt_reg = 0x00290, 150 .sense_uproc_interrupt_reg32 = 0x00290, 151 .set_uproc_interrupt_reg = 0x00290, 152 .set_uproc_interrupt_reg32 = 0x00290, 153 .clr_uproc_interrupt_reg = 0x00294, 154 .clr_uproc_interrupt_reg32 = 0x00294 155 } 156 }, 157 { /* CRoC */ 158 .mailbox = 0x00044, 159 .max_cmds = 1000, 160 .cache_line_size = 0x20, 161 .clear_isr = 0, 162 .iopoll_weight = 64, 163 { 164 .set_interrupt_mask_reg = 0x00010, 165 .clr_interrupt_mask_reg = 0x00018, 166 .clr_interrupt_mask_reg32 = 0x0001C, 167 .sense_interrupt_mask_reg = 0x00010, 168 .sense_interrupt_mask_reg32 = 0x00014, 169 .clr_interrupt_reg = 0x00008, 170 .clr_interrupt_reg32 = 0x0000C, 171 .sense_interrupt_reg = 0x00000, 172 .sense_interrupt_reg32 = 0x00004, 173 .ioarrin_reg = 0x00070, 174 .sense_uproc_interrupt_reg = 0x00020, 175 .sense_uproc_interrupt_reg32 = 0x00024, 176 .set_uproc_interrupt_reg = 0x00020, 177 .set_uproc_interrupt_reg32 = 0x00024, 178 .clr_uproc_interrupt_reg = 0x00028, 179 .clr_uproc_interrupt_reg32 = 0x0002C, 180 .init_feedback_reg = 0x0005C, 181 .dump_addr_reg = 0x00064, 182 .dump_data_reg = 0x00068, 183 .endian_swap_reg = 0x00084 184 } 185 }, 186 }; 187 188 static const struct ipr_chip_t ipr_chip[] = { 189 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] }, 190 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] }, 191 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] }, 192 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] }, 193 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, IPR_USE_MSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] }, 194 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] }, 195 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] }, 196 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2, IPR_USE_MSI, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] }, 197 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, IPR_USE_MSI, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] } 198 }; 199 200 static int ipr_max_bus_speeds[] = { 201 IPR_80MBs_SCSI_RATE, IPR_U160_SCSI_RATE, IPR_U320_SCSI_RATE 202 }; 203 204 MODULE_AUTHOR("Brian King <brking@us.ibm.com>"); 205 MODULE_DESCRIPTION("IBM Power RAID SCSI Adapter Driver"); 206 module_param_named(max_speed, ipr_max_speed, uint, 0); 207 MODULE_PARM_DESC(max_speed, "Maximum bus speed (0-2). Default: 1=U160. Speeds: 0=80 MB/s, 1=U160, 2=U320"); 208 module_param_named(log_level, ipr_log_level, uint, 0); 209 MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of device driver"); 210 module_param_named(testmode, ipr_testmode, int, 0); 211 MODULE_PARM_DESC(testmode, "DANGEROUS!!! Allows unsupported configurations"); 212 module_param_named(fastfail, ipr_fastfail, int, S_IRUGO | S_IWUSR); 213 MODULE_PARM_DESC(fastfail, "Reduce timeouts and retries"); 214 module_param_named(transop_timeout, ipr_transop_timeout, int, 0); 215 MODULE_PARM_DESC(transop_timeout, "Time in seconds to wait for adapter to come operational (default: 300)"); 216 module_param_named(debug, ipr_debug, int, S_IRUGO | S_IWUSR); 217 MODULE_PARM_DESC(debug, "Enable device driver debugging logging. Set to 1 to enable. (default: 0)"); 218 module_param_named(dual_ioa_raid, ipr_dual_ioa_raid, int, 0); 219 MODULE_PARM_DESC(dual_ioa_raid, "Enable dual adapter RAID support. Set to 1 to enable. (default: 1)"); 220 module_param_named(max_devs, ipr_max_devs, int, 0); 221 MODULE_PARM_DESC(max_devs, "Specify the maximum number of physical devices. " 222 "[Default=" __stringify(IPR_DEFAULT_SIS64_DEVS) "]"); 223 module_param_named(number_of_msix, ipr_number_of_msix, int, 0); 224 MODULE_PARM_DESC(number_of_msix, "Specify the number of MSIX interrupts to use on capable adapters (1 - 16). (default:2)"); 225 module_param_named(fast_reboot, ipr_fast_reboot, int, S_IRUGO | S_IWUSR); 226 MODULE_PARM_DESC(fast_reboot, "Skip adapter shutdown during reboot. Set to 1 to enable. (default: 0)"); 227 MODULE_LICENSE("GPL"); 228 MODULE_VERSION(IPR_DRIVER_VERSION); 229 230 /* A constant array of IOASCs/URCs/Error Messages */ 231 static const 232 struct ipr_error_table_t ipr_error_table[] = { 233 {0x00000000, 1, IPR_DEFAULT_LOG_LEVEL, 234 "8155: An unknown error was received"}, 235 {0x00330000, 0, 0, 236 "Soft underlength error"}, 237 {0x005A0000, 0, 0, 238 "Command to be cancelled not found"}, 239 {0x00808000, 0, 0, 240 "Qualified success"}, 241 {0x01080000, 1, IPR_DEFAULT_LOG_LEVEL, 242 "FFFE: Soft device bus error recovered by the IOA"}, 243 {0x01088100, 0, IPR_DEFAULT_LOG_LEVEL, 244 "4101: Soft device bus fabric error"}, 245 {0x01100100, 0, IPR_DEFAULT_LOG_LEVEL, 246 "FFFC: Logical block guard error recovered by the device"}, 247 {0x01100300, 0, IPR_DEFAULT_LOG_LEVEL, 248 "FFFC: Logical block reference tag error recovered by the device"}, 249 {0x01108300, 0, IPR_DEFAULT_LOG_LEVEL, 250 "4171: Recovered scatter list tag / sequence number error"}, 251 {0x01109000, 0, IPR_DEFAULT_LOG_LEVEL, 252 "FF3D: Recovered logical block CRC error on IOA to Host transfer"}, 253 {0x01109200, 0, IPR_DEFAULT_LOG_LEVEL, 254 "4171: Recovered logical block sequence number error on IOA to Host transfer"}, 255 {0x0110A000, 0, IPR_DEFAULT_LOG_LEVEL, 256 "FFFD: Recovered logical block reference tag error detected by the IOA"}, 257 {0x0110A100, 0, IPR_DEFAULT_LOG_LEVEL, 258 "FFFD: Logical block guard error recovered by the IOA"}, 259 {0x01170600, 0, IPR_DEFAULT_LOG_LEVEL, 260 "FFF9: Device sector reassign successful"}, 261 {0x01170900, 0, IPR_DEFAULT_LOG_LEVEL, 262 "FFF7: Media error recovered by device rewrite procedures"}, 263 {0x01180200, 0, IPR_DEFAULT_LOG_LEVEL, 264 "7001: IOA sector reassignment successful"}, 265 {0x01180500, 0, IPR_DEFAULT_LOG_LEVEL, 266 "FFF9: Soft media error. Sector reassignment recommended"}, 267 {0x01180600, 0, IPR_DEFAULT_LOG_LEVEL, 268 "FFF7: Media error recovered by IOA rewrite procedures"}, 269 {0x01418000, 0, IPR_DEFAULT_LOG_LEVEL, 270 "FF3D: Soft PCI bus error recovered by the IOA"}, 271 {0x01440000, 1, IPR_DEFAULT_LOG_LEVEL, 272 "FFF6: Device hardware error recovered by the IOA"}, 273 {0x01448100, 0, IPR_DEFAULT_LOG_LEVEL, 274 "FFF6: Device hardware error recovered by the device"}, 275 {0x01448200, 1, IPR_DEFAULT_LOG_LEVEL, 276 "FF3D: Soft IOA error recovered by the IOA"}, 277 {0x01448300, 0, IPR_DEFAULT_LOG_LEVEL, 278 "FFFA: Undefined device response recovered by the IOA"}, 279 {0x014A0000, 1, IPR_DEFAULT_LOG_LEVEL, 280 "FFF6: Device bus error, message or command phase"}, 281 {0x014A8000, 0, IPR_DEFAULT_LOG_LEVEL, 282 "FFFE: Task Management Function failed"}, 283 {0x015D0000, 0, IPR_DEFAULT_LOG_LEVEL, 284 "FFF6: Failure prediction threshold exceeded"}, 285 {0x015D9200, 0, IPR_DEFAULT_LOG_LEVEL, 286 "8009: Impending cache battery pack failure"}, 287 {0x02040100, 0, 0, 288 "Logical Unit in process of becoming ready"}, 289 {0x02040200, 0, 0, 290 "Initializing command required"}, 291 {0x02040400, 0, 0, 292 "34FF: Disk device format in progress"}, 293 {0x02040C00, 0, 0, 294 "Logical unit not accessible, target port in unavailable state"}, 295 {0x02048000, 0, IPR_DEFAULT_LOG_LEVEL, 296 "9070: IOA requested reset"}, 297 {0x023F0000, 0, 0, 298 "Synchronization required"}, 299 {0x02408500, 0, 0, 300 "IOA microcode download required"}, 301 {0x02408600, 0, 0, 302 "Device bus connection is prohibited by host"}, 303 {0x024E0000, 0, 0, 304 "No ready, IOA shutdown"}, 305 {0x025A0000, 0, 0, 306 "Not ready, IOA has been shutdown"}, 307 {0x02670100, 0, IPR_DEFAULT_LOG_LEVEL, 308 "3020: Storage subsystem configuration error"}, 309 {0x03110B00, 0, 0, 310 "FFF5: Medium error, data unreadable, recommend reassign"}, 311 {0x03110C00, 0, 0, 312 "7000: Medium error, data unreadable, do not reassign"}, 313 {0x03310000, 0, IPR_DEFAULT_LOG_LEVEL, 314 "FFF3: Disk media format bad"}, 315 {0x04050000, 0, IPR_DEFAULT_LOG_LEVEL, 316 "3002: Addressed device failed to respond to selection"}, 317 {0x04080000, 1, IPR_DEFAULT_LOG_LEVEL, 318 "3100: Device bus error"}, 319 {0x04080100, 0, IPR_DEFAULT_LOG_LEVEL, 320 "3109: IOA timed out a device command"}, 321 {0x04088000, 0, 0, 322 "3120: SCSI bus is not operational"}, 323 {0x04088100, 0, IPR_DEFAULT_LOG_LEVEL, 324 "4100: Hard device bus fabric error"}, 325 {0x04100100, 0, IPR_DEFAULT_LOG_LEVEL, 326 "310C: Logical block guard error detected by the device"}, 327 {0x04100300, 0, IPR_DEFAULT_LOG_LEVEL, 328 "310C: Logical block reference tag error detected by the device"}, 329 {0x04108300, 1, IPR_DEFAULT_LOG_LEVEL, 330 "4170: Scatter list tag / sequence number error"}, 331 {0x04109000, 1, IPR_DEFAULT_LOG_LEVEL, 332 "8150: Logical block CRC error on IOA to Host transfer"}, 333 {0x04109200, 1, IPR_DEFAULT_LOG_LEVEL, 334 "4170: Logical block sequence number error on IOA to Host transfer"}, 335 {0x0410A000, 0, IPR_DEFAULT_LOG_LEVEL, 336 "310D: Logical block reference tag error detected by the IOA"}, 337 {0x0410A100, 0, IPR_DEFAULT_LOG_LEVEL, 338 "310D: Logical block guard error detected by the IOA"}, 339 {0x04118000, 0, IPR_DEFAULT_LOG_LEVEL, 340 "9000: IOA reserved area data check"}, 341 {0x04118100, 0, IPR_DEFAULT_LOG_LEVEL, 342 "9001: IOA reserved area invalid data pattern"}, 343 {0x04118200, 0, IPR_DEFAULT_LOG_LEVEL, 344 "9002: IOA reserved area LRC error"}, 345 {0x04118300, 1, IPR_DEFAULT_LOG_LEVEL, 346 "Hardware Error, IOA metadata access error"}, 347 {0x04320000, 0, IPR_DEFAULT_LOG_LEVEL, 348 "102E: Out of alternate sectors for disk storage"}, 349 {0x04330000, 1, IPR_DEFAULT_LOG_LEVEL, 350 "FFF4: Data transfer underlength error"}, 351 {0x04338000, 1, IPR_DEFAULT_LOG_LEVEL, 352 "FFF4: Data transfer overlength error"}, 353 {0x043E0100, 0, IPR_DEFAULT_LOG_LEVEL, 354 "3400: Logical unit failure"}, 355 {0x04408500, 0, IPR_DEFAULT_LOG_LEVEL, 356 "FFF4: Device microcode is corrupt"}, 357 {0x04418000, 1, IPR_DEFAULT_LOG_LEVEL, 358 "8150: PCI bus error"}, 359 {0x04430000, 1, 0, 360 "Unsupported device bus message received"}, 361 {0x04440000, 1, IPR_DEFAULT_LOG_LEVEL, 362 "FFF4: Disk device problem"}, 363 {0x04448200, 1, IPR_DEFAULT_LOG_LEVEL, 364 "8150: Permanent IOA failure"}, 365 {0x04448300, 0, IPR_DEFAULT_LOG_LEVEL, 366 "3010: Disk device returned wrong response to IOA"}, 367 {0x04448400, 0, IPR_DEFAULT_LOG_LEVEL, 368 "8151: IOA microcode error"}, 369 {0x04448500, 0, 0, 370 "Device bus status error"}, 371 {0x04448600, 0, IPR_DEFAULT_LOG_LEVEL, 372 "8157: IOA error requiring IOA reset to recover"}, 373 {0x04448700, 0, 0, 374 "ATA device status error"}, 375 {0x04490000, 0, 0, 376 "Message reject received from the device"}, 377 {0x04449200, 0, IPR_DEFAULT_LOG_LEVEL, 378 "8008: A permanent cache battery pack failure occurred"}, 379 {0x0444A000, 0, IPR_DEFAULT_LOG_LEVEL, 380 "9090: Disk unit has been modified after the last known status"}, 381 {0x0444A200, 0, IPR_DEFAULT_LOG_LEVEL, 382 "9081: IOA detected device error"}, 383 {0x0444A300, 0, IPR_DEFAULT_LOG_LEVEL, 384 "9082: IOA detected device error"}, 385 {0x044A0000, 1, IPR_DEFAULT_LOG_LEVEL, 386 "3110: Device bus error, message or command phase"}, 387 {0x044A8000, 1, IPR_DEFAULT_LOG_LEVEL, 388 "3110: SAS Command / Task Management Function failed"}, 389 {0x04670400, 0, IPR_DEFAULT_LOG_LEVEL, 390 "9091: Incorrect hardware configuration change has been detected"}, 391 {0x04678000, 0, IPR_DEFAULT_LOG_LEVEL, 392 "9073: Invalid multi-adapter configuration"}, 393 {0x04678100, 0, IPR_DEFAULT_LOG_LEVEL, 394 "4010: Incorrect connection between cascaded expanders"}, 395 {0x04678200, 0, IPR_DEFAULT_LOG_LEVEL, 396 "4020: Connections exceed IOA design limits"}, 397 {0x04678300, 0, IPR_DEFAULT_LOG_LEVEL, 398 "4030: Incorrect multipath connection"}, 399 {0x04679000, 0, IPR_DEFAULT_LOG_LEVEL, 400 "4110: Unsupported enclosure function"}, 401 {0x04679800, 0, IPR_DEFAULT_LOG_LEVEL, 402 "4120: SAS cable VPD cannot be read"}, 403 {0x046E0000, 0, IPR_DEFAULT_LOG_LEVEL, 404 "FFF4: Command to logical unit failed"}, 405 {0x05240000, 1, 0, 406 "Illegal request, invalid request type or request packet"}, 407 {0x05250000, 0, 0, 408 "Illegal request, invalid resource handle"}, 409 {0x05258000, 0, 0, 410 "Illegal request, commands not allowed to this device"}, 411 {0x05258100, 0, 0, 412 "Illegal request, command not allowed to a secondary adapter"}, 413 {0x05258200, 0, 0, 414 "Illegal request, command not allowed to a non-optimized resource"}, 415 {0x05260000, 0, 0, 416 "Illegal request, invalid field in parameter list"}, 417 {0x05260100, 0, 0, 418 "Illegal request, parameter not supported"}, 419 {0x05260200, 0, 0, 420 "Illegal request, parameter value invalid"}, 421 {0x052C0000, 0, 0, 422 "Illegal request, command sequence error"}, 423 {0x052C8000, 1, 0, 424 "Illegal request, dual adapter support not enabled"}, 425 {0x052C8100, 1, 0, 426 "Illegal request, another cable connector was physically disabled"}, 427 {0x054E8000, 1, 0, 428 "Illegal request, inconsistent group id/group count"}, 429 {0x06040500, 0, IPR_DEFAULT_LOG_LEVEL, 430 "9031: Array protection temporarily suspended, protection resuming"}, 431 {0x06040600, 0, IPR_DEFAULT_LOG_LEVEL, 432 "9040: Array protection temporarily suspended, protection resuming"}, 433 {0x060B0100, 0, IPR_DEFAULT_LOG_LEVEL, 434 "4080: IOA exceeded maximum operating temperature"}, 435 {0x060B8000, 0, IPR_DEFAULT_LOG_LEVEL, 436 "4085: Service required"}, 437 {0x06288000, 0, IPR_DEFAULT_LOG_LEVEL, 438 "3140: Device bus not ready to ready transition"}, 439 {0x06290000, 0, IPR_DEFAULT_LOG_LEVEL, 440 "FFFB: SCSI bus was reset"}, 441 {0x06290500, 0, 0, 442 "FFFE: SCSI bus transition to single ended"}, 443 {0x06290600, 0, 0, 444 "FFFE: SCSI bus transition to LVD"}, 445 {0x06298000, 0, IPR_DEFAULT_LOG_LEVEL, 446 "FFFB: SCSI bus was reset by another initiator"}, 447 {0x063F0300, 0, IPR_DEFAULT_LOG_LEVEL, 448 "3029: A device replacement has occurred"}, 449 {0x063F8300, 0, IPR_DEFAULT_LOG_LEVEL, 450 "4102: Device bus fabric performance degradation"}, 451 {0x064C8000, 0, IPR_DEFAULT_LOG_LEVEL, 452 "9051: IOA cache data exists for a missing or failed device"}, 453 {0x064C8100, 0, IPR_DEFAULT_LOG_LEVEL, 454 "9055: Auxiliary cache IOA contains cache data needed by the primary IOA"}, 455 {0x06670100, 0, IPR_DEFAULT_LOG_LEVEL, 456 "9025: Disk unit is not supported at its physical location"}, 457 {0x06670600, 0, IPR_DEFAULT_LOG_LEVEL, 458 "3020: IOA detected a SCSI bus configuration error"}, 459 {0x06678000, 0, IPR_DEFAULT_LOG_LEVEL, 460 "3150: SCSI bus configuration error"}, 461 {0x06678100, 0, IPR_DEFAULT_LOG_LEVEL, 462 "9074: Asymmetric advanced function disk configuration"}, 463 {0x06678300, 0, IPR_DEFAULT_LOG_LEVEL, 464 "4040: Incomplete multipath connection between IOA and enclosure"}, 465 {0x06678400, 0, IPR_DEFAULT_LOG_LEVEL, 466 "4041: Incomplete multipath connection between enclosure and device"}, 467 {0x06678500, 0, IPR_DEFAULT_LOG_LEVEL, 468 "9075: Incomplete multipath connection between IOA and remote IOA"}, 469 {0x06678600, 0, IPR_DEFAULT_LOG_LEVEL, 470 "9076: Configuration error, missing remote IOA"}, 471 {0x06679100, 0, IPR_DEFAULT_LOG_LEVEL, 472 "4050: Enclosure does not support a required multipath function"}, 473 {0x06679800, 0, IPR_DEFAULT_LOG_LEVEL, 474 "4121: Configuration error, required cable is missing"}, 475 {0x06679900, 0, IPR_DEFAULT_LOG_LEVEL, 476 "4122: Cable is not plugged into the correct location on remote IOA"}, 477 {0x06679A00, 0, IPR_DEFAULT_LOG_LEVEL, 478 "4123: Configuration error, invalid cable vital product data"}, 479 {0x06679B00, 0, IPR_DEFAULT_LOG_LEVEL, 480 "4124: Configuration error, both cable ends are plugged into the same IOA"}, 481 {0x06690000, 0, IPR_DEFAULT_LOG_LEVEL, 482 "4070: Logically bad block written on device"}, 483 {0x06690200, 0, IPR_DEFAULT_LOG_LEVEL, 484 "9041: Array protection temporarily suspended"}, 485 {0x06698200, 0, IPR_DEFAULT_LOG_LEVEL, 486 "9042: Corrupt array parity detected on specified device"}, 487 {0x066B0200, 0, IPR_DEFAULT_LOG_LEVEL, 488 "9030: Array no longer protected due to missing or failed disk unit"}, 489 {0x066B8000, 0, IPR_DEFAULT_LOG_LEVEL, 490 "9071: Link operational transition"}, 491 {0x066B8100, 0, IPR_DEFAULT_LOG_LEVEL, 492 "9072: Link not operational transition"}, 493 {0x066B8200, 0, IPR_DEFAULT_LOG_LEVEL, 494 "9032: Array exposed but still protected"}, 495 {0x066B8300, 0, IPR_DEFAULT_LOG_LEVEL + 1, 496 "70DD: Device forced failed by disrupt device command"}, 497 {0x066B9100, 0, IPR_DEFAULT_LOG_LEVEL, 498 "4061: Multipath redundancy level got better"}, 499 {0x066B9200, 0, IPR_DEFAULT_LOG_LEVEL, 500 "4060: Multipath redundancy level got worse"}, 501 {0x07270000, 0, 0, 502 "Failure due to other device"}, 503 {0x07278000, 0, IPR_DEFAULT_LOG_LEVEL, 504 "9008: IOA does not support functions expected by devices"}, 505 {0x07278100, 0, IPR_DEFAULT_LOG_LEVEL, 506 "9010: Cache data associated with attached devices cannot be found"}, 507 {0x07278200, 0, IPR_DEFAULT_LOG_LEVEL, 508 "9011: Cache data belongs to devices other than those attached"}, 509 {0x07278400, 0, IPR_DEFAULT_LOG_LEVEL, 510 "9020: Array missing 2 or more devices with only 1 device present"}, 511 {0x07278500, 0, IPR_DEFAULT_LOG_LEVEL, 512 "9021: Array missing 2 or more devices with 2 or more devices present"}, 513 {0x07278600, 0, IPR_DEFAULT_LOG_LEVEL, 514 "9022: Exposed array is missing a required device"}, 515 {0x07278700, 0, IPR_DEFAULT_LOG_LEVEL, 516 "9023: Array member(s) not at required physical locations"}, 517 {0x07278800, 0, IPR_DEFAULT_LOG_LEVEL, 518 "9024: Array not functional due to present hardware configuration"}, 519 {0x07278900, 0, IPR_DEFAULT_LOG_LEVEL, 520 "9026: Array not functional due to present hardware configuration"}, 521 {0x07278A00, 0, IPR_DEFAULT_LOG_LEVEL, 522 "9027: Array is missing a device and parity is out of sync"}, 523 {0x07278B00, 0, IPR_DEFAULT_LOG_LEVEL, 524 "9028: Maximum number of arrays already exist"}, 525 {0x07278C00, 0, IPR_DEFAULT_LOG_LEVEL, 526 "9050: Required cache data cannot be located for a disk unit"}, 527 {0x07278D00, 0, IPR_DEFAULT_LOG_LEVEL, 528 "9052: Cache data exists for a device that has been modified"}, 529 {0x07278F00, 0, IPR_DEFAULT_LOG_LEVEL, 530 "9054: IOA resources not available due to previous problems"}, 531 {0x07279100, 0, IPR_DEFAULT_LOG_LEVEL, 532 "9092: Disk unit requires initialization before use"}, 533 {0x07279200, 0, IPR_DEFAULT_LOG_LEVEL, 534 "9029: Incorrect hardware configuration change has been detected"}, 535 {0x07279600, 0, IPR_DEFAULT_LOG_LEVEL, 536 "9060: One or more disk pairs are missing from an array"}, 537 {0x07279700, 0, IPR_DEFAULT_LOG_LEVEL, 538 "9061: One or more disks are missing from an array"}, 539 {0x07279800, 0, IPR_DEFAULT_LOG_LEVEL, 540 "9062: One or more disks are missing from an array"}, 541 {0x07279900, 0, IPR_DEFAULT_LOG_LEVEL, 542 "9063: Maximum number of functional arrays has been exceeded"}, 543 {0x07279A00, 0, 0, 544 "Data protect, other volume set problem"}, 545 {0x0B260000, 0, 0, 546 "Aborted command, invalid descriptor"}, 547 {0x0B3F9000, 0, 0, 548 "Target operating conditions have changed, dual adapter takeover"}, 549 {0x0B530200, 0, 0, 550 "Aborted command, medium removal prevented"}, 551 {0x0B5A0000, 0, 0, 552 "Command terminated by host"}, 553 {0x0B5B8000, 0, 0, 554 "Aborted command, command terminated by host"} 555 }; 556 557 static const struct ipr_ses_table_entry ipr_ses_table[] = { 558 { "2104-DL1 ", "XXXXXXXXXXXXXXXX", 80 }, 559 { "2104-TL1 ", "XXXXXXXXXXXXXXXX", 80 }, 560 { "HSBP07M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 7 slot */ 561 { "HSBP05M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 5 slot */ 562 { "HSBP05M S U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Bowtie */ 563 { "HSBP06E ASU2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* MartinFenning */ 564 { "2104-DU3 ", "XXXXXXXXXXXXXXXX", 160 }, 565 { "2104-TU3 ", "XXXXXXXXXXXXXXXX", 160 }, 566 { "HSBP04C RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 }, 567 { "HSBP06E RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 }, 568 { "St V1S2 ", "XXXXXXXXXXXXXXXX", 160 }, 569 { "HSBPD4M PU3SCSI", "XXXXXXX*XXXXXXXX", 160 }, 570 { "VSBPD1H U3SCSI", "XXXXXXX*XXXXXXXX", 160 } 571 }; 572 573 /* 574 * Function Prototypes 575 */ 576 static int ipr_reset_alert(struct ipr_cmnd *); 577 static void ipr_process_ccn(struct ipr_cmnd *); 578 static void ipr_process_error(struct ipr_cmnd *); 579 static void ipr_reset_ioa_job(struct ipr_cmnd *); 580 static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *, 581 enum ipr_shutdown_type); 582 583 #ifdef CONFIG_SCSI_IPR_TRACE 584 /** 585 * ipr_trc_hook - Add a trace entry to the driver trace 586 * @ipr_cmd: ipr command struct 587 * @type: trace type 588 * @add_data: additional data 589 * 590 * Return value: 591 * none 592 **/ 593 static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd, 594 u8 type, u32 add_data) 595 { 596 struct ipr_trace_entry *trace_entry; 597 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 598 599 trace_entry = &ioa_cfg->trace[atomic_add_return 600 (1, &ioa_cfg->trace_index)%IPR_NUM_TRACE_ENTRIES]; 601 trace_entry->time = jiffies; 602 trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0]; 603 trace_entry->type = type; 604 if (ipr_cmd->ioa_cfg->sis64) 605 trace_entry->ata_op_code = ipr_cmd->i.ata_ioadl.regs.command; 606 else 607 trace_entry->ata_op_code = ipr_cmd->ioarcb.u.add_data.u.regs.command; 608 trace_entry->cmd_index = ipr_cmd->cmd_index & 0xff; 609 trace_entry->res_handle = ipr_cmd->ioarcb.res_handle; 610 trace_entry->u.add_data = add_data; 611 wmb(); 612 } 613 #else 614 #define ipr_trc_hook(ipr_cmd, type, add_data) do { } while (0) 615 #endif 616 617 /** 618 * ipr_lock_and_done - Acquire lock and complete command 619 * @ipr_cmd: ipr command struct 620 * 621 * Return value: 622 * none 623 **/ 624 static void ipr_lock_and_done(struct ipr_cmnd *ipr_cmd) 625 { 626 unsigned long lock_flags; 627 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 628 629 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 630 ipr_cmd->done(ipr_cmd); 631 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 632 } 633 634 /** 635 * ipr_reinit_ipr_cmnd - Re-initialize an IPR Cmnd block for reuse 636 * @ipr_cmd: ipr command struct 637 * 638 * Return value: 639 * none 640 **/ 641 static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd) 642 { 643 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; 644 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa; 645 struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64; 646 dma_addr_t dma_addr = ipr_cmd->dma_addr; 647 int hrrq_id; 648 649 hrrq_id = ioarcb->cmd_pkt.hrrq_id; 650 memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt)); 651 ioarcb->cmd_pkt.hrrq_id = hrrq_id; 652 ioarcb->data_transfer_length = 0; 653 ioarcb->read_data_transfer_length = 0; 654 ioarcb->ioadl_len = 0; 655 ioarcb->read_ioadl_len = 0; 656 657 if (ipr_cmd->ioa_cfg->sis64) { 658 ioarcb->u.sis64_addr_data.data_ioadl_addr = 659 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64)); 660 ioasa64->u.gata.status = 0; 661 } else { 662 ioarcb->write_ioadl_addr = 663 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl)); 664 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr; 665 ioasa->u.gata.status = 0; 666 } 667 668 ioasa->hdr.ioasc = 0; 669 ioasa->hdr.residual_data_len = 0; 670 ipr_cmd->scsi_cmd = NULL; 671 ipr_cmd->qc = NULL; 672 ipr_cmd->sense_buffer[0] = 0; 673 ipr_cmd->dma_use_sg = 0; 674 } 675 676 /** 677 * ipr_init_ipr_cmnd - Initialize an IPR Cmnd block 678 * @ipr_cmd: ipr command struct 679 * 680 * Return value: 681 * none 682 **/ 683 static void ipr_init_ipr_cmnd(struct ipr_cmnd *ipr_cmd, 684 void (*fast_done) (struct ipr_cmnd *)) 685 { 686 ipr_reinit_ipr_cmnd(ipr_cmd); 687 ipr_cmd->u.scratch = 0; 688 ipr_cmd->sibling = NULL; 689 ipr_cmd->eh_comp = NULL; 690 ipr_cmd->fast_done = fast_done; 691 init_timer(&ipr_cmd->timer); 692 } 693 694 /** 695 * __ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block 696 * @ioa_cfg: ioa config struct 697 * 698 * Return value: 699 * pointer to ipr command struct 700 **/ 701 static 702 struct ipr_cmnd *__ipr_get_free_ipr_cmnd(struct ipr_hrr_queue *hrrq) 703 { 704 struct ipr_cmnd *ipr_cmd = NULL; 705 706 if (likely(!list_empty(&hrrq->hrrq_free_q))) { 707 ipr_cmd = list_entry(hrrq->hrrq_free_q.next, 708 struct ipr_cmnd, queue); 709 list_del(&ipr_cmd->queue); 710 } 711 712 713 return ipr_cmd; 714 } 715 716 /** 717 * ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block and initialize it 718 * @ioa_cfg: ioa config struct 719 * 720 * Return value: 721 * pointer to ipr command struct 722 **/ 723 static 724 struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg) 725 { 726 struct ipr_cmnd *ipr_cmd = 727 __ipr_get_free_ipr_cmnd(&ioa_cfg->hrrq[IPR_INIT_HRRQ]); 728 ipr_init_ipr_cmnd(ipr_cmd, ipr_lock_and_done); 729 return ipr_cmd; 730 } 731 732 /** 733 * ipr_mask_and_clear_interrupts - Mask all and clear specified interrupts 734 * @ioa_cfg: ioa config struct 735 * @clr_ints: interrupts to clear 736 * 737 * This function masks all interrupts on the adapter, then clears the 738 * interrupts specified in the mask 739 * 740 * Return value: 741 * none 742 **/ 743 static void ipr_mask_and_clear_interrupts(struct ipr_ioa_cfg *ioa_cfg, 744 u32 clr_ints) 745 { 746 volatile u32 int_reg; 747 int i; 748 749 /* Stop new interrupts */ 750 for (i = 0; i < ioa_cfg->hrrq_num; i++) { 751 spin_lock(&ioa_cfg->hrrq[i]._lock); 752 ioa_cfg->hrrq[i].allow_interrupts = 0; 753 spin_unlock(&ioa_cfg->hrrq[i]._lock); 754 } 755 wmb(); 756 757 /* Set interrupt mask to stop all new interrupts */ 758 if (ioa_cfg->sis64) 759 writeq(~0, ioa_cfg->regs.set_interrupt_mask_reg); 760 else 761 writel(~0, ioa_cfg->regs.set_interrupt_mask_reg); 762 763 /* Clear any pending interrupts */ 764 if (ioa_cfg->sis64) 765 writel(~0, ioa_cfg->regs.clr_interrupt_reg); 766 writel(clr_ints, ioa_cfg->regs.clr_interrupt_reg32); 767 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg); 768 } 769 770 /** 771 * ipr_save_pcix_cmd_reg - Save PCI-X command register 772 * @ioa_cfg: ioa config struct 773 * 774 * Return value: 775 * 0 on success / -EIO on failure 776 **/ 777 static int ipr_save_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg) 778 { 779 int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX); 780 781 if (pcix_cmd_reg == 0) 782 return 0; 783 784 if (pci_read_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD, 785 &ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) { 786 dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n"); 787 return -EIO; 788 } 789 790 ioa_cfg->saved_pcix_cmd_reg |= PCI_X_CMD_DPERR_E | PCI_X_CMD_ERO; 791 return 0; 792 } 793 794 /** 795 * ipr_set_pcix_cmd_reg - Setup PCI-X command register 796 * @ioa_cfg: ioa config struct 797 * 798 * Return value: 799 * 0 on success / -EIO on failure 800 **/ 801 static int ipr_set_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg) 802 { 803 int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX); 804 805 if (pcix_cmd_reg) { 806 if (pci_write_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD, 807 ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) { 808 dev_err(&ioa_cfg->pdev->dev, "Failed to setup PCI-X command register\n"); 809 return -EIO; 810 } 811 } 812 813 return 0; 814 } 815 816 /** 817 * ipr_sata_eh_done - done function for aborted SATA commands 818 * @ipr_cmd: ipr command struct 819 * 820 * This function is invoked for ops generated to SATA 821 * devices which are being aborted. 822 * 823 * Return value: 824 * none 825 **/ 826 static void ipr_sata_eh_done(struct ipr_cmnd *ipr_cmd) 827 { 828 struct ata_queued_cmd *qc = ipr_cmd->qc; 829 struct ipr_sata_port *sata_port = qc->ap->private_data; 830 831 qc->err_mask |= AC_ERR_OTHER; 832 sata_port->ioasa.status |= ATA_BUSY; 833 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); 834 ata_qc_complete(qc); 835 } 836 837 /** 838 * ipr_scsi_eh_done - mid-layer done function for aborted ops 839 * @ipr_cmd: ipr command struct 840 * 841 * This function is invoked by the interrupt handler for 842 * ops generated by the SCSI mid-layer which are being aborted. 843 * 844 * Return value: 845 * none 846 **/ 847 static void ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd) 848 { 849 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd; 850 851 scsi_cmd->result |= (DID_ERROR << 16); 852 853 scsi_dma_unmap(ipr_cmd->scsi_cmd); 854 scsi_cmd->scsi_done(scsi_cmd); 855 if (ipr_cmd->eh_comp) 856 complete(ipr_cmd->eh_comp); 857 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); 858 } 859 860 /** 861 * ipr_fail_all_ops - Fails all outstanding ops. 862 * @ioa_cfg: ioa config struct 863 * 864 * This function fails all outstanding ops. 865 * 866 * Return value: 867 * none 868 **/ 869 static void ipr_fail_all_ops(struct ipr_ioa_cfg *ioa_cfg) 870 { 871 struct ipr_cmnd *ipr_cmd, *temp; 872 struct ipr_hrr_queue *hrrq; 873 874 ENTER; 875 for_each_hrrq(hrrq, ioa_cfg) { 876 spin_lock(&hrrq->_lock); 877 list_for_each_entry_safe(ipr_cmd, 878 temp, &hrrq->hrrq_pending_q, queue) { 879 list_del(&ipr_cmd->queue); 880 881 ipr_cmd->s.ioasa.hdr.ioasc = 882 cpu_to_be32(IPR_IOASC_IOA_WAS_RESET); 883 ipr_cmd->s.ioasa.hdr.ilid = 884 cpu_to_be32(IPR_DRIVER_ILID); 885 886 if (ipr_cmd->scsi_cmd) 887 ipr_cmd->done = ipr_scsi_eh_done; 888 else if (ipr_cmd->qc) 889 ipr_cmd->done = ipr_sata_eh_done; 890 891 ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, 892 IPR_IOASC_IOA_WAS_RESET); 893 del_timer(&ipr_cmd->timer); 894 ipr_cmd->done(ipr_cmd); 895 } 896 spin_unlock(&hrrq->_lock); 897 } 898 LEAVE; 899 } 900 901 /** 902 * ipr_send_command - Send driver initiated requests. 903 * @ipr_cmd: ipr command struct 904 * 905 * This function sends a command to the adapter using the correct write call. 906 * In the case of sis64, calculate the ioarcb size required. Then or in the 907 * appropriate bits. 908 * 909 * Return value: 910 * none 911 **/ 912 static void ipr_send_command(struct ipr_cmnd *ipr_cmd) 913 { 914 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 915 dma_addr_t send_dma_addr = ipr_cmd->dma_addr; 916 917 if (ioa_cfg->sis64) { 918 /* The default size is 256 bytes */ 919 send_dma_addr |= 0x1; 920 921 /* If the number of ioadls * size of ioadl > 128 bytes, 922 then use a 512 byte ioarcb */ 923 if (ipr_cmd->dma_use_sg * sizeof(struct ipr_ioadl64_desc) > 128 ) 924 send_dma_addr |= 0x4; 925 writeq(send_dma_addr, ioa_cfg->regs.ioarrin_reg); 926 } else 927 writel(send_dma_addr, ioa_cfg->regs.ioarrin_reg); 928 } 929 930 /** 931 * ipr_do_req - Send driver initiated requests. 932 * @ipr_cmd: ipr command struct 933 * @done: done function 934 * @timeout_func: timeout function 935 * @timeout: timeout value 936 * 937 * This function sends the specified command to the adapter with the 938 * timeout given. The done function is invoked on command completion. 939 * 940 * Return value: 941 * none 942 **/ 943 static void ipr_do_req(struct ipr_cmnd *ipr_cmd, 944 void (*done) (struct ipr_cmnd *), 945 void (*timeout_func) (struct ipr_cmnd *), u32 timeout) 946 { 947 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q); 948 949 ipr_cmd->done = done; 950 951 ipr_cmd->timer.data = (unsigned long) ipr_cmd; 952 ipr_cmd->timer.expires = jiffies + timeout; 953 ipr_cmd->timer.function = (void (*)(unsigned long))timeout_func; 954 955 add_timer(&ipr_cmd->timer); 956 957 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, 0); 958 959 ipr_send_command(ipr_cmd); 960 } 961 962 /** 963 * ipr_internal_cmd_done - Op done function for an internally generated op. 964 * @ipr_cmd: ipr command struct 965 * 966 * This function is the op done function for an internally generated, 967 * blocking op. It simply wakes the sleeping thread. 968 * 969 * Return value: 970 * none 971 **/ 972 static void ipr_internal_cmd_done(struct ipr_cmnd *ipr_cmd) 973 { 974 if (ipr_cmd->sibling) 975 ipr_cmd->sibling = NULL; 976 else 977 complete(&ipr_cmd->completion); 978 } 979 980 /** 981 * ipr_init_ioadl - initialize the ioadl for the correct SIS type 982 * @ipr_cmd: ipr command struct 983 * @dma_addr: dma address 984 * @len: transfer length 985 * @flags: ioadl flag value 986 * 987 * This function initializes an ioadl in the case where there is only a single 988 * descriptor. 989 * 990 * Return value: 991 * nothing 992 **/ 993 static void ipr_init_ioadl(struct ipr_cmnd *ipr_cmd, dma_addr_t dma_addr, 994 u32 len, int flags) 995 { 996 struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl; 997 struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64; 998 999 ipr_cmd->dma_use_sg = 1; 1000 1001 if (ipr_cmd->ioa_cfg->sis64) { 1002 ioadl64->flags = cpu_to_be32(flags); 1003 ioadl64->data_len = cpu_to_be32(len); 1004 ioadl64->address = cpu_to_be64(dma_addr); 1005 1006 ipr_cmd->ioarcb.ioadl_len = 1007 cpu_to_be32(sizeof(struct ipr_ioadl64_desc)); 1008 ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len); 1009 } else { 1010 ioadl->flags_and_data_len = cpu_to_be32(flags | len); 1011 ioadl->address = cpu_to_be32(dma_addr); 1012 1013 if (flags == IPR_IOADL_FLAGS_READ_LAST) { 1014 ipr_cmd->ioarcb.read_ioadl_len = 1015 cpu_to_be32(sizeof(struct ipr_ioadl_desc)); 1016 ipr_cmd->ioarcb.read_data_transfer_length = cpu_to_be32(len); 1017 } else { 1018 ipr_cmd->ioarcb.ioadl_len = 1019 cpu_to_be32(sizeof(struct ipr_ioadl_desc)); 1020 ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len); 1021 } 1022 } 1023 } 1024 1025 /** 1026 * ipr_send_blocking_cmd - Send command and sleep on its completion. 1027 * @ipr_cmd: ipr command struct 1028 * @timeout_func: function to invoke if command times out 1029 * @timeout: timeout 1030 * 1031 * Return value: 1032 * none 1033 **/ 1034 static void ipr_send_blocking_cmd(struct ipr_cmnd *ipr_cmd, 1035 void (*timeout_func) (struct ipr_cmnd *ipr_cmd), 1036 u32 timeout) 1037 { 1038 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 1039 1040 init_completion(&ipr_cmd->completion); 1041 ipr_do_req(ipr_cmd, ipr_internal_cmd_done, timeout_func, timeout); 1042 1043 spin_unlock_irq(ioa_cfg->host->host_lock); 1044 wait_for_completion(&ipr_cmd->completion); 1045 spin_lock_irq(ioa_cfg->host->host_lock); 1046 } 1047 1048 static int ipr_get_hrrq_index(struct ipr_ioa_cfg *ioa_cfg) 1049 { 1050 if (ioa_cfg->hrrq_num == 1) 1051 return 0; 1052 else 1053 return (atomic_add_return(1, &ioa_cfg->hrrq_index) % (ioa_cfg->hrrq_num - 1)) + 1; 1054 } 1055 1056 /** 1057 * ipr_send_hcam - Send an HCAM to the adapter. 1058 * @ioa_cfg: ioa config struct 1059 * @type: HCAM type 1060 * @hostrcb: hostrcb struct 1061 * 1062 * This function will send a Host Controlled Async command to the adapter. 1063 * If HCAMs are currently not allowed to be issued to the adapter, it will 1064 * place the hostrcb on the free queue. 1065 * 1066 * Return value: 1067 * none 1068 **/ 1069 static void ipr_send_hcam(struct ipr_ioa_cfg *ioa_cfg, u8 type, 1070 struct ipr_hostrcb *hostrcb) 1071 { 1072 struct ipr_cmnd *ipr_cmd; 1073 struct ipr_ioarcb *ioarcb; 1074 1075 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) { 1076 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg); 1077 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q); 1078 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_pending_q); 1079 1080 ipr_cmd->u.hostrcb = hostrcb; 1081 ioarcb = &ipr_cmd->ioarcb; 1082 1083 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE); 1084 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_HCAM; 1085 ioarcb->cmd_pkt.cdb[0] = IPR_HOST_CONTROLLED_ASYNC; 1086 ioarcb->cmd_pkt.cdb[1] = type; 1087 ioarcb->cmd_pkt.cdb[7] = (sizeof(hostrcb->hcam) >> 8) & 0xff; 1088 ioarcb->cmd_pkt.cdb[8] = sizeof(hostrcb->hcam) & 0xff; 1089 1090 ipr_init_ioadl(ipr_cmd, hostrcb->hostrcb_dma, 1091 sizeof(hostrcb->hcam), IPR_IOADL_FLAGS_READ_LAST); 1092 1093 if (type == IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE) 1094 ipr_cmd->done = ipr_process_ccn; 1095 else 1096 ipr_cmd->done = ipr_process_error; 1097 1098 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_IOA_RES_ADDR); 1099 1100 ipr_send_command(ipr_cmd); 1101 } else { 1102 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q); 1103 } 1104 } 1105 1106 /** 1107 * ipr_update_ata_class - Update the ata class in the resource entry 1108 * @res: resource entry struct 1109 * @proto: cfgte device bus protocol value 1110 * 1111 * Return value: 1112 * none 1113 **/ 1114 static void ipr_update_ata_class(struct ipr_resource_entry *res, unsigned int proto) 1115 { 1116 switch (proto) { 1117 case IPR_PROTO_SATA: 1118 case IPR_PROTO_SAS_STP: 1119 res->ata_class = ATA_DEV_ATA; 1120 break; 1121 case IPR_PROTO_SATA_ATAPI: 1122 case IPR_PROTO_SAS_STP_ATAPI: 1123 res->ata_class = ATA_DEV_ATAPI; 1124 break; 1125 default: 1126 res->ata_class = ATA_DEV_UNKNOWN; 1127 break; 1128 }; 1129 } 1130 1131 /** 1132 * ipr_init_res_entry - Initialize a resource entry struct. 1133 * @res: resource entry struct 1134 * @cfgtew: config table entry wrapper struct 1135 * 1136 * Return value: 1137 * none 1138 **/ 1139 static void ipr_init_res_entry(struct ipr_resource_entry *res, 1140 struct ipr_config_table_entry_wrapper *cfgtew) 1141 { 1142 int found = 0; 1143 unsigned int proto; 1144 struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg; 1145 struct ipr_resource_entry *gscsi_res = NULL; 1146 1147 res->needs_sync_complete = 0; 1148 res->in_erp = 0; 1149 res->add_to_ml = 0; 1150 res->del_from_ml = 0; 1151 res->resetting_device = 0; 1152 res->reset_occurred = 0; 1153 res->sdev = NULL; 1154 res->sata_port = NULL; 1155 1156 if (ioa_cfg->sis64) { 1157 proto = cfgtew->u.cfgte64->proto; 1158 res->res_flags = cfgtew->u.cfgte64->res_flags; 1159 res->qmodel = IPR_QUEUEING_MODEL64(res); 1160 res->type = cfgtew->u.cfgte64->res_type; 1161 1162 memcpy(res->res_path, &cfgtew->u.cfgte64->res_path, 1163 sizeof(res->res_path)); 1164 1165 res->bus = 0; 1166 memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun, 1167 sizeof(res->dev_lun.scsi_lun)); 1168 res->lun = scsilun_to_int(&res->dev_lun); 1169 1170 if (res->type == IPR_RES_TYPE_GENERIC_SCSI) { 1171 list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue) { 1172 if (gscsi_res->dev_id == cfgtew->u.cfgte64->dev_id) { 1173 found = 1; 1174 res->target = gscsi_res->target; 1175 break; 1176 } 1177 } 1178 if (!found) { 1179 res->target = find_first_zero_bit(ioa_cfg->target_ids, 1180 ioa_cfg->max_devs_supported); 1181 set_bit(res->target, ioa_cfg->target_ids); 1182 } 1183 } else if (res->type == IPR_RES_TYPE_IOAFP) { 1184 res->bus = IPR_IOAFP_VIRTUAL_BUS; 1185 res->target = 0; 1186 } else if (res->type == IPR_RES_TYPE_ARRAY) { 1187 res->bus = IPR_ARRAY_VIRTUAL_BUS; 1188 res->target = find_first_zero_bit(ioa_cfg->array_ids, 1189 ioa_cfg->max_devs_supported); 1190 set_bit(res->target, ioa_cfg->array_ids); 1191 } else if (res->type == IPR_RES_TYPE_VOLUME_SET) { 1192 res->bus = IPR_VSET_VIRTUAL_BUS; 1193 res->target = find_first_zero_bit(ioa_cfg->vset_ids, 1194 ioa_cfg->max_devs_supported); 1195 set_bit(res->target, ioa_cfg->vset_ids); 1196 } else { 1197 res->target = find_first_zero_bit(ioa_cfg->target_ids, 1198 ioa_cfg->max_devs_supported); 1199 set_bit(res->target, ioa_cfg->target_ids); 1200 } 1201 } else { 1202 proto = cfgtew->u.cfgte->proto; 1203 res->qmodel = IPR_QUEUEING_MODEL(res); 1204 res->flags = cfgtew->u.cfgte->flags; 1205 if (res->flags & IPR_IS_IOA_RESOURCE) 1206 res->type = IPR_RES_TYPE_IOAFP; 1207 else 1208 res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f; 1209 1210 res->bus = cfgtew->u.cfgte->res_addr.bus; 1211 res->target = cfgtew->u.cfgte->res_addr.target; 1212 res->lun = cfgtew->u.cfgte->res_addr.lun; 1213 res->lun_wwn = get_unaligned_be64(cfgtew->u.cfgte->lun_wwn); 1214 } 1215 1216 ipr_update_ata_class(res, proto); 1217 } 1218 1219 /** 1220 * ipr_is_same_device - Determine if two devices are the same. 1221 * @res: resource entry struct 1222 * @cfgtew: config table entry wrapper struct 1223 * 1224 * Return value: 1225 * 1 if the devices are the same / 0 otherwise 1226 **/ 1227 static int ipr_is_same_device(struct ipr_resource_entry *res, 1228 struct ipr_config_table_entry_wrapper *cfgtew) 1229 { 1230 if (res->ioa_cfg->sis64) { 1231 if (!memcmp(&res->dev_id, &cfgtew->u.cfgte64->dev_id, 1232 sizeof(cfgtew->u.cfgte64->dev_id)) && 1233 !memcmp(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun, 1234 sizeof(cfgtew->u.cfgte64->lun))) { 1235 return 1; 1236 } 1237 } else { 1238 if (res->bus == cfgtew->u.cfgte->res_addr.bus && 1239 res->target == cfgtew->u.cfgte->res_addr.target && 1240 res->lun == cfgtew->u.cfgte->res_addr.lun) 1241 return 1; 1242 } 1243 1244 return 0; 1245 } 1246 1247 /** 1248 * __ipr_format_res_path - Format the resource path for printing. 1249 * @res_path: resource path 1250 * @buf: buffer 1251 * @len: length of buffer provided 1252 * 1253 * Return value: 1254 * pointer to buffer 1255 **/ 1256 static char *__ipr_format_res_path(u8 *res_path, char *buffer, int len) 1257 { 1258 int i; 1259 char *p = buffer; 1260 1261 *p = '\0'; 1262 p += snprintf(p, buffer + len - p, "%02X", res_path[0]); 1263 for (i = 1; res_path[i] != 0xff && ((i * 3) < len); i++) 1264 p += snprintf(p, buffer + len - p, "-%02X", res_path[i]); 1265 1266 return buffer; 1267 } 1268 1269 /** 1270 * ipr_format_res_path - Format the resource path for printing. 1271 * @ioa_cfg: ioa config struct 1272 * @res_path: resource path 1273 * @buf: buffer 1274 * @len: length of buffer provided 1275 * 1276 * Return value: 1277 * pointer to buffer 1278 **/ 1279 static char *ipr_format_res_path(struct ipr_ioa_cfg *ioa_cfg, 1280 u8 *res_path, char *buffer, int len) 1281 { 1282 char *p = buffer; 1283 1284 *p = '\0'; 1285 p += snprintf(p, buffer + len - p, "%d/", ioa_cfg->host->host_no); 1286 __ipr_format_res_path(res_path, p, len - (buffer - p)); 1287 return buffer; 1288 } 1289 1290 /** 1291 * ipr_update_res_entry - Update the resource entry. 1292 * @res: resource entry struct 1293 * @cfgtew: config table entry wrapper struct 1294 * 1295 * Return value: 1296 * none 1297 **/ 1298 static void ipr_update_res_entry(struct ipr_resource_entry *res, 1299 struct ipr_config_table_entry_wrapper *cfgtew) 1300 { 1301 char buffer[IPR_MAX_RES_PATH_LENGTH]; 1302 unsigned int proto; 1303 int new_path = 0; 1304 1305 if (res->ioa_cfg->sis64) { 1306 res->flags = cfgtew->u.cfgte64->flags; 1307 res->res_flags = cfgtew->u.cfgte64->res_flags; 1308 res->type = cfgtew->u.cfgte64->res_type; 1309 1310 memcpy(&res->std_inq_data, &cfgtew->u.cfgte64->std_inq_data, 1311 sizeof(struct ipr_std_inq_data)); 1312 1313 res->qmodel = IPR_QUEUEING_MODEL64(res); 1314 proto = cfgtew->u.cfgte64->proto; 1315 res->res_handle = cfgtew->u.cfgte64->res_handle; 1316 res->dev_id = cfgtew->u.cfgte64->dev_id; 1317 1318 memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun, 1319 sizeof(res->dev_lun.scsi_lun)); 1320 1321 if (memcmp(res->res_path, &cfgtew->u.cfgte64->res_path, 1322 sizeof(res->res_path))) { 1323 memcpy(res->res_path, &cfgtew->u.cfgte64->res_path, 1324 sizeof(res->res_path)); 1325 new_path = 1; 1326 } 1327 1328 if (res->sdev && new_path) 1329 sdev_printk(KERN_INFO, res->sdev, "Resource path: %s\n", 1330 ipr_format_res_path(res->ioa_cfg, 1331 res->res_path, buffer, sizeof(buffer))); 1332 } else { 1333 res->flags = cfgtew->u.cfgte->flags; 1334 if (res->flags & IPR_IS_IOA_RESOURCE) 1335 res->type = IPR_RES_TYPE_IOAFP; 1336 else 1337 res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f; 1338 1339 memcpy(&res->std_inq_data, &cfgtew->u.cfgte->std_inq_data, 1340 sizeof(struct ipr_std_inq_data)); 1341 1342 res->qmodel = IPR_QUEUEING_MODEL(res); 1343 proto = cfgtew->u.cfgte->proto; 1344 res->res_handle = cfgtew->u.cfgte->res_handle; 1345 } 1346 1347 ipr_update_ata_class(res, proto); 1348 } 1349 1350 /** 1351 * ipr_clear_res_target - Clear the bit in the bit map representing the target 1352 * for the resource. 1353 * @res: resource entry struct 1354 * @cfgtew: config table entry wrapper struct 1355 * 1356 * Return value: 1357 * none 1358 **/ 1359 static void ipr_clear_res_target(struct ipr_resource_entry *res) 1360 { 1361 struct ipr_resource_entry *gscsi_res = NULL; 1362 struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg; 1363 1364 if (!ioa_cfg->sis64) 1365 return; 1366 1367 if (res->bus == IPR_ARRAY_VIRTUAL_BUS) 1368 clear_bit(res->target, ioa_cfg->array_ids); 1369 else if (res->bus == IPR_VSET_VIRTUAL_BUS) 1370 clear_bit(res->target, ioa_cfg->vset_ids); 1371 else if (res->bus == 0 && res->type == IPR_RES_TYPE_GENERIC_SCSI) { 1372 list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue) 1373 if (gscsi_res->dev_id == res->dev_id && gscsi_res != res) 1374 return; 1375 clear_bit(res->target, ioa_cfg->target_ids); 1376 1377 } else if (res->bus == 0) 1378 clear_bit(res->target, ioa_cfg->target_ids); 1379 } 1380 1381 /** 1382 * ipr_handle_config_change - Handle a config change from the adapter 1383 * @ioa_cfg: ioa config struct 1384 * @hostrcb: hostrcb 1385 * 1386 * Return value: 1387 * none 1388 **/ 1389 static void ipr_handle_config_change(struct ipr_ioa_cfg *ioa_cfg, 1390 struct ipr_hostrcb *hostrcb) 1391 { 1392 struct ipr_resource_entry *res = NULL; 1393 struct ipr_config_table_entry_wrapper cfgtew; 1394 __be32 cc_res_handle; 1395 1396 u32 is_ndn = 1; 1397 1398 if (ioa_cfg->sis64) { 1399 cfgtew.u.cfgte64 = &hostrcb->hcam.u.ccn.u.cfgte64; 1400 cc_res_handle = cfgtew.u.cfgte64->res_handle; 1401 } else { 1402 cfgtew.u.cfgte = &hostrcb->hcam.u.ccn.u.cfgte; 1403 cc_res_handle = cfgtew.u.cfgte->res_handle; 1404 } 1405 1406 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) { 1407 if (res->res_handle == cc_res_handle) { 1408 is_ndn = 0; 1409 break; 1410 } 1411 } 1412 1413 if (is_ndn) { 1414 if (list_empty(&ioa_cfg->free_res_q)) { 1415 ipr_send_hcam(ioa_cfg, 1416 IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, 1417 hostrcb); 1418 return; 1419 } 1420 1421 res = list_entry(ioa_cfg->free_res_q.next, 1422 struct ipr_resource_entry, queue); 1423 1424 list_del(&res->queue); 1425 ipr_init_res_entry(res, &cfgtew); 1426 list_add_tail(&res->queue, &ioa_cfg->used_res_q); 1427 } 1428 1429 ipr_update_res_entry(res, &cfgtew); 1430 1431 if (hostrcb->hcam.notify_type == IPR_HOST_RCB_NOTIF_TYPE_REM_ENTRY) { 1432 if (res->sdev) { 1433 res->del_from_ml = 1; 1434 res->res_handle = IPR_INVALID_RES_HANDLE; 1435 schedule_work(&ioa_cfg->work_q); 1436 } else { 1437 ipr_clear_res_target(res); 1438 list_move_tail(&res->queue, &ioa_cfg->free_res_q); 1439 } 1440 } else if (!res->sdev || res->del_from_ml) { 1441 res->add_to_ml = 1; 1442 schedule_work(&ioa_cfg->work_q); 1443 } 1444 1445 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb); 1446 } 1447 1448 /** 1449 * ipr_process_ccn - Op done function for a CCN. 1450 * @ipr_cmd: ipr command struct 1451 * 1452 * This function is the op done function for a configuration 1453 * change notification host controlled async from the adapter. 1454 * 1455 * Return value: 1456 * none 1457 **/ 1458 static void ipr_process_ccn(struct ipr_cmnd *ipr_cmd) 1459 { 1460 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 1461 struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb; 1462 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); 1463 1464 list_del(&hostrcb->queue); 1465 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); 1466 1467 if (ioasc) { 1468 if (ioasc != IPR_IOASC_IOA_WAS_RESET && 1469 ioasc != IPR_IOASC_ABORTED_CMD_TERM_BY_HOST) 1470 dev_err(&ioa_cfg->pdev->dev, 1471 "Host RCB failed with IOASC: 0x%08X\n", ioasc); 1472 1473 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb); 1474 } else { 1475 ipr_handle_config_change(ioa_cfg, hostrcb); 1476 } 1477 } 1478 1479 /** 1480 * strip_and_pad_whitespace - Strip and pad trailing whitespace. 1481 * @i: index into buffer 1482 * @buf: string to modify 1483 * 1484 * This function will strip all trailing whitespace, pad the end 1485 * of the string with a single space, and NULL terminate the string. 1486 * 1487 * Return value: 1488 * new length of string 1489 **/ 1490 static int strip_and_pad_whitespace(int i, char *buf) 1491 { 1492 while (i && buf[i] == ' ') 1493 i--; 1494 buf[i+1] = ' '; 1495 buf[i+2] = '\0'; 1496 return i + 2; 1497 } 1498 1499 /** 1500 * ipr_log_vpd_compact - Log the passed extended VPD compactly. 1501 * @prefix: string to print at start of printk 1502 * @hostrcb: hostrcb pointer 1503 * @vpd: vendor/product id/sn struct 1504 * 1505 * Return value: 1506 * none 1507 **/ 1508 static void ipr_log_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb, 1509 struct ipr_vpd *vpd) 1510 { 1511 char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN + IPR_SERIAL_NUM_LEN + 3]; 1512 int i = 0; 1513 1514 memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN); 1515 i = strip_and_pad_whitespace(IPR_VENDOR_ID_LEN - 1, buffer); 1516 1517 memcpy(&buffer[i], vpd->vpids.product_id, IPR_PROD_ID_LEN); 1518 i = strip_and_pad_whitespace(i + IPR_PROD_ID_LEN - 1, buffer); 1519 1520 memcpy(&buffer[i], vpd->sn, IPR_SERIAL_NUM_LEN); 1521 buffer[IPR_SERIAL_NUM_LEN + i] = '\0'; 1522 1523 ipr_hcam_err(hostrcb, "%s VPID/SN: %s\n", prefix, buffer); 1524 } 1525 1526 /** 1527 * ipr_log_vpd - Log the passed VPD to the error log. 1528 * @vpd: vendor/product id/sn struct 1529 * 1530 * Return value: 1531 * none 1532 **/ 1533 static void ipr_log_vpd(struct ipr_vpd *vpd) 1534 { 1535 char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN 1536 + IPR_SERIAL_NUM_LEN]; 1537 1538 memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN); 1539 memcpy(buffer + IPR_VENDOR_ID_LEN, vpd->vpids.product_id, 1540 IPR_PROD_ID_LEN); 1541 buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN] = '\0'; 1542 ipr_err("Vendor/Product ID: %s\n", buffer); 1543 1544 memcpy(buffer, vpd->sn, IPR_SERIAL_NUM_LEN); 1545 buffer[IPR_SERIAL_NUM_LEN] = '\0'; 1546 ipr_err(" Serial Number: %s\n", buffer); 1547 } 1548 1549 /** 1550 * ipr_log_ext_vpd_compact - Log the passed extended VPD compactly. 1551 * @prefix: string to print at start of printk 1552 * @hostrcb: hostrcb pointer 1553 * @vpd: vendor/product id/sn/wwn struct 1554 * 1555 * Return value: 1556 * none 1557 **/ 1558 static void ipr_log_ext_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb, 1559 struct ipr_ext_vpd *vpd) 1560 { 1561 ipr_log_vpd_compact(prefix, hostrcb, &vpd->vpd); 1562 ipr_hcam_err(hostrcb, "%s WWN: %08X%08X\n", prefix, 1563 be32_to_cpu(vpd->wwid[0]), be32_to_cpu(vpd->wwid[1])); 1564 } 1565 1566 /** 1567 * ipr_log_ext_vpd - Log the passed extended VPD to the error log. 1568 * @vpd: vendor/product id/sn/wwn struct 1569 * 1570 * Return value: 1571 * none 1572 **/ 1573 static void ipr_log_ext_vpd(struct ipr_ext_vpd *vpd) 1574 { 1575 ipr_log_vpd(&vpd->vpd); 1576 ipr_err(" WWN: %08X%08X\n", be32_to_cpu(vpd->wwid[0]), 1577 be32_to_cpu(vpd->wwid[1])); 1578 } 1579 1580 /** 1581 * ipr_log_enhanced_cache_error - Log a cache error. 1582 * @ioa_cfg: ioa config struct 1583 * @hostrcb: hostrcb struct 1584 * 1585 * Return value: 1586 * none 1587 **/ 1588 static void ipr_log_enhanced_cache_error(struct ipr_ioa_cfg *ioa_cfg, 1589 struct ipr_hostrcb *hostrcb) 1590 { 1591 struct ipr_hostrcb_type_12_error *error; 1592 1593 if (ioa_cfg->sis64) 1594 error = &hostrcb->hcam.u.error64.u.type_12_error; 1595 else 1596 error = &hostrcb->hcam.u.error.u.type_12_error; 1597 1598 ipr_err("-----Current Configuration-----\n"); 1599 ipr_err("Cache Directory Card Information:\n"); 1600 ipr_log_ext_vpd(&error->ioa_vpd); 1601 ipr_err("Adapter Card Information:\n"); 1602 ipr_log_ext_vpd(&error->cfc_vpd); 1603 1604 ipr_err("-----Expected Configuration-----\n"); 1605 ipr_err("Cache Directory Card Information:\n"); 1606 ipr_log_ext_vpd(&error->ioa_last_attached_to_cfc_vpd); 1607 ipr_err("Adapter Card Information:\n"); 1608 ipr_log_ext_vpd(&error->cfc_last_attached_to_ioa_vpd); 1609 1610 ipr_err("Additional IOA Data: %08X %08X %08X\n", 1611 be32_to_cpu(error->ioa_data[0]), 1612 be32_to_cpu(error->ioa_data[1]), 1613 be32_to_cpu(error->ioa_data[2])); 1614 } 1615 1616 /** 1617 * ipr_log_cache_error - Log a cache error. 1618 * @ioa_cfg: ioa config struct 1619 * @hostrcb: hostrcb struct 1620 * 1621 * Return value: 1622 * none 1623 **/ 1624 static void ipr_log_cache_error(struct ipr_ioa_cfg *ioa_cfg, 1625 struct ipr_hostrcb *hostrcb) 1626 { 1627 struct ipr_hostrcb_type_02_error *error = 1628 &hostrcb->hcam.u.error.u.type_02_error; 1629 1630 ipr_err("-----Current Configuration-----\n"); 1631 ipr_err("Cache Directory Card Information:\n"); 1632 ipr_log_vpd(&error->ioa_vpd); 1633 ipr_err("Adapter Card Information:\n"); 1634 ipr_log_vpd(&error->cfc_vpd); 1635 1636 ipr_err("-----Expected Configuration-----\n"); 1637 ipr_err("Cache Directory Card Information:\n"); 1638 ipr_log_vpd(&error->ioa_last_attached_to_cfc_vpd); 1639 ipr_err("Adapter Card Information:\n"); 1640 ipr_log_vpd(&error->cfc_last_attached_to_ioa_vpd); 1641 1642 ipr_err("Additional IOA Data: %08X %08X %08X\n", 1643 be32_to_cpu(error->ioa_data[0]), 1644 be32_to_cpu(error->ioa_data[1]), 1645 be32_to_cpu(error->ioa_data[2])); 1646 } 1647 1648 /** 1649 * ipr_log_enhanced_config_error - Log a configuration error. 1650 * @ioa_cfg: ioa config struct 1651 * @hostrcb: hostrcb struct 1652 * 1653 * Return value: 1654 * none 1655 **/ 1656 static void ipr_log_enhanced_config_error(struct ipr_ioa_cfg *ioa_cfg, 1657 struct ipr_hostrcb *hostrcb) 1658 { 1659 int errors_logged, i; 1660 struct ipr_hostrcb_device_data_entry_enhanced *dev_entry; 1661 struct ipr_hostrcb_type_13_error *error; 1662 1663 error = &hostrcb->hcam.u.error.u.type_13_error; 1664 errors_logged = be32_to_cpu(error->errors_logged); 1665 1666 ipr_err("Device Errors Detected/Logged: %d/%d\n", 1667 be32_to_cpu(error->errors_detected), errors_logged); 1668 1669 dev_entry = error->dev; 1670 1671 for (i = 0; i < errors_logged; i++, dev_entry++) { 1672 ipr_err_separator; 1673 1674 ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1); 1675 ipr_log_ext_vpd(&dev_entry->vpd); 1676 1677 ipr_err("-----New Device Information-----\n"); 1678 ipr_log_ext_vpd(&dev_entry->new_vpd); 1679 1680 ipr_err("Cache Directory Card Information:\n"); 1681 ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd); 1682 1683 ipr_err("Adapter Card Information:\n"); 1684 ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd); 1685 } 1686 } 1687 1688 /** 1689 * ipr_log_sis64_config_error - Log a device error. 1690 * @ioa_cfg: ioa config struct 1691 * @hostrcb: hostrcb struct 1692 * 1693 * Return value: 1694 * none 1695 **/ 1696 static void ipr_log_sis64_config_error(struct ipr_ioa_cfg *ioa_cfg, 1697 struct ipr_hostrcb *hostrcb) 1698 { 1699 int errors_logged, i; 1700 struct ipr_hostrcb64_device_data_entry_enhanced *dev_entry; 1701 struct ipr_hostrcb_type_23_error *error; 1702 char buffer[IPR_MAX_RES_PATH_LENGTH]; 1703 1704 error = &hostrcb->hcam.u.error64.u.type_23_error; 1705 errors_logged = be32_to_cpu(error->errors_logged); 1706 1707 ipr_err("Device Errors Detected/Logged: %d/%d\n", 1708 be32_to_cpu(error->errors_detected), errors_logged); 1709 1710 dev_entry = error->dev; 1711 1712 for (i = 0; i < errors_logged; i++, dev_entry++) { 1713 ipr_err_separator; 1714 1715 ipr_err("Device %d : %s", i + 1, 1716 __ipr_format_res_path(dev_entry->res_path, 1717 buffer, sizeof(buffer))); 1718 ipr_log_ext_vpd(&dev_entry->vpd); 1719 1720 ipr_err("-----New Device Information-----\n"); 1721 ipr_log_ext_vpd(&dev_entry->new_vpd); 1722 1723 ipr_err("Cache Directory Card Information:\n"); 1724 ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd); 1725 1726 ipr_err("Adapter Card Information:\n"); 1727 ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd); 1728 } 1729 } 1730 1731 /** 1732 * ipr_log_config_error - Log a configuration error. 1733 * @ioa_cfg: ioa config struct 1734 * @hostrcb: hostrcb struct 1735 * 1736 * Return value: 1737 * none 1738 **/ 1739 static void ipr_log_config_error(struct ipr_ioa_cfg *ioa_cfg, 1740 struct ipr_hostrcb *hostrcb) 1741 { 1742 int errors_logged, i; 1743 struct ipr_hostrcb_device_data_entry *dev_entry; 1744 struct ipr_hostrcb_type_03_error *error; 1745 1746 error = &hostrcb->hcam.u.error.u.type_03_error; 1747 errors_logged = be32_to_cpu(error->errors_logged); 1748 1749 ipr_err("Device Errors Detected/Logged: %d/%d\n", 1750 be32_to_cpu(error->errors_detected), errors_logged); 1751 1752 dev_entry = error->dev; 1753 1754 for (i = 0; i < errors_logged; i++, dev_entry++) { 1755 ipr_err_separator; 1756 1757 ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1); 1758 ipr_log_vpd(&dev_entry->vpd); 1759 1760 ipr_err("-----New Device Information-----\n"); 1761 ipr_log_vpd(&dev_entry->new_vpd); 1762 1763 ipr_err("Cache Directory Card Information:\n"); 1764 ipr_log_vpd(&dev_entry->ioa_last_with_dev_vpd); 1765 1766 ipr_err("Adapter Card Information:\n"); 1767 ipr_log_vpd(&dev_entry->cfc_last_with_dev_vpd); 1768 1769 ipr_err("Additional IOA Data: %08X %08X %08X %08X %08X\n", 1770 be32_to_cpu(dev_entry->ioa_data[0]), 1771 be32_to_cpu(dev_entry->ioa_data[1]), 1772 be32_to_cpu(dev_entry->ioa_data[2]), 1773 be32_to_cpu(dev_entry->ioa_data[3]), 1774 be32_to_cpu(dev_entry->ioa_data[4])); 1775 } 1776 } 1777 1778 /** 1779 * ipr_log_enhanced_array_error - Log an array configuration error. 1780 * @ioa_cfg: ioa config struct 1781 * @hostrcb: hostrcb struct 1782 * 1783 * Return value: 1784 * none 1785 **/ 1786 static void ipr_log_enhanced_array_error(struct ipr_ioa_cfg *ioa_cfg, 1787 struct ipr_hostrcb *hostrcb) 1788 { 1789 int i, num_entries; 1790 struct ipr_hostrcb_type_14_error *error; 1791 struct ipr_hostrcb_array_data_entry_enhanced *array_entry; 1792 const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' }; 1793 1794 error = &hostrcb->hcam.u.error.u.type_14_error; 1795 1796 ipr_err_separator; 1797 1798 ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n", 1799 error->protection_level, 1800 ioa_cfg->host->host_no, 1801 error->last_func_vset_res_addr.bus, 1802 error->last_func_vset_res_addr.target, 1803 error->last_func_vset_res_addr.lun); 1804 1805 ipr_err_separator; 1806 1807 array_entry = error->array_member; 1808 num_entries = min_t(u32, be32_to_cpu(error->num_entries), 1809 ARRAY_SIZE(error->array_member)); 1810 1811 for (i = 0; i < num_entries; i++, array_entry++) { 1812 if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN)) 1813 continue; 1814 1815 if (be32_to_cpu(error->exposed_mode_adn) == i) 1816 ipr_err("Exposed Array Member %d:\n", i); 1817 else 1818 ipr_err("Array Member %d:\n", i); 1819 1820 ipr_log_ext_vpd(&array_entry->vpd); 1821 ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location"); 1822 ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr, 1823 "Expected Location"); 1824 1825 ipr_err_separator; 1826 } 1827 } 1828 1829 /** 1830 * ipr_log_array_error - Log an array configuration error. 1831 * @ioa_cfg: ioa config struct 1832 * @hostrcb: hostrcb struct 1833 * 1834 * Return value: 1835 * none 1836 **/ 1837 static void ipr_log_array_error(struct ipr_ioa_cfg *ioa_cfg, 1838 struct ipr_hostrcb *hostrcb) 1839 { 1840 int i; 1841 struct ipr_hostrcb_type_04_error *error; 1842 struct ipr_hostrcb_array_data_entry *array_entry; 1843 const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' }; 1844 1845 error = &hostrcb->hcam.u.error.u.type_04_error; 1846 1847 ipr_err_separator; 1848 1849 ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n", 1850 error->protection_level, 1851 ioa_cfg->host->host_no, 1852 error->last_func_vset_res_addr.bus, 1853 error->last_func_vset_res_addr.target, 1854 error->last_func_vset_res_addr.lun); 1855 1856 ipr_err_separator; 1857 1858 array_entry = error->array_member; 1859 1860 for (i = 0; i < 18; i++) { 1861 if (!memcmp(array_entry->vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN)) 1862 continue; 1863 1864 if (be32_to_cpu(error->exposed_mode_adn) == i) 1865 ipr_err("Exposed Array Member %d:\n", i); 1866 else 1867 ipr_err("Array Member %d:\n", i); 1868 1869 ipr_log_vpd(&array_entry->vpd); 1870 1871 ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location"); 1872 ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr, 1873 "Expected Location"); 1874 1875 ipr_err_separator; 1876 1877 if (i == 9) 1878 array_entry = error->array_member2; 1879 else 1880 array_entry++; 1881 } 1882 } 1883 1884 /** 1885 * ipr_log_hex_data - Log additional hex IOA error data. 1886 * @ioa_cfg: ioa config struct 1887 * @data: IOA error data 1888 * @len: data length 1889 * 1890 * Return value: 1891 * none 1892 **/ 1893 static void ipr_log_hex_data(struct ipr_ioa_cfg *ioa_cfg, u32 *data, int len) 1894 { 1895 int i; 1896 1897 if (len == 0) 1898 return; 1899 1900 if (ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL) 1901 len = min_t(int, len, IPR_DEFAULT_MAX_ERROR_DUMP); 1902 1903 for (i = 0; i < len / 4; i += 4) { 1904 ipr_err("%08X: %08X %08X %08X %08X\n", i*4, 1905 be32_to_cpu(data[i]), 1906 be32_to_cpu(data[i+1]), 1907 be32_to_cpu(data[i+2]), 1908 be32_to_cpu(data[i+3])); 1909 } 1910 } 1911 1912 /** 1913 * ipr_log_enhanced_dual_ioa_error - Log an enhanced dual adapter error. 1914 * @ioa_cfg: ioa config struct 1915 * @hostrcb: hostrcb struct 1916 * 1917 * Return value: 1918 * none 1919 **/ 1920 static void ipr_log_enhanced_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg, 1921 struct ipr_hostrcb *hostrcb) 1922 { 1923 struct ipr_hostrcb_type_17_error *error; 1924 1925 if (ioa_cfg->sis64) 1926 error = &hostrcb->hcam.u.error64.u.type_17_error; 1927 else 1928 error = &hostrcb->hcam.u.error.u.type_17_error; 1929 1930 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0'; 1931 strim(error->failure_reason); 1932 1933 ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason, 1934 be32_to_cpu(hostrcb->hcam.u.error.prc)); 1935 ipr_log_ext_vpd_compact("Remote IOA", hostrcb, &error->vpd); 1936 ipr_log_hex_data(ioa_cfg, error->data, 1937 be32_to_cpu(hostrcb->hcam.length) - 1938 (offsetof(struct ipr_hostrcb_error, u) + 1939 offsetof(struct ipr_hostrcb_type_17_error, data))); 1940 } 1941 1942 /** 1943 * ipr_log_dual_ioa_error - Log a dual adapter error. 1944 * @ioa_cfg: ioa config struct 1945 * @hostrcb: hostrcb struct 1946 * 1947 * Return value: 1948 * none 1949 **/ 1950 static void ipr_log_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg, 1951 struct ipr_hostrcb *hostrcb) 1952 { 1953 struct ipr_hostrcb_type_07_error *error; 1954 1955 error = &hostrcb->hcam.u.error.u.type_07_error; 1956 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0'; 1957 strim(error->failure_reason); 1958 1959 ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason, 1960 be32_to_cpu(hostrcb->hcam.u.error.prc)); 1961 ipr_log_vpd_compact("Remote IOA", hostrcb, &error->vpd); 1962 ipr_log_hex_data(ioa_cfg, error->data, 1963 be32_to_cpu(hostrcb->hcam.length) - 1964 (offsetof(struct ipr_hostrcb_error, u) + 1965 offsetof(struct ipr_hostrcb_type_07_error, data))); 1966 } 1967 1968 static const struct { 1969 u8 active; 1970 char *desc; 1971 } path_active_desc[] = { 1972 { IPR_PATH_NO_INFO, "Path" }, 1973 { IPR_PATH_ACTIVE, "Active path" }, 1974 { IPR_PATH_NOT_ACTIVE, "Inactive path" } 1975 }; 1976 1977 static const struct { 1978 u8 state; 1979 char *desc; 1980 } path_state_desc[] = { 1981 { IPR_PATH_STATE_NO_INFO, "has no path state information available" }, 1982 { IPR_PATH_HEALTHY, "is healthy" }, 1983 { IPR_PATH_DEGRADED, "is degraded" }, 1984 { IPR_PATH_FAILED, "is failed" } 1985 }; 1986 1987 /** 1988 * ipr_log_fabric_path - Log a fabric path error 1989 * @hostrcb: hostrcb struct 1990 * @fabric: fabric descriptor 1991 * 1992 * Return value: 1993 * none 1994 **/ 1995 static void ipr_log_fabric_path(struct ipr_hostrcb *hostrcb, 1996 struct ipr_hostrcb_fabric_desc *fabric) 1997 { 1998 int i, j; 1999 u8 path_state = fabric->path_state; 2000 u8 active = path_state & IPR_PATH_ACTIVE_MASK; 2001 u8 state = path_state & IPR_PATH_STATE_MASK; 2002 2003 for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) { 2004 if (path_active_desc[i].active != active) 2005 continue; 2006 2007 for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) { 2008 if (path_state_desc[j].state != state) 2009 continue; 2010 2011 if (fabric->cascaded_expander == 0xff && fabric->phy == 0xff) { 2012 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d\n", 2013 path_active_desc[i].desc, path_state_desc[j].desc, 2014 fabric->ioa_port); 2015 } else if (fabric->cascaded_expander == 0xff) { 2016 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Phy=%d\n", 2017 path_active_desc[i].desc, path_state_desc[j].desc, 2018 fabric->ioa_port, fabric->phy); 2019 } else if (fabric->phy == 0xff) { 2020 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d\n", 2021 path_active_desc[i].desc, path_state_desc[j].desc, 2022 fabric->ioa_port, fabric->cascaded_expander); 2023 } else { 2024 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d, Phy=%d\n", 2025 path_active_desc[i].desc, path_state_desc[j].desc, 2026 fabric->ioa_port, fabric->cascaded_expander, fabric->phy); 2027 } 2028 return; 2029 } 2030 } 2031 2032 ipr_err("Path state=%02X IOA Port=%d Cascade=%d Phy=%d\n", path_state, 2033 fabric->ioa_port, fabric->cascaded_expander, fabric->phy); 2034 } 2035 2036 /** 2037 * ipr_log64_fabric_path - Log a fabric path error 2038 * @hostrcb: hostrcb struct 2039 * @fabric: fabric descriptor 2040 * 2041 * Return value: 2042 * none 2043 **/ 2044 static void ipr_log64_fabric_path(struct ipr_hostrcb *hostrcb, 2045 struct ipr_hostrcb64_fabric_desc *fabric) 2046 { 2047 int i, j; 2048 u8 path_state = fabric->path_state; 2049 u8 active = path_state & IPR_PATH_ACTIVE_MASK; 2050 u8 state = path_state & IPR_PATH_STATE_MASK; 2051 char buffer[IPR_MAX_RES_PATH_LENGTH]; 2052 2053 for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) { 2054 if (path_active_desc[i].active != active) 2055 continue; 2056 2057 for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) { 2058 if (path_state_desc[j].state != state) 2059 continue; 2060 2061 ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s\n", 2062 path_active_desc[i].desc, path_state_desc[j].desc, 2063 ipr_format_res_path(hostrcb->ioa_cfg, 2064 fabric->res_path, 2065 buffer, sizeof(buffer))); 2066 return; 2067 } 2068 } 2069 2070 ipr_err("Path state=%02X Resource Path=%s\n", path_state, 2071 ipr_format_res_path(hostrcb->ioa_cfg, fabric->res_path, 2072 buffer, sizeof(buffer))); 2073 } 2074 2075 static const struct { 2076 u8 type; 2077 char *desc; 2078 } path_type_desc[] = { 2079 { IPR_PATH_CFG_IOA_PORT, "IOA port" }, 2080 { IPR_PATH_CFG_EXP_PORT, "Expander port" }, 2081 { IPR_PATH_CFG_DEVICE_PORT, "Device port" }, 2082 { IPR_PATH_CFG_DEVICE_LUN, "Device LUN" } 2083 }; 2084 2085 static const struct { 2086 u8 status; 2087 char *desc; 2088 } path_status_desc[] = { 2089 { IPR_PATH_CFG_NO_PROB, "Functional" }, 2090 { IPR_PATH_CFG_DEGRADED, "Degraded" }, 2091 { IPR_PATH_CFG_FAILED, "Failed" }, 2092 { IPR_PATH_CFG_SUSPECT, "Suspect" }, 2093 { IPR_PATH_NOT_DETECTED, "Missing" }, 2094 { IPR_PATH_INCORRECT_CONN, "Incorrectly connected" } 2095 }; 2096 2097 static const char *link_rate[] = { 2098 "unknown", 2099 "disabled", 2100 "phy reset problem", 2101 "spinup hold", 2102 "port selector", 2103 "unknown", 2104 "unknown", 2105 "unknown", 2106 "1.5Gbps", 2107 "3.0Gbps", 2108 "unknown", 2109 "unknown", 2110 "unknown", 2111 "unknown", 2112 "unknown", 2113 "unknown" 2114 }; 2115 2116 /** 2117 * ipr_log_path_elem - Log a fabric path element. 2118 * @hostrcb: hostrcb struct 2119 * @cfg: fabric path element struct 2120 * 2121 * Return value: 2122 * none 2123 **/ 2124 static void ipr_log_path_elem(struct ipr_hostrcb *hostrcb, 2125 struct ipr_hostrcb_config_element *cfg) 2126 { 2127 int i, j; 2128 u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK; 2129 u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK; 2130 2131 if (type == IPR_PATH_CFG_NOT_EXIST) 2132 return; 2133 2134 for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) { 2135 if (path_type_desc[i].type != type) 2136 continue; 2137 2138 for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) { 2139 if (path_status_desc[j].status != status) 2140 continue; 2141 2142 if (type == IPR_PATH_CFG_IOA_PORT) { 2143 ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, WWN=%08X%08X\n", 2144 path_status_desc[j].desc, path_type_desc[i].desc, 2145 cfg->phy, link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK], 2146 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1])); 2147 } else { 2148 if (cfg->cascaded_expander == 0xff && cfg->phy == 0xff) { 2149 ipr_hcam_err(hostrcb, "%s %s: Link rate=%s, WWN=%08X%08X\n", 2150 path_status_desc[j].desc, path_type_desc[i].desc, 2151 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK], 2152 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1])); 2153 } else if (cfg->cascaded_expander == 0xff) { 2154 ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, " 2155 "WWN=%08X%08X\n", path_status_desc[j].desc, 2156 path_type_desc[i].desc, cfg->phy, 2157 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK], 2158 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1])); 2159 } else if (cfg->phy == 0xff) { 2160 ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Link rate=%s, " 2161 "WWN=%08X%08X\n", path_status_desc[j].desc, 2162 path_type_desc[i].desc, cfg->cascaded_expander, 2163 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK], 2164 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1])); 2165 } else { 2166 ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Phy=%d, Link rate=%s " 2167 "WWN=%08X%08X\n", path_status_desc[j].desc, 2168 path_type_desc[i].desc, cfg->cascaded_expander, cfg->phy, 2169 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK], 2170 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1])); 2171 } 2172 } 2173 return; 2174 } 2175 } 2176 2177 ipr_hcam_err(hostrcb, "Path element=%02X: Cascade=%d Phy=%d Link rate=%s " 2178 "WWN=%08X%08X\n", cfg->type_status, cfg->cascaded_expander, cfg->phy, 2179 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK], 2180 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1])); 2181 } 2182 2183 /** 2184 * ipr_log64_path_elem - Log a fabric path element. 2185 * @hostrcb: hostrcb struct 2186 * @cfg: fabric path element struct 2187 * 2188 * Return value: 2189 * none 2190 **/ 2191 static void ipr_log64_path_elem(struct ipr_hostrcb *hostrcb, 2192 struct ipr_hostrcb64_config_element *cfg) 2193 { 2194 int i, j; 2195 u8 desc_id = cfg->descriptor_id & IPR_DESCRIPTOR_MASK; 2196 u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK; 2197 u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK; 2198 char buffer[IPR_MAX_RES_PATH_LENGTH]; 2199 2200 if (type == IPR_PATH_CFG_NOT_EXIST || desc_id != IPR_DESCRIPTOR_SIS64) 2201 return; 2202 2203 for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) { 2204 if (path_type_desc[i].type != type) 2205 continue; 2206 2207 for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) { 2208 if (path_status_desc[j].status != status) 2209 continue; 2210 2211 ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s, Link rate=%s, WWN=%08X%08X\n", 2212 path_status_desc[j].desc, path_type_desc[i].desc, 2213 ipr_format_res_path(hostrcb->ioa_cfg, 2214 cfg->res_path, buffer, sizeof(buffer)), 2215 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK], 2216 be32_to_cpu(cfg->wwid[0]), 2217 be32_to_cpu(cfg->wwid[1])); 2218 return; 2219 } 2220 } 2221 ipr_hcam_err(hostrcb, "Path element=%02X: Resource Path=%s, Link rate=%s " 2222 "WWN=%08X%08X\n", cfg->type_status, 2223 ipr_format_res_path(hostrcb->ioa_cfg, 2224 cfg->res_path, buffer, sizeof(buffer)), 2225 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK], 2226 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1])); 2227 } 2228 2229 /** 2230 * ipr_log_fabric_error - Log a fabric error. 2231 * @ioa_cfg: ioa config struct 2232 * @hostrcb: hostrcb struct 2233 * 2234 * Return value: 2235 * none 2236 **/ 2237 static void ipr_log_fabric_error(struct ipr_ioa_cfg *ioa_cfg, 2238 struct ipr_hostrcb *hostrcb) 2239 { 2240 struct ipr_hostrcb_type_20_error *error; 2241 struct ipr_hostrcb_fabric_desc *fabric; 2242 struct ipr_hostrcb_config_element *cfg; 2243 int i, add_len; 2244 2245 error = &hostrcb->hcam.u.error.u.type_20_error; 2246 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0'; 2247 ipr_hcam_err(hostrcb, "%s\n", error->failure_reason); 2248 2249 add_len = be32_to_cpu(hostrcb->hcam.length) - 2250 (offsetof(struct ipr_hostrcb_error, u) + 2251 offsetof(struct ipr_hostrcb_type_20_error, desc)); 2252 2253 for (i = 0, fabric = error->desc; i < error->num_entries; i++) { 2254 ipr_log_fabric_path(hostrcb, fabric); 2255 for_each_fabric_cfg(fabric, cfg) 2256 ipr_log_path_elem(hostrcb, cfg); 2257 2258 add_len -= be16_to_cpu(fabric->length); 2259 fabric = (struct ipr_hostrcb_fabric_desc *) 2260 ((unsigned long)fabric + be16_to_cpu(fabric->length)); 2261 } 2262 2263 ipr_log_hex_data(ioa_cfg, (u32 *)fabric, add_len); 2264 } 2265 2266 /** 2267 * ipr_log_sis64_array_error - Log a sis64 array error. 2268 * @ioa_cfg: ioa config struct 2269 * @hostrcb: hostrcb struct 2270 * 2271 * Return value: 2272 * none 2273 **/ 2274 static void ipr_log_sis64_array_error(struct ipr_ioa_cfg *ioa_cfg, 2275 struct ipr_hostrcb *hostrcb) 2276 { 2277 int i, num_entries; 2278 struct ipr_hostrcb_type_24_error *error; 2279 struct ipr_hostrcb64_array_data_entry *array_entry; 2280 char buffer[IPR_MAX_RES_PATH_LENGTH]; 2281 const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' }; 2282 2283 error = &hostrcb->hcam.u.error64.u.type_24_error; 2284 2285 ipr_err_separator; 2286 2287 ipr_err("RAID %s Array Configuration: %s\n", 2288 error->protection_level, 2289 ipr_format_res_path(ioa_cfg, error->last_res_path, 2290 buffer, sizeof(buffer))); 2291 2292 ipr_err_separator; 2293 2294 array_entry = error->array_member; 2295 num_entries = min_t(u32, error->num_entries, 2296 ARRAY_SIZE(error->array_member)); 2297 2298 for (i = 0; i < num_entries; i++, array_entry++) { 2299 2300 if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN)) 2301 continue; 2302 2303 if (error->exposed_mode_adn == i) 2304 ipr_err("Exposed Array Member %d:\n", i); 2305 else 2306 ipr_err("Array Member %d:\n", i); 2307 2308 ipr_err("Array Member %d:\n", i); 2309 ipr_log_ext_vpd(&array_entry->vpd); 2310 ipr_err("Current Location: %s\n", 2311 ipr_format_res_path(ioa_cfg, array_entry->res_path, 2312 buffer, sizeof(buffer))); 2313 ipr_err("Expected Location: %s\n", 2314 ipr_format_res_path(ioa_cfg, 2315 array_entry->expected_res_path, 2316 buffer, sizeof(buffer))); 2317 2318 ipr_err_separator; 2319 } 2320 } 2321 2322 /** 2323 * ipr_log_sis64_fabric_error - Log a sis64 fabric error. 2324 * @ioa_cfg: ioa config struct 2325 * @hostrcb: hostrcb struct 2326 * 2327 * Return value: 2328 * none 2329 **/ 2330 static void ipr_log_sis64_fabric_error(struct ipr_ioa_cfg *ioa_cfg, 2331 struct ipr_hostrcb *hostrcb) 2332 { 2333 struct ipr_hostrcb_type_30_error *error; 2334 struct ipr_hostrcb64_fabric_desc *fabric; 2335 struct ipr_hostrcb64_config_element *cfg; 2336 int i, add_len; 2337 2338 error = &hostrcb->hcam.u.error64.u.type_30_error; 2339 2340 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0'; 2341 ipr_hcam_err(hostrcb, "%s\n", error->failure_reason); 2342 2343 add_len = be32_to_cpu(hostrcb->hcam.length) - 2344 (offsetof(struct ipr_hostrcb64_error, u) + 2345 offsetof(struct ipr_hostrcb_type_30_error, desc)); 2346 2347 for (i = 0, fabric = error->desc; i < error->num_entries; i++) { 2348 ipr_log64_fabric_path(hostrcb, fabric); 2349 for_each_fabric_cfg(fabric, cfg) 2350 ipr_log64_path_elem(hostrcb, cfg); 2351 2352 add_len -= be16_to_cpu(fabric->length); 2353 fabric = (struct ipr_hostrcb64_fabric_desc *) 2354 ((unsigned long)fabric + be16_to_cpu(fabric->length)); 2355 } 2356 2357 ipr_log_hex_data(ioa_cfg, (u32 *)fabric, add_len); 2358 } 2359 2360 /** 2361 * ipr_log_generic_error - Log an adapter error. 2362 * @ioa_cfg: ioa config struct 2363 * @hostrcb: hostrcb struct 2364 * 2365 * Return value: 2366 * none 2367 **/ 2368 static void ipr_log_generic_error(struct ipr_ioa_cfg *ioa_cfg, 2369 struct ipr_hostrcb *hostrcb) 2370 { 2371 ipr_log_hex_data(ioa_cfg, hostrcb->hcam.u.raw.data, 2372 be32_to_cpu(hostrcb->hcam.length)); 2373 } 2374 2375 /** 2376 * ipr_log_sis64_device_error - Log a cache error. 2377 * @ioa_cfg: ioa config struct 2378 * @hostrcb: hostrcb struct 2379 * 2380 * Return value: 2381 * none 2382 **/ 2383 static void ipr_log_sis64_device_error(struct ipr_ioa_cfg *ioa_cfg, 2384 struct ipr_hostrcb *hostrcb) 2385 { 2386 struct ipr_hostrcb_type_21_error *error; 2387 char buffer[IPR_MAX_RES_PATH_LENGTH]; 2388 2389 error = &hostrcb->hcam.u.error64.u.type_21_error; 2390 2391 ipr_err("-----Failing Device Information-----\n"); 2392 ipr_err("World Wide Unique ID: %08X%08X%08X%08X\n", 2393 be32_to_cpu(error->wwn[0]), be32_to_cpu(error->wwn[1]), 2394 be32_to_cpu(error->wwn[2]), be32_to_cpu(error->wwn[3])); 2395 ipr_err("Device Resource Path: %s\n", 2396 __ipr_format_res_path(error->res_path, 2397 buffer, sizeof(buffer))); 2398 error->primary_problem_desc[sizeof(error->primary_problem_desc) - 1] = '\0'; 2399 error->second_problem_desc[sizeof(error->second_problem_desc) - 1] = '\0'; 2400 ipr_err("Primary Problem Description: %s\n", error->primary_problem_desc); 2401 ipr_err("Secondary Problem Description: %s\n", error->second_problem_desc); 2402 ipr_err("SCSI Sense Data:\n"); 2403 ipr_log_hex_data(ioa_cfg, error->sense_data, sizeof(error->sense_data)); 2404 ipr_err("SCSI Command Descriptor Block: \n"); 2405 ipr_log_hex_data(ioa_cfg, error->cdb, sizeof(error->cdb)); 2406 2407 ipr_err("Additional IOA Data:\n"); 2408 ipr_log_hex_data(ioa_cfg, error->ioa_data, be32_to_cpu(error->length_of_error)); 2409 } 2410 2411 /** 2412 * ipr_get_error - Find the specfied IOASC in the ipr_error_table. 2413 * @ioasc: IOASC 2414 * 2415 * This function will return the index of into the ipr_error_table 2416 * for the specified IOASC. If the IOASC is not in the table, 2417 * 0 will be returned, which points to the entry used for unknown errors. 2418 * 2419 * Return value: 2420 * index into the ipr_error_table 2421 **/ 2422 static u32 ipr_get_error(u32 ioasc) 2423 { 2424 int i; 2425 2426 for (i = 0; i < ARRAY_SIZE(ipr_error_table); i++) 2427 if (ipr_error_table[i].ioasc == (ioasc & IPR_IOASC_IOASC_MASK)) 2428 return i; 2429 2430 return 0; 2431 } 2432 2433 /** 2434 * ipr_handle_log_data - Log an adapter error. 2435 * @ioa_cfg: ioa config struct 2436 * @hostrcb: hostrcb struct 2437 * 2438 * This function logs an adapter error to the system. 2439 * 2440 * Return value: 2441 * none 2442 **/ 2443 static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg, 2444 struct ipr_hostrcb *hostrcb) 2445 { 2446 u32 ioasc; 2447 int error_index; 2448 struct ipr_hostrcb_type_21_error *error; 2449 2450 if (hostrcb->hcam.notify_type != IPR_HOST_RCB_NOTIF_TYPE_ERROR_LOG_ENTRY) 2451 return; 2452 2453 if (hostrcb->hcam.notifications_lost == IPR_HOST_RCB_NOTIFICATIONS_LOST) 2454 dev_err(&ioa_cfg->pdev->dev, "Error notifications lost\n"); 2455 2456 if (ioa_cfg->sis64) 2457 ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc); 2458 else 2459 ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc); 2460 2461 if (!ioa_cfg->sis64 && (ioasc == IPR_IOASC_BUS_WAS_RESET || 2462 ioasc == IPR_IOASC_BUS_WAS_RESET_BY_OTHER)) { 2463 /* Tell the midlayer we had a bus reset so it will handle the UA properly */ 2464 scsi_report_bus_reset(ioa_cfg->host, 2465 hostrcb->hcam.u.error.fd_res_addr.bus); 2466 } 2467 2468 error_index = ipr_get_error(ioasc); 2469 2470 if (!ipr_error_table[error_index].log_hcam) 2471 return; 2472 2473 if (ioasc == IPR_IOASC_HW_CMD_FAILED && 2474 hostrcb->hcam.overlay_id == IPR_HOST_RCB_OVERLAY_ID_21) { 2475 error = &hostrcb->hcam.u.error64.u.type_21_error; 2476 2477 if (((be32_to_cpu(error->sense_data[0]) & 0x0000ff00) >> 8) == ILLEGAL_REQUEST && 2478 ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL) 2479 return; 2480 } 2481 2482 ipr_hcam_err(hostrcb, "%s\n", ipr_error_table[error_index].error); 2483 2484 /* Set indication we have logged an error */ 2485 ioa_cfg->errors_logged++; 2486 2487 if (ioa_cfg->log_level < ipr_error_table[error_index].log_hcam) 2488 return; 2489 if (be32_to_cpu(hostrcb->hcam.length) > sizeof(hostrcb->hcam.u.raw)) 2490 hostrcb->hcam.length = cpu_to_be32(sizeof(hostrcb->hcam.u.raw)); 2491 2492 switch (hostrcb->hcam.overlay_id) { 2493 case IPR_HOST_RCB_OVERLAY_ID_2: 2494 ipr_log_cache_error(ioa_cfg, hostrcb); 2495 break; 2496 case IPR_HOST_RCB_OVERLAY_ID_3: 2497 ipr_log_config_error(ioa_cfg, hostrcb); 2498 break; 2499 case IPR_HOST_RCB_OVERLAY_ID_4: 2500 case IPR_HOST_RCB_OVERLAY_ID_6: 2501 ipr_log_array_error(ioa_cfg, hostrcb); 2502 break; 2503 case IPR_HOST_RCB_OVERLAY_ID_7: 2504 ipr_log_dual_ioa_error(ioa_cfg, hostrcb); 2505 break; 2506 case IPR_HOST_RCB_OVERLAY_ID_12: 2507 ipr_log_enhanced_cache_error(ioa_cfg, hostrcb); 2508 break; 2509 case IPR_HOST_RCB_OVERLAY_ID_13: 2510 ipr_log_enhanced_config_error(ioa_cfg, hostrcb); 2511 break; 2512 case IPR_HOST_RCB_OVERLAY_ID_14: 2513 case IPR_HOST_RCB_OVERLAY_ID_16: 2514 ipr_log_enhanced_array_error(ioa_cfg, hostrcb); 2515 break; 2516 case IPR_HOST_RCB_OVERLAY_ID_17: 2517 ipr_log_enhanced_dual_ioa_error(ioa_cfg, hostrcb); 2518 break; 2519 case IPR_HOST_RCB_OVERLAY_ID_20: 2520 ipr_log_fabric_error(ioa_cfg, hostrcb); 2521 break; 2522 case IPR_HOST_RCB_OVERLAY_ID_21: 2523 ipr_log_sis64_device_error(ioa_cfg, hostrcb); 2524 break; 2525 case IPR_HOST_RCB_OVERLAY_ID_23: 2526 ipr_log_sis64_config_error(ioa_cfg, hostrcb); 2527 break; 2528 case IPR_HOST_RCB_OVERLAY_ID_24: 2529 case IPR_HOST_RCB_OVERLAY_ID_26: 2530 ipr_log_sis64_array_error(ioa_cfg, hostrcb); 2531 break; 2532 case IPR_HOST_RCB_OVERLAY_ID_30: 2533 ipr_log_sis64_fabric_error(ioa_cfg, hostrcb); 2534 break; 2535 case IPR_HOST_RCB_OVERLAY_ID_1: 2536 case IPR_HOST_RCB_OVERLAY_ID_DEFAULT: 2537 default: 2538 ipr_log_generic_error(ioa_cfg, hostrcb); 2539 break; 2540 } 2541 } 2542 2543 /** 2544 * ipr_process_error - Op done function for an adapter error log. 2545 * @ipr_cmd: ipr command struct 2546 * 2547 * This function is the op done function for an error log host 2548 * controlled async from the adapter. It will log the error and 2549 * send the HCAM back to the adapter. 2550 * 2551 * Return value: 2552 * none 2553 **/ 2554 static void ipr_process_error(struct ipr_cmnd *ipr_cmd) 2555 { 2556 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 2557 struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb; 2558 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); 2559 u32 fd_ioasc; 2560 2561 if (ioa_cfg->sis64) 2562 fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc); 2563 else 2564 fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc); 2565 2566 list_del(&hostrcb->queue); 2567 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); 2568 2569 if (!ioasc) { 2570 ipr_handle_log_data(ioa_cfg, hostrcb); 2571 if (fd_ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED) 2572 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV); 2573 } else if (ioasc != IPR_IOASC_IOA_WAS_RESET && 2574 ioasc != IPR_IOASC_ABORTED_CMD_TERM_BY_HOST) { 2575 dev_err(&ioa_cfg->pdev->dev, 2576 "Host RCB failed with IOASC: 0x%08X\n", ioasc); 2577 } 2578 2579 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb); 2580 } 2581 2582 /** 2583 * ipr_timeout - An internally generated op has timed out. 2584 * @ipr_cmd: ipr command struct 2585 * 2586 * This function blocks host requests and initiates an 2587 * adapter reset. 2588 * 2589 * Return value: 2590 * none 2591 **/ 2592 static void ipr_timeout(struct ipr_cmnd *ipr_cmd) 2593 { 2594 unsigned long lock_flags = 0; 2595 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 2596 2597 ENTER; 2598 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 2599 2600 ioa_cfg->errors_logged++; 2601 dev_err(&ioa_cfg->pdev->dev, 2602 "Adapter being reset due to command timeout.\n"); 2603 2604 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state) 2605 ioa_cfg->sdt_state = GET_DUMP; 2606 2607 if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd) 2608 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE); 2609 2610 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 2611 LEAVE; 2612 } 2613 2614 /** 2615 * ipr_oper_timeout - Adapter timed out transitioning to operational 2616 * @ipr_cmd: ipr command struct 2617 * 2618 * This function blocks host requests and initiates an 2619 * adapter reset. 2620 * 2621 * Return value: 2622 * none 2623 **/ 2624 static void ipr_oper_timeout(struct ipr_cmnd *ipr_cmd) 2625 { 2626 unsigned long lock_flags = 0; 2627 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 2628 2629 ENTER; 2630 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 2631 2632 ioa_cfg->errors_logged++; 2633 dev_err(&ioa_cfg->pdev->dev, 2634 "Adapter timed out transitioning to operational.\n"); 2635 2636 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state) 2637 ioa_cfg->sdt_state = GET_DUMP; 2638 2639 if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd) { 2640 if (ipr_fastfail) 2641 ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES; 2642 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE); 2643 } 2644 2645 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 2646 LEAVE; 2647 } 2648 2649 /** 2650 * ipr_find_ses_entry - Find matching SES in SES table 2651 * @res: resource entry struct of SES 2652 * 2653 * Return value: 2654 * pointer to SES table entry / NULL on failure 2655 **/ 2656 static const struct ipr_ses_table_entry * 2657 ipr_find_ses_entry(struct ipr_resource_entry *res) 2658 { 2659 int i, j, matches; 2660 struct ipr_std_inq_vpids *vpids; 2661 const struct ipr_ses_table_entry *ste = ipr_ses_table; 2662 2663 for (i = 0; i < ARRAY_SIZE(ipr_ses_table); i++, ste++) { 2664 for (j = 0, matches = 0; j < IPR_PROD_ID_LEN; j++) { 2665 if (ste->compare_product_id_byte[j] == 'X') { 2666 vpids = &res->std_inq_data.vpids; 2667 if (vpids->product_id[j] == ste->product_id[j]) 2668 matches++; 2669 else 2670 break; 2671 } else 2672 matches++; 2673 } 2674 2675 if (matches == IPR_PROD_ID_LEN) 2676 return ste; 2677 } 2678 2679 return NULL; 2680 } 2681 2682 /** 2683 * ipr_get_max_scsi_speed - Determine max SCSI speed for a given bus 2684 * @ioa_cfg: ioa config struct 2685 * @bus: SCSI bus 2686 * @bus_width: bus width 2687 * 2688 * Return value: 2689 * SCSI bus speed in units of 100KHz, 1600 is 160 MHz 2690 * For a 2-byte wide SCSI bus, the maximum transfer speed is 2691 * twice the maximum transfer rate (e.g. for a wide enabled bus, 2692 * max 160MHz = max 320MB/sec). 2693 **/ 2694 static u32 ipr_get_max_scsi_speed(struct ipr_ioa_cfg *ioa_cfg, u8 bus, u8 bus_width) 2695 { 2696 struct ipr_resource_entry *res; 2697 const struct ipr_ses_table_entry *ste; 2698 u32 max_xfer_rate = IPR_MAX_SCSI_RATE(bus_width); 2699 2700 /* Loop through each config table entry in the config table buffer */ 2701 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) { 2702 if (!(IPR_IS_SES_DEVICE(res->std_inq_data))) 2703 continue; 2704 2705 if (bus != res->bus) 2706 continue; 2707 2708 if (!(ste = ipr_find_ses_entry(res))) 2709 continue; 2710 2711 max_xfer_rate = (ste->max_bus_speed_limit * 10) / (bus_width / 8); 2712 } 2713 2714 return max_xfer_rate; 2715 } 2716 2717 /** 2718 * ipr_wait_iodbg_ack - Wait for an IODEBUG ACK from the IOA 2719 * @ioa_cfg: ioa config struct 2720 * @max_delay: max delay in micro-seconds to wait 2721 * 2722 * Waits for an IODEBUG ACK from the IOA, doing busy looping. 2723 * 2724 * Return value: 2725 * 0 on success / other on failure 2726 **/ 2727 static int ipr_wait_iodbg_ack(struct ipr_ioa_cfg *ioa_cfg, int max_delay) 2728 { 2729 volatile u32 pcii_reg; 2730 int delay = 1; 2731 2732 /* Read interrupt reg until IOA signals IO Debug Acknowledge */ 2733 while (delay < max_delay) { 2734 pcii_reg = readl(ioa_cfg->regs.sense_interrupt_reg); 2735 2736 if (pcii_reg & IPR_PCII_IO_DEBUG_ACKNOWLEDGE) 2737 return 0; 2738 2739 /* udelay cannot be used if delay is more than a few milliseconds */ 2740 if ((delay / 1000) > MAX_UDELAY_MS) 2741 mdelay(delay / 1000); 2742 else 2743 udelay(delay); 2744 2745 delay += delay; 2746 } 2747 return -EIO; 2748 } 2749 2750 /** 2751 * ipr_get_sis64_dump_data_section - Dump IOA memory 2752 * @ioa_cfg: ioa config struct 2753 * @start_addr: adapter address to dump 2754 * @dest: destination kernel buffer 2755 * @length_in_words: length to dump in 4 byte words 2756 * 2757 * Return value: 2758 * 0 on success 2759 **/ 2760 static int ipr_get_sis64_dump_data_section(struct ipr_ioa_cfg *ioa_cfg, 2761 u32 start_addr, 2762 __be32 *dest, u32 length_in_words) 2763 { 2764 int i; 2765 2766 for (i = 0; i < length_in_words; i++) { 2767 writel(start_addr+(i*4), ioa_cfg->regs.dump_addr_reg); 2768 *dest = cpu_to_be32(readl(ioa_cfg->regs.dump_data_reg)); 2769 dest++; 2770 } 2771 2772 return 0; 2773 } 2774 2775 /** 2776 * ipr_get_ldump_data_section - Dump IOA memory 2777 * @ioa_cfg: ioa config struct 2778 * @start_addr: adapter address to dump 2779 * @dest: destination kernel buffer 2780 * @length_in_words: length to dump in 4 byte words 2781 * 2782 * Return value: 2783 * 0 on success / -EIO on failure 2784 **/ 2785 static int ipr_get_ldump_data_section(struct ipr_ioa_cfg *ioa_cfg, 2786 u32 start_addr, 2787 __be32 *dest, u32 length_in_words) 2788 { 2789 volatile u32 temp_pcii_reg; 2790 int i, delay = 0; 2791 2792 if (ioa_cfg->sis64) 2793 return ipr_get_sis64_dump_data_section(ioa_cfg, start_addr, 2794 dest, length_in_words); 2795 2796 /* Write IOA interrupt reg starting LDUMP state */ 2797 writel((IPR_UPROCI_RESET_ALERT | IPR_UPROCI_IO_DEBUG_ALERT), 2798 ioa_cfg->regs.set_uproc_interrupt_reg32); 2799 2800 /* Wait for IO debug acknowledge */ 2801 if (ipr_wait_iodbg_ack(ioa_cfg, 2802 IPR_LDUMP_MAX_LONG_ACK_DELAY_IN_USEC)) { 2803 dev_err(&ioa_cfg->pdev->dev, 2804 "IOA dump long data transfer timeout\n"); 2805 return -EIO; 2806 } 2807 2808 /* Signal LDUMP interlocked - clear IO debug ack */ 2809 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, 2810 ioa_cfg->regs.clr_interrupt_reg); 2811 2812 /* Write Mailbox with starting address */ 2813 writel(start_addr, ioa_cfg->ioa_mailbox); 2814 2815 /* Signal address valid - clear IOA Reset alert */ 2816 writel(IPR_UPROCI_RESET_ALERT, 2817 ioa_cfg->regs.clr_uproc_interrupt_reg32); 2818 2819 for (i = 0; i < length_in_words; i++) { 2820 /* Wait for IO debug acknowledge */ 2821 if (ipr_wait_iodbg_ack(ioa_cfg, 2822 IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC)) { 2823 dev_err(&ioa_cfg->pdev->dev, 2824 "IOA dump short data transfer timeout\n"); 2825 return -EIO; 2826 } 2827 2828 /* Read data from mailbox and increment destination pointer */ 2829 *dest = cpu_to_be32(readl(ioa_cfg->ioa_mailbox)); 2830 dest++; 2831 2832 /* For all but the last word of data, signal data received */ 2833 if (i < (length_in_words - 1)) { 2834 /* Signal dump data received - Clear IO debug Ack */ 2835 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, 2836 ioa_cfg->regs.clr_interrupt_reg); 2837 } 2838 } 2839 2840 /* Signal end of block transfer. Set reset alert then clear IO debug ack */ 2841 writel(IPR_UPROCI_RESET_ALERT, 2842 ioa_cfg->regs.set_uproc_interrupt_reg32); 2843 2844 writel(IPR_UPROCI_IO_DEBUG_ALERT, 2845 ioa_cfg->regs.clr_uproc_interrupt_reg32); 2846 2847 /* Signal dump data received - Clear IO debug Ack */ 2848 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, 2849 ioa_cfg->regs.clr_interrupt_reg); 2850 2851 /* Wait for IOA to signal LDUMP exit - IOA reset alert will be cleared */ 2852 while (delay < IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC) { 2853 temp_pcii_reg = 2854 readl(ioa_cfg->regs.sense_uproc_interrupt_reg32); 2855 2856 if (!(temp_pcii_reg & IPR_UPROCI_RESET_ALERT)) 2857 return 0; 2858 2859 udelay(10); 2860 delay += 10; 2861 } 2862 2863 return 0; 2864 } 2865 2866 #ifdef CONFIG_SCSI_IPR_DUMP 2867 /** 2868 * ipr_sdt_copy - Copy Smart Dump Table to kernel buffer 2869 * @ioa_cfg: ioa config struct 2870 * @pci_address: adapter address 2871 * @length: length of data to copy 2872 * 2873 * Copy data from PCI adapter to kernel buffer. 2874 * Note: length MUST be a 4 byte multiple 2875 * Return value: 2876 * 0 on success / other on failure 2877 **/ 2878 static int ipr_sdt_copy(struct ipr_ioa_cfg *ioa_cfg, 2879 unsigned long pci_address, u32 length) 2880 { 2881 int bytes_copied = 0; 2882 int cur_len, rc, rem_len, rem_page_len, max_dump_size; 2883 __be32 *page; 2884 unsigned long lock_flags = 0; 2885 struct ipr_ioa_dump *ioa_dump = &ioa_cfg->dump->ioa_dump; 2886 2887 if (ioa_cfg->sis64) 2888 max_dump_size = IPR_FMT3_MAX_IOA_DUMP_SIZE; 2889 else 2890 max_dump_size = IPR_FMT2_MAX_IOA_DUMP_SIZE; 2891 2892 while (bytes_copied < length && 2893 (ioa_dump->hdr.len + bytes_copied) < max_dump_size) { 2894 if (ioa_dump->page_offset >= PAGE_SIZE || 2895 ioa_dump->page_offset == 0) { 2896 page = (__be32 *)__get_free_page(GFP_ATOMIC); 2897 2898 if (!page) { 2899 ipr_trace; 2900 return bytes_copied; 2901 } 2902 2903 ioa_dump->page_offset = 0; 2904 ioa_dump->ioa_data[ioa_dump->next_page_index] = page; 2905 ioa_dump->next_page_index++; 2906 } else 2907 page = ioa_dump->ioa_data[ioa_dump->next_page_index - 1]; 2908 2909 rem_len = length - bytes_copied; 2910 rem_page_len = PAGE_SIZE - ioa_dump->page_offset; 2911 cur_len = min(rem_len, rem_page_len); 2912 2913 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 2914 if (ioa_cfg->sdt_state == ABORT_DUMP) { 2915 rc = -EIO; 2916 } else { 2917 rc = ipr_get_ldump_data_section(ioa_cfg, 2918 pci_address + bytes_copied, 2919 &page[ioa_dump->page_offset / 4], 2920 (cur_len / sizeof(u32))); 2921 } 2922 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 2923 2924 if (!rc) { 2925 ioa_dump->page_offset += cur_len; 2926 bytes_copied += cur_len; 2927 } else { 2928 ipr_trace; 2929 break; 2930 } 2931 schedule(); 2932 } 2933 2934 return bytes_copied; 2935 } 2936 2937 /** 2938 * ipr_init_dump_entry_hdr - Initialize a dump entry header. 2939 * @hdr: dump entry header struct 2940 * 2941 * Return value: 2942 * nothing 2943 **/ 2944 static void ipr_init_dump_entry_hdr(struct ipr_dump_entry_header *hdr) 2945 { 2946 hdr->eye_catcher = IPR_DUMP_EYE_CATCHER; 2947 hdr->num_elems = 1; 2948 hdr->offset = sizeof(*hdr); 2949 hdr->status = IPR_DUMP_STATUS_SUCCESS; 2950 } 2951 2952 /** 2953 * ipr_dump_ioa_type_data - Fill in the adapter type in the dump. 2954 * @ioa_cfg: ioa config struct 2955 * @driver_dump: driver dump struct 2956 * 2957 * Return value: 2958 * nothing 2959 **/ 2960 static void ipr_dump_ioa_type_data(struct ipr_ioa_cfg *ioa_cfg, 2961 struct ipr_driver_dump *driver_dump) 2962 { 2963 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data; 2964 2965 ipr_init_dump_entry_hdr(&driver_dump->ioa_type_entry.hdr); 2966 driver_dump->ioa_type_entry.hdr.len = 2967 sizeof(struct ipr_dump_ioa_type_entry) - 2968 sizeof(struct ipr_dump_entry_header); 2969 driver_dump->ioa_type_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY; 2970 driver_dump->ioa_type_entry.hdr.id = IPR_DUMP_DRIVER_TYPE_ID; 2971 driver_dump->ioa_type_entry.type = ioa_cfg->type; 2972 driver_dump->ioa_type_entry.fw_version = (ucode_vpd->major_release << 24) | 2973 (ucode_vpd->card_type << 16) | (ucode_vpd->minor_release[0] << 8) | 2974 ucode_vpd->minor_release[1]; 2975 driver_dump->hdr.num_entries++; 2976 } 2977 2978 /** 2979 * ipr_dump_version_data - Fill in the driver version in the dump. 2980 * @ioa_cfg: ioa config struct 2981 * @driver_dump: driver dump struct 2982 * 2983 * Return value: 2984 * nothing 2985 **/ 2986 static void ipr_dump_version_data(struct ipr_ioa_cfg *ioa_cfg, 2987 struct ipr_driver_dump *driver_dump) 2988 { 2989 ipr_init_dump_entry_hdr(&driver_dump->version_entry.hdr); 2990 driver_dump->version_entry.hdr.len = 2991 sizeof(struct ipr_dump_version_entry) - 2992 sizeof(struct ipr_dump_entry_header); 2993 driver_dump->version_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII; 2994 driver_dump->version_entry.hdr.id = IPR_DUMP_DRIVER_VERSION_ID; 2995 strcpy(driver_dump->version_entry.version, IPR_DRIVER_VERSION); 2996 driver_dump->hdr.num_entries++; 2997 } 2998 2999 /** 3000 * ipr_dump_trace_data - Fill in the IOA trace in the dump. 3001 * @ioa_cfg: ioa config struct 3002 * @driver_dump: driver dump struct 3003 * 3004 * Return value: 3005 * nothing 3006 **/ 3007 static void ipr_dump_trace_data(struct ipr_ioa_cfg *ioa_cfg, 3008 struct ipr_driver_dump *driver_dump) 3009 { 3010 ipr_init_dump_entry_hdr(&driver_dump->trace_entry.hdr); 3011 driver_dump->trace_entry.hdr.len = 3012 sizeof(struct ipr_dump_trace_entry) - 3013 sizeof(struct ipr_dump_entry_header); 3014 driver_dump->trace_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY; 3015 driver_dump->trace_entry.hdr.id = IPR_DUMP_TRACE_ID; 3016 memcpy(driver_dump->trace_entry.trace, ioa_cfg->trace, IPR_TRACE_SIZE); 3017 driver_dump->hdr.num_entries++; 3018 } 3019 3020 /** 3021 * ipr_dump_location_data - Fill in the IOA location in the dump. 3022 * @ioa_cfg: ioa config struct 3023 * @driver_dump: driver dump struct 3024 * 3025 * Return value: 3026 * nothing 3027 **/ 3028 static void ipr_dump_location_data(struct ipr_ioa_cfg *ioa_cfg, 3029 struct ipr_driver_dump *driver_dump) 3030 { 3031 ipr_init_dump_entry_hdr(&driver_dump->location_entry.hdr); 3032 driver_dump->location_entry.hdr.len = 3033 sizeof(struct ipr_dump_location_entry) - 3034 sizeof(struct ipr_dump_entry_header); 3035 driver_dump->location_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII; 3036 driver_dump->location_entry.hdr.id = IPR_DUMP_LOCATION_ID; 3037 strcpy(driver_dump->location_entry.location, dev_name(&ioa_cfg->pdev->dev)); 3038 driver_dump->hdr.num_entries++; 3039 } 3040 3041 /** 3042 * ipr_get_ioa_dump - Perform a dump of the driver and adapter. 3043 * @ioa_cfg: ioa config struct 3044 * @dump: dump struct 3045 * 3046 * Return value: 3047 * nothing 3048 **/ 3049 static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump) 3050 { 3051 unsigned long start_addr, sdt_word; 3052 unsigned long lock_flags = 0; 3053 struct ipr_driver_dump *driver_dump = &dump->driver_dump; 3054 struct ipr_ioa_dump *ioa_dump = &dump->ioa_dump; 3055 u32 num_entries, max_num_entries, start_off, end_off; 3056 u32 max_dump_size, bytes_to_copy, bytes_copied, rc; 3057 struct ipr_sdt *sdt; 3058 int valid = 1; 3059 int i; 3060 3061 ENTER; 3062 3063 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 3064 3065 if (ioa_cfg->sdt_state != READ_DUMP) { 3066 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 3067 return; 3068 } 3069 3070 if (ioa_cfg->sis64) { 3071 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 3072 ssleep(IPR_DUMP_DELAY_SECONDS); 3073 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 3074 } 3075 3076 start_addr = readl(ioa_cfg->ioa_mailbox); 3077 3078 if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(start_addr)) { 3079 dev_err(&ioa_cfg->pdev->dev, 3080 "Invalid dump table format: %lx\n", start_addr); 3081 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 3082 return; 3083 } 3084 3085 dev_err(&ioa_cfg->pdev->dev, "Dump of IOA initiated\n"); 3086 3087 driver_dump->hdr.eye_catcher = IPR_DUMP_EYE_CATCHER; 3088 3089 /* Initialize the overall dump header */ 3090 driver_dump->hdr.len = sizeof(struct ipr_driver_dump); 3091 driver_dump->hdr.num_entries = 1; 3092 driver_dump->hdr.first_entry_offset = sizeof(struct ipr_dump_header); 3093 driver_dump->hdr.status = IPR_DUMP_STATUS_SUCCESS; 3094 driver_dump->hdr.os = IPR_DUMP_OS_LINUX; 3095 driver_dump->hdr.driver_name = IPR_DUMP_DRIVER_NAME; 3096 3097 ipr_dump_version_data(ioa_cfg, driver_dump); 3098 ipr_dump_location_data(ioa_cfg, driver_dump); 3099 ipr_dump_ioa_type_data(ioa_cfg, driver_dump); 3100 ipr_dump_trace_data(ioa_cfg, driver_dump); 3101 3102 /* Update dump_header */ 3103 driver_dump->hdr.len += sizeof(struct ipr_dump_entry_header); 3104 3105 /* IOA Dump entry */ 3106 ipr_init_dump_entry_hdr(&ioa_dump->hdr); 3107 ioa_dump->hdr.len = 0; 3108 ioa_dump->hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY; 3109 ioa_dump->hdr.id = IPR_DUMP_IOA_DUMP_ID; 3110 3111 /* First entries in sdt are actually a list of dump addresses and 3112 lengths to gather the real dump data. sdt represents the pointer 3113 to the ioa generated dump table. Dump data will be extracted based 3114 on entries in this table */ 3115 sdt = &ioa_dump->sdt; 3116 3117 if (ioa_cfg->sis64) { 3118 max_num_entries = IPR_FMT3_NUM_SDT_ENTRIES; 3119 max_dump_size = IPR_FMT3_MAX_IOA_DUMP_SIZE; 3120 } else { 3121 max_num_entries = IPR_FMT2_NUM_SDT_ENTRIES; 3122 max_dump_size = IPR_FMT2_MAX_IOA_DUMP_SIZE; 3123 } 3124 3125 bytes_to_copy = offsetof(struct ipr_sdt, entry) + 3126 (max_num_entries * sizeof(struct ipr_sdt_entry)); 3127 rc = ipr_get_ldump_data_section(ioa_cfg, start_addr, (__be32 *)sdt, 3128 bytes_to_copy / sizeof(__be32)); 3129 3130 /* Smart Dump table is ready to use and the first entry is valid */ 3131 if (rc || ((be32_to_cpu(sdt->hdr.state) != IPR_FMT3_SDT_READY_TO_USE) && 3132 (be32_to_cpu(sdt->hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) { 3133 dev_err(&ioa_cfg->pdev->dev, 3134 "Dump of IOA failed. Dump table not valid: %d, %X.\n", 3135 rc, be32_to_cpu(sdt->hdr.state)); 3136 driver_dump->hdr.status = IPR_DUMP_STATUS_FAILED; 3137 ioa_cfg->sdt_state = DUMP_OBTAINED; 3138 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 3139 return; 3140 } 3141 3142 num_entries = be32_to_cpu(sdt->hdr.num_entries_used); 3143 3144 if (num_entries > max_num_entries) 3145 num_entries = max_num_entries; 3146 3147 /* Update dump length to the actual data to be copied */ 3148 dump->driver_dump.hdr.len += sizeof(struct ipr_sdt_header); 3149 if (ioa_cfg->sis64) 3150 dump->driver_dump.hdr.len += num_entries * sizeof(struct ipr_sdt_entry); 3151 else 3152 dump->driver_dump.hdr.len += max_num_entries * sizeof(struct ipr_sdt_entry); 3153 3154 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 3155 3156 for (i = 0; i < num_entries; i++) { 3157 if (ioa_dump->hdr.len > max_dump_size) { 3158 driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS; 3159 break; 3160 } 3161 3162 if (sdt->entry[i].flags & IPR_SDT_VALID_ENTRY) { 3163 sdt_word = be32_to_cpu(sdt->entry[i].start_token); 3164 if (ioa_cfg->sis64) 3165 bytes_to_copy = be32_to_cpu(sdt->entry[i].end_token); 3166 else { 3167 start_off = sdt_word & IPR_FMT2_MBX_ADDR_MASK; 3168 end_off = be32_to_cpu(sdt->entry[i].end_token); 3169 3170 if (ipr_sdt_is_fmt2(sdt_word) && sdt_word) 3171 bytes_to_copy = end_off - start_off; 3172 else 3173 valid = 0; 3174 } 3175 if (valid) { 3176 if (bytes_to_copy > max_dump_size) { 3177 sdt->entry[i].flags &= ~IPR_SDT_VALID_ENTRY; 3178 continue; 3179 } 3180 3181 /* Copy data from adapter to driver buffers */ 3182 bytes_copied = ipr_sdt_copy(ioa_cfg, sdt_word, 3183 bytes_to_copy); 3184 3185 ioa_dump->hdr.len += bytes_copied; 3186 3187 if (bytes_copied != bytes_to_copy) { 3188 driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS; 3189 break; 3190 } 3191 } 3192 } 3193 } 3194 3195 dev_err(&ioa_cfg->pdev->dev, "Dump of IOA completed.\n"); 3196 3197 /* Update dump_header */ 3198 driver_dump->hdr.len += ioa_dump->hdr.len; 3199 wmb(); 3200 ioa_cfg->sdt_state = DUMP_OBTAINED; 3201 LEAVE; 3202 } 3203 3204 #else 3205 #define ipr_get_ioa_dump(ioa_cfg, dump) do { } while (0) 3206 #endif 3207 3208 /** 3209 * ipr_release_dump - Free adapter dump memory 3210 * @kref: kref struct 3211 * 3212 * Return value: 3213 * nothing 3214 **/ 3215 static void ipr_release_dump(struct kref *kref) 3216 { 3217 struct ipr_dump *dump = container_of(kref, struct ipr_dump, kref); 3218 struct ipr_ioa_cfg *ioa_cfg = dump->ioa_cfg; 3219 unsigned long lock_flags = 0; 3220 int i; 3221 3222 ENTER; 3223 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 3224 ioa_cfg->dump = NULL; 3225 ioa_cfg->sdt_state = INACTIVE; 3226 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 3227 3228 for (i = 0; i < dump->ioa_dump.next_page_index; i++) 3229 free_page((unsigned long) dump->ioa_dump.ioa_data[i]); 3230 3231 vfree(dump->ioa_dump.ioa_data); 3232 kfree(dump); 3233 LEAVE; 3234 } 3235 3236 /** 3237 * ipr_worker_thread - Worker thread 3238 * @work: ioa config struct 3239 * 3240 * Called at task level from a work thread. This function takes care 3241 * of adding and removing device from the mid-layer as configuration 3242 * changes are detected by the adapter. 3243 * 3244 * Return value: 3245 * nothing 3246 **/ 3247 static void ipr_worker_thread(struct work_struct *work) 3248 { 3249 unsigned long lock_flags; 3250 struct ipr_resource_entry *res; 3251 struct scsi_device *sdev; 3252 struct ipr_dump *dump; 3253 struct ipr_ioa_cfg *ioa_cfg = 3254 container_of(work, struct ipr_ioa_cfg, work_q); 3255 u8 bus, target, lun; 3256 int did_work; 3257 3258 ENTER; 3259 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 3260 3261 if (ioa_cfg->sdt_state == READ_DUMP) { 3262 dump = ioa_cfg->dump; 3263 if (!dump) { 3264 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 3265 return; 3266 } 3267 kref_get(&dump->kref); 3268 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 3269 ipr_get_ioa_dump(ioa_cfg, dump); 3270 kref_put(&dump->kref, ipr_release_dump); 3271 3272 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 3273 if (ioa_cfg->sdt_state == DUMP_OBTAINED && !ioa_cfg->dump_timeout) 3274 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE); 3275 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 3276 return; 3277 } 3278 3279 restart: 3280 do { 3281 did_work = 0; 3282 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) { 3283 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 3284 return; 3285 } 3286 3287 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) { 3288 if (res->del_from_ml && res->sdev) { 3289 did_work = 1; 3290 sdev = res->sdev; 3291 if (!scsi_device_get(sdev)) { 3292 if (!res->add_to_ml) 3293 list_move_tail(&res->queue, &ioa_cfg->free_res_q); 3294 else 3295 res->del_from_ml = 0; 3296 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 3297 scsi_remove_device(sdev); 3298 scsi_device_put(sdev); 3299 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 3300 } 3301 break; 3302 } 3303 } 3304 } while (did_work); 3305 3306 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) { 3307 if (res->add_to_ml) { 3308 bus = res->bus; 3309 target = res->target; 3310 lun = res->lun; 3311 res->add_to_ml = 0; 3312 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 3313 scsi_add_device(ioa_cfg->host, bus, target, lun); 3314 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 3315 goto restart; 3316 } 3317 } 3318 3319 ioa_cfg->scan_done = 1; 3320 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 3321 kobject_uevent(&ioa_cfg->host->shost_dev.kobj, KOBJ_CHANGE); 3322 LEAVE; 3323 } 3324 3325 #ifdef CONFIG_SCSI_IPR_TRACE 3326 /** 3327 * ipr_read_trace - Dump the adapter trace 3328 * @filp: open sysfs file 3329 * @kobj: kobject struct 3330 * @bin_attr: bin_attribute struct 3331 * @buf: buffer 3332 * @off: offset 3333 * @count: buffer size 3334 * 3335 * Return value: 3336 * number of bytes printed to buffer 3337 **/ 3338 static ssize_t ipr_read_trace(struct file *filp, struct kobject *kobj, 3339 struct bin_attribute *bin_attr, 3340 char *buf, loff_t off, size_t count) 3341 { 3342 struct device *dev = container_of(kobj, struct device, kobj); 3343 struct Scsi_Host *shost = class_to_shost(dev); 3344 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; 3345 unsigned long lock_flags = 0; 3346 ssize_t ret; 3347 3348 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 3349 ret = memory_read_from_buffer(buf, count, &off, ioa_cfg->trace, 3350 IPR_TRACE_SIZE); 3351 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 3352 3353 return ret; 3354 } 3355 3356 static struct bin_attribute ipr_trace_attr = { 3357 .attr = { 3358 .name = "trace", 3359 .mode = S_IRUGO, 3360 }, 3361 .size = 0, 3362 .read = ipr_read_trace, 3363 }; 3364 #endif 3365 3366 /** 3367 * ipr_show_fw_version - Show the firmware version 3368 * @dev: class device struct 3369 * @buf: buffer 3370 * 3371 * Return value: 3372 * number of bytes printed to buffer 3373 **/ 3374 static ssize_t ipr_show_fw_version(struct device *dev, 3375 struct device_attribute *attr, char *buf) 3376 { 3377 struct Scsi_Host *shost = class_to_shost(dev); 3378 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; 3379 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data; 3380 unsigned long lock_flags = 0; 3381 int len; 3382 3383 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 3384 len = snprintf(buf, PAGE_SIZE, "%02X%02X%02X%02X\n", 3385 ucode_vpd->major_release, ucode_vpd->card_type, 3386 ucode_vpd->minor_release[0], 3387 ucode_vpd->minor_release[1]); 3388 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 3389 return len; 3390 } 3391 3392 static struct device_attribute ipr_fw_version_attr = { 3393 .attr = { 3394 .name = "fw_version", 3395 .mode = S_IRUGO, 3396 }, 3397 .show = ipr_show_fw_version, 3398 }; 3399 3400 /** 3401 * ipr_show_log_level - Show the adapter's error logging level 3402 * @dev: class device struct 3403 * @buf: buffer 3404 * 3405 * Return value: 3406 * number of bytes printed to buffer 3407 **/ 3408 static ssize_t ipr_show_log_level(struct device *dev, 3409 struct device_attribute *attr, char *buf) 3410 { 3411 struct Scsi_Host *shost = class_to_shost(dev); 3412 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; 3413 unsigned long lock_flags = 0; 3414 int len; 3415 3416 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 3417 len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->log_level); 3418 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 3419 return len; 3420 } 3421 3422 /** 3423 * ipr_store_log_level - Change the adapter's error logging level 3424 * @dev: class device struct 3425 * @buf: buffer 3426 * 3427 * Return value: 3428 * number of bytes printed to buffer 3429 **/ 3430 static ssize_t ipr_store_log_level(struct device *dev, 3431 struct device_attribute *attr, 3432 const char *buf, size_t count) 3433 { 3434 struct Scsi_Host *shost = class_to_shost(dev); 3435 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; 3436 unsigned long lock_flags = 0; 3437 3438 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 3439 ioa_cfg->log_level = simple_strtoul(buf, NULL, 10); 3440 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 3441 return strlen(buf); 3442 } 3443 3444 static struct device_attribute ipr_log_level_attr = { 3445 .attr = { 3446 .name = "log_level", 3447 .mode = S_IRUGO | S_IWUSR, 3448 }, 3449 .show = ipr_show_log_level, 3450 .store = ipr_store_log_level 3451 }; 3452 3453 /** 3454 * ipr_store_diagnostics - IOA Diagnostics interface 3455 * @dev: device struct 3456 * @buf: buffer 3457 * @count: buffer size 3458 * 3459 * This function will reset the adapter and wait a reasonable 3460 * amount of time for any errors that the adapter might log. 3461 * 3462 * Return value: 3463 * count on success / other on failure 3464 **/ 3465 static ssize_t ipr_store_diagnostics(struct device *dev, 3466 struct device_attribute *attr, 3467 const char *buf, size_t count) 3468 { 3469 struct Scsi_Host *shost = class_to_shost(dev); 3470 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; 3471 unsigned long lock_flags = 0; 3472 int rc = count; 3473 3474 if (!capable(CAP_SYS_ADMIN)) 3475 return -EACCES; 3476 3477 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 3478 while (ioa_cfg->in_reset_reload) { 3479 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 3480 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); 3481 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 3482 } 3483 3484 ioa_cfg->errors_logged = 0; 3485 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL); 3486 3487 if (ioa_cfg->in_reset_reload) { 3488 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 3489 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); 3490 3491 /* Wait for a second for any errors to be logged */ 3492 msleep(1000); 3493 } else { 3494 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 3495 return -EIO; 3496 } 3497 3498 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 3499 if (ioa_cfg->in_reset_reload || ioa_cfg->errors_logged) 3500 rc = -EIO; 3501 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 3502 3503 return rc; 3504 } 3505 3506 static struct device_attribute ipr_diagnostics_attr = { 3507 .attr = { 3508 .name = "run_diagnostics", 3509 .mode = S_IWUSR, 3510 }, 3511 .store = ipr_store_diagnostics 3512 }; 3513 3514 /** 3515 * ipr_show_adapter_state - Show the adapter's state 3516 * @class_dev: device struct 3517 * @buf: buffer 3518 * 3519 * Return value: 3520 * number of bytes printed to buffer 3521 **/ 3522 static ssize_t ipr_show_adapter_state(struct device *dev, 3523 struct device_attribute *attr, char *buf) 3524 { 3525 struct Scsi_Host *shost = class_to_shost(dev); 3526 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; 3527 unsigned long lock_flags = 0; 3528 int len; 3529 3530 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 3531 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) 3532 len = snprintf(buf, PAGE_SIZE, "offline\n"); 3533 else 3534 len = snprintf(buf, PAGE_SIZE, "online\n"); 3535 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 3536 return len; 3537 } 3538 3539 /** 3540 * ipr_store_adapter_state - Change adapter state 3541 * @dev: device struct 3542 * @buf: buffer 3543 * @count: buffer size 3544 * 3545 * This function will change the adapter's state. 3546 * 3547 * Return value: 3548 * count on success / other on failure 3549 **/ 3550 static ssize_t ipr_store_adapter_state(struct device *dev, 3551 struct device_attribute *attr, 3552 const char *buf, size_t count) 3553 { 3554 struct Scsi_Host *shost = class_to_shost(dev); 3555 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; 3556 unsigned long lock_flags; 3557 int result = count, i; 3558 3559 if (!capable(CAP_SYS_ADMIN)) 3560 return -EACCES; 3561 3562 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 3563 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead && 3564 !strncmp(buf, "online", 6)) { 3565 for (i = 0; i < ioa_cfg->hrrq_num; i++) { 3566 spin_lock(&ioa_cfg->hrrq[i]._lock); 3567 ioa_cfg->hrrq[i].ioa_is_dead = 0; 3568 spin_unlock(&ioa_cfg->hrrq[i]._lock); 3569 } 3570 wmb(); 3571 ioa_cfg->reset_retries = 0; 3572 ioa_cfg->in_ioa_bringdown = 0; 3573 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE); 3574 } 3575 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 3576 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); 3577 3578 return result; 3579 } 3580 3581 static struct device_attribute ipr_ioa_state_attr = { 3582 .attr = { 3583 .name = "online_state", 3584 .mode = S_IRUGO | S_IWUSR, 3585 }, 3586 .show = ipr_show_adapter_state, 3587 .store = ipr_store_adapter_state 3588 }; 3589 3590 /** 3591 * ipr_store_reset_adapter - Reset the adapter 3592 * @dev: device struct 3593 * @buf: buffer 3594 * @count: buffer size 3595 * 3596 * This function will reset the adapter. 3597 * 3598 * Return value: 3599 * count on success / other on failure 3600 **/ 3601 static ssize_t ipr_store_reset_adapter(struct device *dev, 3602 struct device_attribute *attr, 3603 const char *buf, size_t count) 3604 { 3605 struct Scsi_Host *shost = class_to_shost(dev); 3606 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; 3607 unsigned long lock_flags; 3608 int result = count; 3609 3610 if (!capable(CAP_SYS_ADMIN)) 3611 return -EACCES; 3612 3613 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 3614 if (!ioa_cfg->in_reset_reload) 3615 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL); 3616 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 3617 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); 3618 3619 return result; 3620 } 3621 3622 static struct device_attribute ipr_ioa_reset_attr = { 3623 .attr = { 3624 .name = "reset_host", 3625 .mode = S_IWUSR, 3626 }, 3627 .store = ipr_store_reset_adapter 3628 }; 3629 3630 static int ipr_iopoll(struct blk_iopoll *iop, int budget); 3631 /** 3632 * ipr_show_iopoll_weight - Show ipr polling mode 3633 * @dev: class device struct 3634 * @buf: buffer 3635 * 3636 * Return value: 3637 * number of bytes printed to buffer 3638 **/ 3639 static ssize_t ipr_show_iopoll_weight(struct device *dev, 3640 struct device_attribute *attr, char *buf) 3641 { 3642 struct Scsi_Host *shost = class_to_shost(dev); 3643 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; 3644 unsigned long lock_flags = 0; 3645 int len; 3646 3647 spin_lock_irqsave(shost->host_lock, lock_flags); 3648 len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->iopoll_weight); 3649 spin_unlock_irqrestore(shost->host_lock, lock_flags); 3650 3651 return len; 3652 } 3653 3654 /** 3655 * ipr_store_iopoll_weight - Change the adapter's polling mode 3656 * @dev: class device struct 3657 * @buf: buffer 3658 * 3659 * Return value: 3660 * number of bytes printed to buffer 3661 **/ 3662 static ssize_t ipr_store_iopoll_weight(struct device *dev, 3663 struct device_attribute *attr, 3664 const char *buf, size_t count) 3665 { 3666 struct Scsi_Host *shost = class_to_shost(dev); 3667 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; 3668 unsigned long user_iopoll_weight; 3669 unsigned long lock_flags = 0; 3670 int i; 3671 3672 if (!ioa_cfg->sis64) { 3673 dev_info(&ioa_cfg->pdev->dev, "blk-iopoll not supported on this adapter\n"); 3674 return -EINVAL; 3675 } 3676 if (kstrtoul(buf, 10, &user_iopoll_weight)) 3677 return -EINVAL; 3678 3679 if (user_iopoll_weight > 256) { 3680 dev_info(&ioa_cfg->pdev->dev, "Invalid blk-iopoll weight. It must be less than 256\n"); 3681 return -EINVAL; 3682 } 3683 3684 if (user_iopoll_weight == ioa_cfg->iopoll_weight) { 3685 dev_info(&ioa_cfg->pdev->dev, "Current blk-iopoll weight has the same weight\n"); 3686 return strlen(buf); 3687 } 3688 3689 if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) { 3690 for (i = 1; i < ioa_cfg->hrrq_num; i++) 3691 blk_iopoll_disable(&ioa_cfg->hrrq[i].iopoll); 3692 } 3693 3694 spin_lock_irqsave(shost->host_lock, lock_flags); 3695 ioa_cfg->iopoll_weight = user_iopoll_weight; 3696 if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) { 3697 for (i = 1; i < ioa_cfg->hrrq_num; i++) { 3698 blk_iopoll_init(&ioa_cfg->hrrq[i].iopoll, 3699 ioa_cfg->iopoll_weight, ipr_iopoll); 3700 blk_iopoll_enable(&ioa_cfg->hrrq[i].iopoll); 3701 } 3702 } 3703 spin_unlock_irqrestore(shost->host_lock, lock_flags); 3704 3705 return strlen(buf); 3706 } 3707 3708 static struct device_attribute ipr_iopoll_weight_attr = { 3709 .attr = { 3710 .name = "iopoll_weight", 3711 .mode = S_IRUGO | S_IWUSR, 3712 }, 3713 .show = ipr_show_iopoll_weight, 3714 .store = ipr_store_iopoll_weight 3715 }; 3716 3717 /** 3718 * ipr_alloc_ucode_buffer - Allocates a microcode download buffer 3719 * @buf_len: buffer length 3720 * 3721 * Allocates a DMA'able buffer in chunks and assembles a scatter/gather 3722 * list to use for microcode download 3723 * 3724 * Return value: 3725 * pointer to sglist / NULL on failure 3726 **/ 3727 static struct ipr_sglist *ipr_alloc_ucode_buffer(int buf_len) 3728 { 3729 int sg_size, order, bsize_elem, num_elem, i, j; 3730 struct ipr_sglist *sglist; 3731 struct scatterlist *scatterlist; 3732 struct page *page; 3733 3734 /* Get the minimum size per scatter/gather element */ 3735 sg_size = buf_len / (IPR_MAX_SGLIST - 1); 3736 3737 /* Get the actual size per element */ 3738 order = get_order(sg_size); 3739 3740 /* Determine the actual number of bytes per element */ 3741 bsize_elem = PAGE_SIZE * (1 << order); 3742 3743 /* Determine the actual number of sg entries needed */ 3744 if (buf_len % bsize_elem) 3745 num_elem = (buf_len / bsize_elem) + 1; 3746 else 3747 num_elem = buf_len / bsize_elem; 3748 3749 /* Allocate a scatter/gather list for the DMA */ 3750 sglist = kzalloc(sizeof(struct ipr_sglist) + 3751 (sizeof(struct scatterlist) * (num_elem - 1)), 3752 GFP_KERNEL); 3753 3754 if (sglist == NULL) { 3755 ipr_trace; 3756 return NULL; 3757 } 3758 3759 scatterlist = sglist->scatterlist; 3760 sg_init_table(scatterlist, num_elem); 3761 3762 sglist->order = order; 3763 sglist->num_sg = num_elem; 3764 3765 /* Allocate a bunch of sg elements */ 3766 for (i = 0; i < num_elem; i++) { 3767 page = alloc_pages(GFP_KERNEL, order); 3768 if (!page) { 3769 ipr_trace; 3770 3771 /* Free up what we already allocated */ 3772 for (j = i - 1; j >= 0; j--) 3773 __free_pages(sg_page(&scatterlist[j]), order); 3774 kfree(sglist); 3775 return NULL; 3776 } 3777 3778 sg_set_page(&scatterlist[i], page, 0, 0); 3779 } 3780 3781 return sglist; 3782 } 3783 3784 /** 3785 * ipr_free_ucode_buffer - Frees a microcode download buffer 3786 * @p_dnld: scatter/gather list pointer 3787 * 3788 * Free a DMA'able ucode download buffer previously allocated with 3789 * ipr_alloc_ucode_buffer 3790 * 3791 * Return value: 3792 * nothing 3793 **/ 3794 static void ipr_free_ucode_buffer(struct ipr_sglist *sglist) 3795 { 3796 int i; 3797 3798 for (i = 0; i < sglist->num_sg; i++) 3799 __free_pages(sg_page(&sglist->scatterlist[i]), sglist->order); 3800 3801 kfree(sglist); 3802 } 3803 3804 /** 3805 * ipr_copy_ucode_buffer - Copy user buffer to kernel buffer 3806 * @sglist: scatter/gather list pointer 3807 * @buffer: buffer pointer 3808 * @len: buffer length 3809 * 3810 * Copy a microcode image from a user buffer into a buffer allocated by 3811 * ipr_alloc_ucode_buffer 3812 * 3813 * Return value: 3814 * 0 on success / other on failure 3815 **/ 3816 static int ipr_copy_ucode_buffer(struct ipr_sglist *sglist, 3817 u8 *buffer, u32 len) 3818 { 3819 int bsize_elem, i, result = 0; 3820 struct scatterlist *scatterlist; 3821 void *kaddr; 3822 3823 /* Determine the actual number of bytes per element */ 3824 bsize_elem = PAGE_SIZE * (1 << sglist->order); 3825 3826 scatterlist = sglist->scatterlist; 3827 3828 for (i = 0; i < (len / bsize_elem); i++, buffer += bsize_elem) { 3829 struct page *page = sg_page(&scatterlist[i]); 3830 3831 kaddr = kmap(page); 3832 memcpy(kaddr, buffer, bsize_elem); 3833 kunmap(page); 3834 3835 scatterlist[i].length = bsize_elem; 3836 3837 if (result != 0) { 3838 ipr_trace; 3839 return result; 3840 } 3841 } 3842 3843 if (len % bsize_elem) { 3844 struct page *page = sg_page(&scatterlist[i]); 3845 3846 kaddr = kmap(page); 3847 memcpy(kaddr, buffer, len % bsize_elem); 3848 kunmap(page); 3849 3850 scatterlist[i].length = len % bsize_elem; 3851 } 3852 3853 sglist->buffer_len = len; 3854 return result; 3855 } 3856 3857 /** 3858 * ipr_build_ucode_ioadl64 - Build a microcode download IOADL 3859 * @ipr_cmd: ipr command struct 3860 * @sglist: scatter/gather list 3861 * 3862 * Builds a microcode download IOA data list (IOADL). 3863 * 3864 **/ 3865 static void ipr_build_ucode_ioadl64(struct ipr_cmnd *ipr_cmd, 3866 struct ipr_sglist *sglist) 3867 { 3868 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; 3869 struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64; 3870 struct scatterlist *scatterlist = sglist->scatterlist; 3871 int i; 3872 3873 ipr_cmd->dma_use_sg = sglist->num_dma_sg; 3874 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ; 3875 ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len); 3876 3877 ioarcb->ioadl_len = 3878 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg); 3879 for (i = 0; i < ipr_cmd->dma_use_sg; i++) { 3880 ioadl64[i].flags = cpu_to_be32(IPR_IOADL_FLAGS_WRITE); 3881 ioadl64[i].data_len = cpu_to_be32(sg_dma_len(&scatterlist[i])); 3882 ioadl64[i].address = cpu_to_be64(sg_dma_address(&scatterlist[i])); 3883 } 3884 3885 ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST); 3886 } 3887 3888 /** 3889 * ipr_build_ucode_ioadl - Build a microcode download IOADL 3890 * @ipr_cmd: ipr command struct 3891 * @sglist: scatter/gather list 3892 * 3893 * Builds a microcode download IOA data list (IOADL). 3894 * 3895 **/ 3896 static void ipr_build_ucode_ioadl(struct ipr_cmnd *ipr_cmd, 3897 struct ipr_sglist *sglist) 3898 { 3899 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; 3900 struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl; 3901 struct scatterlist *scatterlist = sglist->scatterlist; 3902 int i; 3903 3904 ipr_cmd->dma_use_sg = sglist->num_dma_sg; 3905 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ; 3906 ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len); 3907 3908 ioarcb->ioadl_len = 3909 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg); 3910 3911 for (i = 0; i < ipr_cmd->dma_use_sg; i++) { 3912 ioadl[i].flags_and_data_len = 3913 cpu_to_be32(IPR_IOADL_FLAGS_WRITE | sg_dma_len(&scatterlist[i])); 3914 ioadl[i].address = 3915 cpu_to_be32(sg_dma_address(&scatterlist[i])); 3916 } 3917 3918 ioadl[i-1].flags_and_data_len |= 3919 cpu_to_be32(IPR_IOADL_FLAGS_LAST); 3920 } 3921 3922 /** 3923 * ipr_update_ioa_ucode - Update IOA's microcode 3924 * @ioa_cfg: ioa config struct 3925 * @sglist: scatter/gather list 3926 * 3927 * Initiate an adapter reset to update the IOA's microcode 3928 * 3929 * Return value: 3930 * 0 on success / -EIO on failure 3931 **/ 3932 static int ipr_update_ioa_ucode(struct ipr_ioa_cfg *ioa_cfg, 3933 struct ipr_sglist *sglist) 3934 { 3935 unsigned long lock_flags; 3936 3937 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 3938 while (ioa_cfg->in_reset_reload) { 3939 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 3940 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); 3941 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 3942 } 3943 3944 if (ioa_cfg->ucode_sglist) { 3945 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 3946 dev_err(&ioa_cfg->pdev->dev, 3947 "Microcode download already in progress\n"); 3948 return -EIO; 3949 } 3950 3951 sglist->num_dma_sg = dma_map_sg(&ioa_cfg->pdev->dev, 3952 sglist->scatterlist, sglist->num_sg, 3953 DMA_TO_DEVICE); 3954 3955 if (!sglist->num_dma_sg) { 3956 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 3957 dev_err(&ioa_cfg->pdev->dev, 3958 "Failed to map microcode download buffer!\n"); 3959 return -EIO; 3960 } 3961 3962 ioa_cfg->ucode_sglist = sglist; 3963 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL); 3964 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 3965 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); 3966 3967 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 3968 ioa_cfg->ucode_sglist = NULL; 3969 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 3970 return 0; 3971 } 3972 3973 /** 3974 * ipr_store_update_fw - Update the firmware on the adapter 3975 * @class_dev: device struct 3976 * @buf: buffer 3977 * @count: buffer size 3978 * 3979 * This function will update the firmware on the adapter. 3980 * 3981 * Return value: 3982 * count on success / other on failure 3983 **/ 3984 static ssize_t ipr_store_update_fw(struct device *dev, 3985 struct device_attribute *attr, 3986 const char *buf, size_t count) 3987 { 3988 struct Scsi_Host *shost = class_to_shost(dev); 3989 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; 3990 struct ipr_ucode_image_header *image_hdr; 3991 const struct firmware *fw_entry; 3992 struct ipr_sglist *sglist; 3993 char fname[100]; 3994 char *src; 3995 int len, result, dnld_size; 3996 3997 if (!capable(CAP_SYS_ADMIN)) 3998 return -EACCES; 3999 4000 len = snprintf(fname, 99, "%s", buf); 4001 fname[len-1] = '\0'; 4002 4003 if (request_firmware(&fw_entry, fname, &ioa_cfg->pdev->dev)) { 4004 dev_err(&ioa_cfg->pdev->dev, "Firmware file %s not found\n", fname); 4005 return -EIO; 4006 } 4007 4008 image_hdr = (struct ipr_ucode_image_header *)fw_entry->data; 4009 4010 src = (u8 *)image_hdr + be32_to_cpu(image_hdr->header_length); 4011 dnld_size = fw_entry->size - be32_to_cpu(image_hdr->header_length); 4012 sglist = ipr_alloc_ucode_buffer(dnld_size); 4013 4014 if (!sglist) { 4015 dev_err(&ioa_cfg->pdev->dev, "Microcode buffer allocation failed\n"); 4016 release_firmware(fw_entry); 4017 return -ENOMEM; 4018 } 4019 4020 result = ipr_copy_ucode_buffer(sglist, src, dnld_size); 4021 4022 if (result) { 4023 dev_err(&ioa_cfg->pdev->dev, 4024 "Microcode buffer copy to DMA buffer failed\n"); 4025 goto out; 4026 } 4027 4028 ipr_info("Updating microcode, please be patient. This may take up to 30 minutes.\n"); 4029 4030 result = ipr_update_ioa_ucode(ioa_cfg, sglist); 4031 4032 if (!result) 4033 result = count; 4034 out: 4035 ipr_free_ucode_buffer(sglist); 4036 release_firmware(fw_entry); 4037 return result; 4038 } 4039 4040 static struct device_attribute ipr_update_fw_attr = { 4041 .attr = { 4042 .name = "update_fw", 4043 .mode = S_IWUSR, 4044 }, 4045 .store = ipr_store_update_fw 4046 }; 4047 4048 /** 4049 * ipr_show_fw_type - Show the adapter's firmware type. 4050 * @dev: class device struct 4051 * @buf: buffer 4052 * 4053 * Return value: 4054 * number of bytes printed to buffer 4055 **/ 4056 static ssize_t ipr_show_fw_type(struct device *dev, 4057 struct device_attribute *attr, char *buf) 4058 { 4059 struct Scsi_Host *shost = class_to_shost(dev); 4060 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; 4061 unsigned long lock_flags = 0; 4062 int len; 4063 4064 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 4065 len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->sis64); 4066 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 4067 return len; 4068 } 4069 4070 static struct device_attribute ipr_ioa_fw_type_attr = { 4071 .attr = { 4072 .name = "fw_type", 4073 .mode = S_IRUGO, 4074 }, 4075 .show = ipr_show_fw_type 4076 }; 4077 4078 static struct device_attribute *ipr_ioa_attrs[] = { 4079 &ipr_fw_version_attr, 4080 &ipr_log_level_attr, 4081 &ipr_diagnostics_attr, 4082 &ipr_ioa_state_attr, 4083 &ipr_ioa_reset_attr, 4084 &ipr_update_fw_attr, 4085 &ipr_ioa_fw_type_attr, 4086 &ipr_iopoll_weight_attr, 4087 NULL, 4088 }; 4089 4090 #ifdef CONFIG_SCSI_IPR_DUMP 4091 /** 4092 * ipr_read_dump - Dump the adapter 4093 * @filp: open sysfs file 4094 * @kobj: kobject struct 4095 * @bin_attr: bin_attribute struct 4096 * @buf: buffer 4097 * @off: offset 4098 * @count: buffer size 4099 * 4100 * Return value: 4101 * number of bytes printed to buffer 4102 **/ 4103 static ssize_t ipr_read_dump(struct file *filp, struct kobject *kobj, 4104 struct bin_attribute *bin_attr, 4105 char *buf, loff_t off, size_t count) 4106 { 4107 struct device *cdev = container_of(kobj, struct device, kobj); 4108 struct Scsi_Host *shost = class_to_shost(cdev); 4109 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; 4110 struct ipr_dump *dump; 4111 unsigned long lock_flags = 0; 4112 char *src; 4113 int len, sdt_end; 4114 size_t rc = count; 4115 4116 if (!capable(CAP_SYS_ADMIN)) 4117 return -EACCES; 4118 4119 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 4120 dump = ioa_cfg->dump; 4121 4122 if (ioa_cfg->sdt_state != DUMP_OBTAINED || !dump) { 4123 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 4124 return 0; 4125 } 4126 kref_get(&dump->kref); 4127 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 4128 4129 if (off > dump->driver_dump.hdr.len) { 4130 kref_put(&dump->kref, ipr_release_dump); 4131 return 0; 4132 } 4133 4134 if (off + count > dump->driver_dump.hdr.len) { 4135 count = dump->driver_dump.hdr.len - off; 4136 rc = count; 4137 } 4138 4139 if (count && off < sizeof(dump->driver_dump)) { 4140 if (off + count > sizeof(dump->driver_dump)) 4141 len = sizeof(dump->driver_dump) - off; 4142 else 4143 len = count; 4144 src = (u8 *)&dump->driver_dump + off; 4145 memcpy(buf, src, len); 4146 buf += len; 4147 off += len; 4148 count -= len; 4149 } 4150 4151 off -= sizeof(dump->driver_dump); 4152 4153 if (ioa_cfg->sis64) 4154 sdt_end = offsetof(struct ipr_ioa_dump, sdt.entry) + 4155 (be32_to_cpu(dump->ioa_dump.sdt.hdr.num_entries_used) * 4156 sizeof(struct ipr_sdt_entry)); 4157 else 4158 sdt_end = offsetof(struct ipr_ioa_dump, sdt.entry) + 4159 (IPR_FMT2_NUM_SDT_ENTRIES * sizeof(struct ipr_sdt_entry)); 4160 4161 if (count && off < sdt_end) { 4162 if (off + count > sdt_end) 4163 len = sdt_end - off; 4164 else 4165 len = count; 4166 src = (u8 *)&dump->ioa_dump + off; 4167 memcpy(buf, src, len); 4168 buf += len; 4169 off += len; 4170 count -= len; 4171 } 4172 4173 off -= sdt_end; 4174 4175 while (count) { 4176 if ((off & PAGE_MASK) != ((off + count) & PAGE_MASK)) 4177 len = PAGE_ALIGN(off) - off; 4178 else 4179 len = count; 4180 src = (u8 *)dump->ioa_dump.ioa_data[(off & PAGE_MASK) >> PAGE_SHIFT]; 4181 src += off & ~PAGE_MASK; 4182 memcpy(buf, src, len); 4183 buf += len; 4184 off += len; 4185 count -= len; 4186 } 4187 4188 kref_put(&dump->kref, ipr_release_dump); 4189 return rc; 4190 } 4191 4192 /** 4193 * ipr_alloc_dump - Prepare for adapter dump 4194 * @ioa_cfg: ioa config struct 4195 * 4196 * Return value: 4197 * 0 on success / other on failure 4198 **/ 4199 static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg) 4200 { 4201 struct ipr_dump *dump; 4202 __be32 **ioa_data; 4203 unsigned long lock_flags = 0; 4204 4205 dump = kzalloc(sizeof(struct ipr_dump), GFP_KERNEL); 4206 4207 if (!dump) { 4208 ipr_err("Dump memory allocation failed\n"); 4209 return -ENOMEM; 4210 } 4211 4212 if (ioa_cfg->sis64) 4213 ioa_data = vmalloc(IPR_FMT3_MAX_NUM_DUMP_PAGES * sizeof(__be32 *)); 4214 else 4215 ioa_data = vmalloc(IPR_FMT2_MAX_NUM_DUMP_PAGES * sizeof(__be32 *)); 4216 4217 if (!ioa_data) { 4218 ipr_err("Dump memory allocation failed\n"); 4219 kfree(dump); 4220 return -ENOMEM; 4221 } 4222 4223 dump->ioa_dump.ioa_data = ioa_data; 4224 4225 kref_init(&dump->kref); 4226 dump->ioa_cfg = ioa_cfg; 4227 4228 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 4229 4230 if (INACTIVE != ioa_cfg->sdt_state) { 4231 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 4232 vfree(dump->ioa_dump.ioa_data); 4233 kfree(dump); 4234 return 0; 4235 } 4236 4237 ioa_cfg->dump = dump; 4238 ioa_cfg->sdt_state = WAIT_FOR_DUMP; 4239 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead && !ioa_cfg->dump_taken) { 4240 ioa_cfg->dump_taken = 1; 4241 schedule_work(&ioa_cfg->work_q); 4242 } 4243 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 4244 4245 return 0; 4246 } 4247 4248 /** 4249 * ipr_free_dump - Free adapter dump memory 4250 * @ioa_cfg: ioa config struct 4251 * 4252 * Return value: 4253 * 0 on success / other on failure 4254 **/ 4255 static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg) 4256 { 4257 struct ipr_dump *dump; 4258 unsigned long lock_flags = 0; 4259 4260 ENTER; 4261 4262 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 4263 dump = ioa_cfg->dump; 4264 if (!dump) { 4265 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 4266 return 0; 4267 } 4268 4269 ioa_cfg->dump = NULL; 4270 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 4271 4272 kref_put(&dump->kref, ipr_release_dump); 4273 4274 LEAVE; 4275 return 0; 4276 } 4277 4278 /** 4279 * ipr_write_dump - Setup dump state of adapter 4280 * @filp: open sysfs file 4281 * @kobj: kobject struct 4282 * @bin_attr: bin_attribute struct 4283 * @buf: buffer 4284 * @off: offset 4285 * @count: buffer size 4286 * 4287 * Return value: 4288 * number of bytes printed to buffer 4289 **/ 4290 static ssize_t ipr_write_dump(struct file *filp, struct kobject *kobj, 4291 struct bin_attribute *bin_attr, 4292 char *buf, loff_t off, size_t count) 4293 { 4294 struct device *cdev = container_of(kobj, struct device, kobj); 4295 struct Scsi_Host *shost = class_to_shost(cdev); 4296 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; 4297 int rc; 4298 4299 if (!capable(CAP_SYS_ADMIN)) 4300 return -EACCES; 4301 4302 if (buf[0] == '1') 4303 rc = ipr_alloc_dump(ioa_cfg); 4304 else if (buf[0] == '0') 4305 rc = ipr_free_dump(ioa_cfg); 4306 else 4307 return -EINVAL; 4308 4309 if (rc) 4310 return rc; 4311 else 4312 return count; 4313 } 4314 4315 static struct bin_attribute ipr_dump_attr = { 4316 .attr = { 4317 .name = "dump", 4318 .mode = S_IRUSR | S_IWUSR, 4319 }, 4320 .size = 0, 4321 .read = ipr_read_dump, 4322 .write = ipr_write_dump 4323 }; 4324 #else 4325 static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg) { return 0; }; 4326 #endif 4327 4328 /** 4329 * ipr_change_queue_depth - Change the device's queue depth 4330 * @sdev: scsi device struct 4331 * @qdepth: depth to set 4332 * @reason: calling context 4333 * 4334 * Return value: 4335 * actual depth set 4336 **/ 4337 static int ipr_change_queue_depth(struct scsi_device *sdev, int qdepth) 4338 { 4339 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata; 4340 struct ipr_resource_entry *res; 4341 unsigned long lock_flags = 0; 4342 4343 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 4344 res = (struct ipr_resource_entry *)sdev->hostdata; 4345 4346 if (res && ipr_is_gata(res) && qdepth > IPR_MAX_CMD_PER_ATA_LUN) 4347 qdepth = IPR_MAX_CMD_PER_ATA_LUN; 4348 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 4349 4350 scsi_change_queue_depth(sdev, qdepth); 4351 return sdev->queue_depth; 4352 } 4353 4354 /** 4355 * ipr_show_adapter_handle - Show the adapter's resource handle for this device 4356 * @dev: device struct 4357 * @attr: device attribute structure 4358 * @buf: buffer 4359 * 4360 * Return value: 4361 * number of bytes printed to buffer 4362 **/ 4363 static ssize_t ipr_show_adapter_handle(struct device *dev, struct device_attribute *attr, char *buf) 4364 { 4365 struct scsi_device *sdev = to_scsi_device(dev); 4366 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata; 4367 struct ipr_resource_entry *res; 4368 unsigned long lock_flags = 0; 4369 ssize_t len = -ENXIO; 4370 4371 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 4372 res = (struct ipr_resource_entry *)sdev->hostdata; 4373 if (res) 4374 len = snprintf(buf, PAGE_SIZE, "%08X\n", res->res_handle); 4375 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 4376 return len; 4377 } 4378 4379 static struct device_attribute ipr_adapter_handle_attr = { 4380 .attr = { 4381 .name = "adapter_handle", 4382 .mode = S_IRUSR, 4383 }, 4384 .show = ipr_show_adapter_handle 4385 }; 4386 4387 /** 4388 * ipr_show_resource_path - Show the resource path or the resource address for 4389 * this device. 4390 * @dev: device struct 4391 * @attr: device attribute structure 4392 * @buf: buffer 4393 * 4394 * Return value: 4395 * number of bytes printed to buffer 4396 **/ 4397 static ssize_t ipr_show_resource_path(struct device *dev, struct device_attribute *attr, char *buf) 4398 { 4399 struct scsi_device *sdev = to_scsi_device(dev); 4400 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata; 4401 struct ipr_resource_entry *res; 4402 unsigned long lock_flags = 0; 4403 ssize_t len = -ENXIO; 4404 char buffer[IPR_MAX_RES_PATH_LENGTH]; 4405 4406 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 4407 res = (struct ipr_resource_entry *)sdev->hostdata; 4408 if (res && ioa_cfg->sis64) 4409 len = snprintf(buf, PAGE_SIZE, "%s\n", 4410 __ipr_format_res_path(res->res_path, buffer, 4411 sizeof(buffer))); 4412 else if (res) 4413 len = snprintf(buf, PAGE_SIZE, "%d:%d:%d:%d\n", ioa_cfg->host->host_no, 4414 res->bus, res->target, res->lun); 4415 4416 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 4417 return len; 4418 } 4419 4420 static struct device_attribute ipr_resource_path_attr = { 4421 .attr = { 4422 .name = "resource_path", 4423 .mode = S_IRUGO, 4424 }, 4425 .show = ipr_show_resource_path 4426 }; 4427 4428 /** 4429 * ipr_show_device_id - Show the device_id for this device. 4430 * @dev: device struct 4431 * @attr: device attribute structure 4432 * @buf: buffer 4433 * 4434 * Return value: 4435 * number of bytes printed to buffer 4436 **/ 4437 static ssize_t ipr_show_device_id(struct device *dev, struct device_attribute *attr, char *buf) 4438 { 4439 struct scsi_device *sdev = to_scsi_device(dev); 4440 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata; 4441 struct ipr_resource_entry *res; 4442 unsigned long lock_flags = 0; 4443 ssize_t len = -ENXIO; 4444 4445 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 4446 res = (struct ipr_resource_entry *)sdev->hostdata; 4447 if (res && ioa_cfg->sis64) 4448 len = snprintf(buf, PAGE_SIZE, "0x%llx\n", res->dev_id); 4449 else if (res) 4450 len = snprintf(buf, PAGE_SIZE, "0x%llx\n", res->lun_wwn); 4451 4452 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 4453 return len; 4454 } 4455 4456 static struct device_attribute ipr_device_id_attr = { 4457 .attr = { 4458 .name = "device_id", 4459 .mode = S_IRUGO, 4460 }, 4461 .show = ipr_show_device_id 4462 }; 4463 4464 /** 4465 * ipr_show_resource_type - Show the resource type for this device. 4466 * @dev: device struct 4467 * @attr: device attribute structure 4468 * @buf: buffer 4469 * 4470 * Return value: 4471 * number of bytes printed to buffer 4472 **/ 4473 static ssize_t ipr_show_resource_type(struct device *dev, struct device_attribute *attr, char *buf) 4474 { 4475 struct scsi_device *sdev = to_scsi_device(dev); 4476 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata; 4477 struct ipr_resource_entry *res; 4478 unsigned long lock_flags = 0; 4479 ssize_t len = -ENXIO; 4480 4481 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 4482 res = (struct ipr_resource_entry *)sdev->hostdata; 4483 4484 if (res) 4485 len = snprintf(buf, PAGE_SIZE, "%x\n", res->type); 4486 4487 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 4488 return len; 4489 } 4490 4491 static struct device_attribute ipr_resource_type_attr = { 4492 .attr = { 4493 .name = "resource_type", 4494 .mode = S_IRUGO, 4495 }, 4496 .show = ipr_show_resource_type 4497 }; 4498 4499 static struct device_attribute *ipr_dev_attrs[] = { 4500 &ipr_adapter_handle_attr, 4501 &ipr_resource_path_attr, 4502 &ipr_device_id_attr, 4503 &ipr_resource_type_attr, 4504 NULL, 4505 }; 4506 4507 /** 4508 * ipr_biosparam - Return the HSC mapping 4509 * @sdev: scsi device struct 4510 * @block_device: block device pointer 4511 * @capacity: capacity of the device 4512 * @parm: Array containing returned HSC values. 4513 * 4514 * This function generates the HSC parms that fdisk uses. 4515 * We want to make sure we return something that places partitions 4516 * on 4k boundaries for best performance with the IOA. 4517 * 4518 * Return value: 4519 * 0 on success 4520 **/ 4521 static int ipr_biosparam(struct scsi_device *sdev, 4522 struct block_device *block_device, 4523 sector_t capacity, int *parm) 4524 { 4525 int heads, sectors; 4526 sector_t cylinders; 4527 4528 heads = 128; 4529 sectors = 32; 4530 4531 cylinders = capacity; 4532 sector_div(cylinders, (128 * 32)); 4533 4534 /* return result */ 4535 parm[0] = heads; 4536 parm[1] = sectors; 4537 parm[2] = cylinders; 4538 4539 return 0; 4540 } 4541 4542 /** 4543 * ipr_find_starget - Find target based on bus/target. 4544 * @starget: scsi target struct 4545 * 4546 * Return value: 4547 * resource entry pointer if found / NULL if not found 4548 **/ 4549 static struct ipr_resource_entry *ipr_find_starget(struct scsi_target *starget) 4550 { 4551 struct Scsi_Host *shost = dev_to_shost(&starget->dev); 4552 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata; 4553 struct ipr_resource_entry *res; 4554 4555 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) { 4556 if ((res->bus == starget->channel) && 4557 (res->target == starget->id)) { 4558 return res; 4559 } 4560 } 4561 4562 return NULL; 4563 } 4564 4565 static struct ata_port_info sata_port_info; 4566 4567 /** 4568 * ipr_target_alloc - Prepare for commands to a SCSI target 4569 * @starget: scsi target struct 4570 * 4571 * If the device is a SATA device, this function allocates an 4572 * ATA port with libata, else it does nothing. 4573 * 4574 * Return value: 4575 * 0 on success / non-0 on failure 4576 **/ 4577 static int ipr_target_alloc(struct scsi_target *starget) 4578 { 4579 struct Scsi_Host *shost = dev_to_shost(&starget->dev); 4580 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata; 4581 struct ipr_sata_port *sata_port; 4582 struct ata_port *ap; 4583 struct ipr_resource_entry *res; 4584 unsigned long lock_flags; 4585 4586 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 4587 res = ipr_find_starget(starget); 4588 starget->hostdata = NULL; 4589 4590 if (res && ipr_is_gata(res)) { 4591 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 4592 sata_port = kzalloc(sizeof(*sata_port), GFP_KERNEL); 4593 if (!sata_port) 4594 return -ENOMEM; 4595 4596 ap = ata_sas_port_alloc(&ioa_cfg->ata_host, &sata_port_info, shost); 4597 if (ap) { 4598 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 4599 sata_port->ioa_cfg = ioa_cfg; 4600 sata_port->ap = ap; 4601 sata_port->res = res; 4602 4603 res->sata_port = sata_port; 4604 ap->private_data = sata_port; 4605 starget->hostdata = sata_port; 4606 } else { 4607 kfree(sata_port); 4608 return -ENOMEM; 4609 } 4610 } 4611 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 4612 4613 return 0; 4614 } 4615 4616 /** 4617 * ipr_target_destroy - Destroy a SCSI target 4618 * @starget: scsi target struct 4619 * 4620 * If the device was a SATA device, this function frees the libata 4621 * ATA port, else it does nothing. 4622 * 4623 **/ 4624 static void ipr_target_destroy(struct scsi_target *starget) 4625 { 4626 struct ipr_sata_port *sata_port = starget->hostdata; 4627 struct Scsi_Host *shost = dev_to_shost(&starget->dev); 4628 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata; 4629 4630 if (ioa_cfg->sis64) { 4631 if (!ipr_find_starget(starget)) { 4632 if (starget->channel == IPR_ARRAY_VIRTUAL_BUS) 4633 clear_bit(starget->id, ioa_cfg->array_ids); 4634 else if (starget->channel == IPR_VSET_VIRTUAL_BUS) 4635 clear_bit(starget->id, ioa_cfg->vset_ids); 4636 else if (starget->channel == 0) 4637 clear_bit(starget->id, ioa_cfg->target_ids); 4638 } 4639 } 4640 4641 if (sata_port) { 4642 starget->hostdata = NULL; 4643 ata_sas_port_destroy(sata_port->ap); 4644 kfree(sata_port); 4645 } 4646 } 4647 4648 /** 4649 * ipr_find_sdev - Find device based on bus/target/lun. 4650 * @sdev: scsi device struct 4651 * 4652 * Return value: 4653 * resource entry pointer if found / NULL if not found 4654 **/ 4655 static struct ipr_resource_entry *ipr_find_sdev(struct scsi_device *sdev) 4656 { 4657 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata; 4658 struct ipr_resource_entry *res; 4659 4660 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) { 4661 if ((res->bus == sdev->channel) && 4662 (res->target == sdev->id) && 4663 (res->lun == sdev->lun)) 4664 return res; 4665 } 4666 4667 return NULL; 4668 } 4669 4670 /** 4671 * ipr_slave_destroy - Unconfigure a SCSI device 4672 * @sdev: scsi device struct 4673 * 4674 * Return value: 4675 * nothing 4676 **/ 4677 static void ipr_slave_destroy(struct scsi_device *sdev) 4678 { 4679 struct ipr_resource_entry *res; 4680 struct ipr_ioa_cfg *ioa_cfg; 4681 unsigned long lock_flags = 0; 4682 4683 ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata; 4684 4685 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 4686 res = (struct ipr_resource_entry *) sdev->hostdata; 4687 if (res) { 4688 if (res->sata_port) 4689 res->sata_port->ap->link.device[0].class = ATA_DEV_NONE; 4690 sdev->hostdata = NULL; 4691 res->sdev = NULL; 4692 res->sata_port = NULL; 4693 } 4694 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 4695 } 4696 4697 /** 4698 * ipr_slave_configure - Configure a SCSI device 4699 * @sdev: scsi device struct 4700 * 4701 * This function configures the specified scsi device. 4702 * 4703 * Return value: 4704 * 0 on success 4705 **/ 4706 static int ipr_slave_configure(struct scsi_device *sdev) 4707 { 4708 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata; 4709 struct ipr_resource_entry *res; 4710 struct ata_port *ap = NULL; 4711 unsigned long lock_flags = 0; 4712 char buffer[IPR_MAX_RES_PATH_LENGTH]; 4713 4714 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 4715 res = sdev->hostdata; 4716 if (res) { 4717 if (ipr_is_af_dasd_device(res)) 4718 sdev->type = TYPE_RAID; 4719 if (ipr_is_af_dasd_device(res) || ipr_is_ioa_resource(res)) { 4720 sdev->scsi_level = 4; 4721 sdev->no_uld_attach = 1; 4722 } 4723 if (ipr_is_vset_device(res)) { 4724 sdev->scsi_level = SCSI_SPC_3; 4725 blk_queue_rq_timeout(sdev->request_queue, 4726 IPR_VSET_RW_TIMEOUT); 4727 blk_queue_max_hw_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS); 4728 } 4729 if (ipr_is_gata(res) && res->sata_port) 4730 ap = res->sata_port->ap; 4731 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 4732 4733 if (ap) { 4734 scsi_change_queue_depth(sdev, IPR_MAX_CMD_PER_ATA_LUN); 4735 ata_sas_slave_configure(sdev, ap); 4736 } 4737 4738 if (ioa_cfg->sis64) 4739 sdev_printk(KERN_INFO, sdev, "Resource path: %s\n", 4740 ipr_format_res_path(ioa_cfg, 4741 res->res_path, buffer, sizeof(buffer))); 4742 return 0; 4743 } 4744 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 4745 return 0; 4746 } 4747 4748 /** 4749 * ipr_ata_slave_alloc - Prepare for commands to a SATA device 4750 * @sdev: scsi device struct 4751 * 4752 * This function initializes an ATA port so that future commands 4753 * sent through queuecommand will work. 4754 * 4755 * Return value: 4756 * 0 on success 4757 **/ 4758 static int ipr_ata_slave_alloc(struct scsi_device *sdev) 4759 { 4760 struct ipr_sata_port *sata_port = NULL; 4761 int rc = -ENXIO; 4762 4763 ENTER; 4764 if (sdev->sdev_target) 4765 sata_port = sdev->sdev_target->hostdata; 4766 if (sata_port) { 4767 rc = ata_sas_port_init(sata_port->ap); 4768 if (rc == 0) 4769 rc = ata_sas_sync_probe(sata_port->ap); 4770 } 4771 4772 if (rc) 4773 ipr_slave_destroy(sdev); 4774 4775 LEAVE; 4776 return rc; 4777 } 4778 4779 /** 4780 * ipr_slave_alloc - Prepare for commands to a device. 4781 * @sdev: scsi device struct 4782 * 4783 * This function saves a pointer to the resource entry 4784 * in the scsi device struct if the device exists. We 4785 * can then use this pointer in ipr_queuecommand when 4786 * handling new commands. 4787 * 4788 * Return value: 4789 * 0 on success / -ENXIO if device does not exist 4790 **/ 4791 static int ipr_slave_alloc(struct scsi_device *sdev) 4792 { 4793 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata; 4794 struct ipr_resource_entry *res; 4795 unsigned long lock_flags; 4796 int rc = -ENXIO; 4797 4798 sdev->hostdata = NULL; 4799 4800 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 4801 4802 res = ipr_find_sdev(sdev); 4803 if (res) { 4804 res->sdev = sdev; 4805 res->add_to_ml = 0; 4806 res->in_erp = 0; 4807 sdev->hostdata = res; 4808 if (!ipr_is_naca_model(res)) 4809 res->needs_sync_complete = 1; 4810 rc = 0; 4811 if (ipr_is_gata(res)) { 4812 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 4813 return ipr_ata_slave_alloc(sdev); 4814 } 4815 } 4816 4817 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 4818 4819 return rc; 4820 } 4821 4822 /** 4823 * ipr_match_lun - Match function for specified LUN 4824 * @ipr_cmd: ipr command struct 4825 * @device: device to match (sdev) 4826 * 4827 * Returns: 4828 * 1 if command matches sdev / 0 if command does not match sdev 4829 **/ 4830 static int ipr_match_lun(struct ipr_cmnd *ipr_cmd, void *device) 4831 { 4832 if (ipr_cmd->scsi_cmd && ipr_cmd->scsi_cmd->device == device) 4833 return 1; 4834 return 0; 4835 } 4836 4837 /** 4838 * ipr_wait_for_ops - Wait for matching commands to complete 4839 * @ipr_cmd: ipr command struct 4840 * @device: device to match (sdev) 4841 * @match: match function to use 4842 * 4843 * Returns: 4844 * SUCCESS / FAILED 4845 **/ 4846 static int ipr_wait_for_ops(struct ipr_ioa_cfg *ioa_cfg, void *device, 4847 int (*match)(struct ipr_cmnd *, void *)) 4848 { 4849 struct ipr_cmnd *ipr_cmd; 4850 int wait; 4851 unsigned long flags; 4852 struct ipr_hrr_queue *hrrq; 4853 signed long timeout = IPR_ABORT_TASK_TIMEOUT; 4854 DECLARE_COMPLETION_ONSTACK(comp); 4855 4856 ENTER; 4857 do { 4858 wait = 0; 4859 4860 for_each_hrrq(hrrq, ioa_cfg) { 4861 spin_lock_irqsave(hrrq->lock, flags); 4862 list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) { 4863 if (match(ipr_cmd, device)) { 4864 ipr_cmd->eh_comp = ∁ 4865 wait++; 4866 } 4867 } 4868 spin_unlock_irqrestore(hrrq->lock, flags); 4869 } 4870 4871 if (wait) { 4872 timeout = wait_for_completion_timeout(&comp, timeout); 4873 4874 if (!timeout) { 4875 wait = 0; 4876 4877 for_each_hrrq(hrrq, ioa_cfg) { 4878 spin_lock_irqsave(hrrq->lock, flags); 4879 list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) { 4880 if (match(ipr_cmd, device)) { 4881 ipr_cmd->eh_comp = NULL; 4882 wait++; 4883 } 4884 } 4885 spin_unlock_irqrestore(hrrq->lock, flags); 4886 } 4887 4888 if (wait) 4889 dev_err(&ioa_cfg->pdev->dev, "Timed out waiting for aborted commands\n"); 4890 LEAVE; 4891 return wait ? FAILED : SUCCESS; 4892 } 4893 } 4894 } while (wait); 4895 4896 LEAVE; 4897 return SUCCESS; 4898 } 4899 4900 static int ipr_eh_host_reset(struct scsi_cmnd *cmd) 4901 { 4902 struct ipr_ioa_cfg *ioa_cfg; 4903 unsigned long lock_flags = 0; 4904 int rc = SUCCESS; 4905 4906 ENTER; 4907 ioa_cfg = (struct ipr_ioa_cfg *) cmd->device->host->hostdata; 4908 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 4909 4910 if (!ioa_cfg->in_reset_reload && !ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) { 4911 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV); 4912 dev_err(&ioa_cfg->pdev->dev, 4913 "Adapter being reset as a result of error recovery.\n"); 4914 4915 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state) 4916 ioa_cfg->sdt_state = GET_DUMP; 4917 } 4918 4919 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 4920 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); 4921 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 4922 4923 /* If we got hit with a host reset while we were already resetting 4924 the adapter for some reason, and the reset failed. */ 4925 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) { 4926 ipr_trace; 4927 rc = FAILED; 4928 } 4929 4930 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 4931 LEAVE; 4932 return rc; 4933 } 4934 4935 /** 4936 * ipr_device_reset - Reset the device 4937 * @ioa_cfg: ioa config struct 4938 * @res: resource entry struct 4939 * 4940 * This function issues a device reset to the affected device. 4941 * If the device is a SCSI device, a LUN reset will be sent 4942 * to the device first. If that does not work, a target reset 4943 * will be sent. If the device is a SATA device, a PHY reset will 4944 * be sent. 4945 * 4946 * Return value: 4947 * 0 on success / non-zero on failure 4948 **/ 4949 static int ipr_device_reset(struct ipr_ioa_cfg *ioa_cfg, 4950 struct ipr_resource_entry *res) 4951 { 4952 struct ipr_cmnd *ipr_cmd; 4953 struct ipr_ioarcb *ioarcb; 4954 struct ipr_cmd_pkt *cmd_pkt; 4955 struct ipr_ioarcb_ata_regs *regs; 4956 u32 ioasc; 4957 4958 ENTER; 4959 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg); 4960 ioarcb = &ipr_cmd->ioarcb; 4961 cmd_pkt = &ioarcb->cmd_pkt; 4962 4963 if (ipr_cmd->ioa_cfg->sis64) { 4964 regs = &ipr_cmd->i.ata_ioadl.regs; 4965 ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb)); 4966 } else 4967 regs = &ioarcb->u.add_data.u.regs; 4968 4969 ioarcb->res_handle = res->res_handle; 4970 cmd_pkt->request_type = IPR_RQTYPE_IOACMD; 4971 cmd_pkt->cdb[0] = IPR_RESET_DEVICE; 4972 if (ipr_is_gata(res)) { 4973 cmd_pkt->cdb[2] = IPR_ATA_PHY_RESET; 4974 ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(regs->flags)); 4975 regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION; 4976 } 4977 4978 ipr_send_blocking_cmd(ipr_cmd, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT); 4979 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); 4980 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); 4981 if (ipr_is_gata(res) && res->sata_port && ioasc != IPR_IOASC_IOA_WAS_RESET) { 4982 if (ipr_cmd->ioa_cfg->sis64) 4983 memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata, 4984 sizeof(struct ipr_ioasa_gata)); 4985 else 4986 memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata, 4987 sizeof(struct ipr_ioasa_gata)); 4988 } 4989 4990 LEAVE; 4991 return IPR_IOASC_SENSE_KEY(ioasc) ? -EIO : 0; 4992 } 4993 4994 /** 4995 * ipr_sata_reset - Reset the SATA port 4996 * @link: SATA link to reset 4997 * @classes: class of the attached device 4998 * 4999 * This function issues a SATA phy reset to the affected ATA link. 5000 * 5001 * Return value: 5002 * 0 on success / non-zero on failure 5003 **/ 5004 static int ipr_sata_reset(struct ata_link *link, unsigned int *classes, 5005 unsigned long deadline) 5006 { 5007 struct ipr_sata_port *sata_port = link->ap->private_data; 5008 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg; 5009 struct ipr_resource_entry *res; 5010 unsigned long lock_flags = 0; 5011 int rc = -ENXIO; 5012 5013 ENTER; 5014 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 5015 while (ioa_cfg->in_reset_reload) { 5016 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 5017 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); 5018 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 5019 } 5020 5021 res = sata_port->res; 5022 if (res) { 5023 rc = ipr_device_reset(ioa_cfg, res); 5024 *classes = res->ata_class; 5025 } 5026 5027 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 5028 LEAVE; 5029 return rc; 5030 } 5031 5032 /** 5033 * ipr_eh_dev_reset - Reset the device 5034 * @scsi_cmd: scsi command struct 5035 * 5036 * This function issues a device reset to the affected device. 5037 * A LUN reset will be sent to the device first. If that does 5038 * not work, a target reset will be sent. 5039 * 5040 * Return value: 5041 * SUCCESS / FAILED 5042 **/ 5043 static int __ipr_eh_dev_reset(struct scsi_cmnd *scsi_cmd) 5044 { 5045 struct ipr_cmnd *ipr_cmd; 5046 struct ipr_ioa_cfg *ioa_cfg; 5047 struct ipr_resource_entry *res; 5048 struct ata_port *ap; 5049 int rc = 0; 5050 struct ipr_hrr_queue *hrrq; 5051 5052 ENTER; 5053 ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata; 5054 res = scsi_cmd->device->hostdata; 5055 5056 if (!res) 5057 return FAILED; 5058 5059 /* 5060 * If we are currently going through reset/reload, return failed. This will force the 5061 * mid-layer to call ipr_eh_host_reset, which will then go to sleep and wait for the 5062 * reset to complete 5063 */ 5064 if (ioa_cfg->in_reset_reload) 5065 return FAILED; 5066 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) 5067 return FAILED; 5068 5069 for_each_hrrq(hrrq, ioa_cfg) { 5070 spin_lock(&hrrq->_lock); 5071 list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) { 5072 if (ipr_cmd->ioarcb.res_handle == res->res_handle) { 5073 if (ipr_cmd->scsi_cmd) 5074 ipr_cmd->done = ipr_scsi_eh_done; 5075 if (ipr_cmd->qc) 5076 ipr_cmd->done = ipr_sata_eh_done; 5077 if (ipr_cmd->qc && 5078 !(ipr_cmd->qc->flags & ATA_QCFLAG_FAILED)) { 5079 ipr_cmd->qc->err_mask |= AC_ERR_TIMEOUT; 5080 ipr_cmd->qc->flags |= ATA_QCFLAG_FAILED; 5081 } 5082 } 5083 } 5084 spin_unlock(&hrrq->_lock); 5085 } 5086 res->resetting_device = 1; 5087 scmd_printk(KERN_ERR, scsi_cmd, "Resetting device\n"); 5088 5089 if (ipr_is_gata(res) && res->sata_port) { 5090 ap = res->sata_port->ap; 5091 spin_unlock_irq(scsi_cmd->device->host->host_lock); 5092 ata_std_error_handler(ap); 5093 spin_lock_irq(scsi_cmd->device->host->host_lock); 5094 5095 for_each_hrrq(hrrq, ioa_cfg) { 5096 spin_lock(&hrrq->_lock); 5097 list_for_each_entry(ipr_cmd, 5098 &hrrq->hrrq_pending_q, queue) { 5099 if (ipr_cmd->ioarcb.res_handle == 5100 res->res_handle) { 5101 rc = -EIO; 5102 break; 5103 } 5104 } 5105 spin_unlock(&hrrq->_lock); 5106 } 5107 } else 5108 rc = ipr_device_reset(ioa_cfg, res); 5109 res->resetting_device = 0; 5110 res->reset_occurred = 1; 5111 5112 LEAVE; 5113 return rc ? FAILED : SUCCESS; 5114 } 5115 5116 static int ipr_eh_dev_reset(struct scsi_cmnd *cmd) 5117 { 5118 int rc; 5119 struct ipr_ioa_cfg *ioa_cfg; 5120 5121 ioa_cfg = (struct ipr_ioa_cfg *) cmd->device->host->hostdata; 5122 5123 spin_lock_irq(cmd->device->host->host_lock); 5124 rc = __ipr_eh_dev_reset(cmd); 5125 spin_unlock_irq(cmd->device->host->host_lock); 5126 5127 if (rc == SUCCESS) 5128 rc = ipr_wait_for_ops(ioa_cfg, cmd->device, ipr_match_lun); 5129 5130 return rc; 5131 } 5132 5133 /** 5134 * ipr_bus_reset_done - Op done function for bus reset. 5135 * @ipr_cmd: ipr command struct 5136 * 5137 * This function is the op done function for a bus reset 5138 * 5139 * Return value: 5140 * none 5141 **/ 5142 static void ipr_bus_reset_done(struct ipr_cmnd *ipr_cmd) 5143 { 5144 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 5145 struct ipr_resource_entry *res; 5146 5147 ENTER; 5148 if (!ioa_cfg->sis64) 5149 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) { 5150 if (res->res_handle == ipr_cmd->ioarcb.res_handle) { 5151 scsi_report_bus_reset(ioa_cfg->host, res->bus); 5152 break; 5153 } 5154 } 5155 5156 /* 5157 * If abort has not completed, indicate the reset has, else call the 5158 * abort's done function to wake the sleeping eh thread 5159 */ 5160 if (ipr_cmd->sibling->sibling) 5161 ipr_cmd->sibling->sibling = NULL; 5162 else 5163 ipr_cmd->sibling->done(ipr_cmd->sibling); 5164 5165 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); 5166 LEAVE; 5167 } 5168 5169 /** 5170 * ipr_abort_timeout - An abort task has timed out 5171 * @ipr_cmd: ipr command struct 5172 * 5173 * This function handles when an abort task times out. If this 5174 * happens we issue a bus reset since we have resources tied 5175 * up that must be freed before returning to the midlayer. 5176 * 5177 * Return value: 5178 * none 5179 **/ 5180 static void ipr_abort_timeout(struct ipr_cmnd *ipr_cmd) 5181 { 5182 struct ipr_cmnd *reset_cmd; 5183 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 5184 struct ipr_cmd_pkt *cmd_pkt; 5185 unsigned long lock_flags = 0; 5186 5187 ENTER; 5188 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 5189 if (ipr_cmd->completion.done || ioa_cfg->in_reset_reload) { 5190 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 5191 return; 5192 } 5193 5194 sdev_printk(KERN_ERR, ipr_cmd->u.sdev, "Abort timed out. Resetting bus.\n"); 5195 reset_cmd = ipr_get_free_ipr_cmnd(ioa_cfg); 5196 ipr_cmd->sibling = reset_cmd; 5197 reset_cmd->sibling = ipr_cmd; 5198 reset_cmd->ioarcb.res_handle = ipr_cmd->ioarcb.res_handle; 5199 cmd_pkt = &reset_cmd->ioarcb.cmd_pkt; 5200 cmd_pkt->request_type = IPR_RQTYPE_IOACMD; 5201 cmd_pkt->cdb[0] = IPR_RESET_DEVICE; 5202 cmd_pkt->cdb[2] = IPR_RESET_TYPE_SELECT | IPR_BUS_RESET; 5203 5204 ipr_do_req(reset_cmd, ipr_bus_reset_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT); 5205 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 5206 LEAVE; 5207 } 5208 5209 /** 5210 * ipr_cancel_op - Cancel specified op 5211 * @scsi_cmd: scsi command struct 5212 * 5213 * This function cancels specified op. 5214 * 5215 * Return value: 5216 * SUCCESS / FAILED 5217 **/ 5218 static int ipr_cancel_op(struct scsi_cmnd *scsi_cmd) 5219 { 5220 struct ipr_cmnd *ipr_cmd; 5221 struct ipr_ioa_cfg *ioa_cfg; 5222 struct ipr_resource_entry *res; 5223 struct ipr_cmd_pkt *cmd_pkt; 5224 u32 ioasc, int_reg; 5225 int op_found = 0; 5226 struct ipr_hrr_queue *hrrq; 5227 5228 ENTER; 5229 ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata; 5230 res = scsi_cmd->device->hostdata; 5231 5232 /* If we are currently going through reset/reload, return failed. 5233 * This will force the mid-layer to call ipr_eh_host_reset, 5234 * which will then go to sleep and wait for the reset to complete 5235 */ 5236 if (ioa_cfg->in_reset_reload || 5237 ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) 5238 return FAILED; 5239 if (!res) 5240 return FAILED; 5241 5242 /* 5243 * If we are aborting a timed out op, chances are that the timeout was caused 5244 * by a still not detected EEH error. In such cases, reading a register will 5245 * trigger the EEH recovery infrastructure. 5246 */ 5247 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg); 5248 5249 if (!ipr_is_gscsi(res)) 5250 return FAILED; 5251 5252 for_each_hrrq(hrrq, ioa_cfg) { 5253 spin_lock(&hrrq->_lock); 5254 list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) { 5255 if (ipr_cmd->scsi_cmd == scsi_cmd) { 5256 ipr_cmd->done = ipr_scsi_eh_done; 5257 op_found = 1; 5258 break; 5259 } 5260 } 5261 spin_unlock(&hrrq->_lock); 5262 } 5263 5264 if (!op_found) 5265 return SUCCESS; 5266 5267 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg); 5268 ipr_cmd->ioarcb.res_handle = res->res_handle; 5269 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt; 5270 cmd_pkt->request_type = IPR_RQTYPE_IOACMD; 5271 cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS; 5272 ipr_cmd->u.sdev = scsi_cmd->device; 5273 5274 scmd_printk(KERN_ERR, scsi_cmd, "Aborting command: %02X\n", 5275 scsi_cmd->cmnd[0]); 5276 ipr_send_blocking_cmd(ipr_cmd, ipr_abort_timeout, IPR_CANCEL_ALL_TIMEOUT); 5277 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); 5278 5279 /* 5280 * If the abort task timed out and we sent a bus reset, we will get 5281 * one the following responses to the abort 5282 */ 5283 if (ioasc == IPR_IOASC_BUS_WAS_RESET || ioasc == IPR_IOASC_SYNC_REQUIRED) { 5284 ioasc = 0; 5285 ipr_trace; 5286 } 5287 5288 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); 5289 if (!ipr_is_naca_model(res)) 5290 res->needs_sync_complete = 1; 5291 5292 LEAVE; 5293 return IPR_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS; 5294 } 5295 5296 /** 5297 * ipr_eh_abort - Abort a single op 5298 * @scsi_cmd: scsi command struct 5299 * 5300 * Return value: 5301 * 0 if scan in progress / 1 if scan is complete 5302 **/ 5303 static int ipr_scan_finished(struct Scsi_Host *shost, unsigned long elapsed_time) 5304 { 5305 unsigned long lock_flags; 5306 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata; 5307 int rc = 0; 5308 5309 spin_lock_irqsave(shost->host_lock, lock_flags); 5310 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead || ioa_cfg->scan_done) 5311 rc = 1; 5312 if ((elapsed_time/HZ) > (ioa_cfg->transop_timeout * 2)) 5313 rc = 1; 5314 spin_unlock_irqrestore(shost->host_lock, lock_flags); 5315 return rc; 5316 } 5317 5318 /** 5319 * ipr_eh_host_reset - Reset the host adapter 5320 * @scsi_cmd: scsi command struct 5321 * 5322 * Return value: 5323 * SUCCESS / FAILED 5324 **/ 5325 static int ipr_eh_abort(struct scsi_cmnd *scsi_cmd) 5326 { 5327 unsigned long flags; 5328 int rc; 5329 struct ipr_ioa_cfg *ioa_cfg; 5330 5331 ENTER; 5332 5333 ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata; 5334 5335 spin_lock_irqsave(scsi_cmd->device->host->host_lock, flags); 5336 rc = ipr_cancel_op(scsi_cmd); 5337 spin_unlock_irqrestore(scsi_cmd->device->host->host_lock, flags); 5338 5339 if (rc == SUCCESS) 5340 rc = ipr_wait_for_ops(ioa_cfg, scsi_cmd->device, ipr_match_lun); 5341 LEAVE; 5342 return rc; 5343 } 5344 5345 /** 5346 * ipr_handle_other_interrupt - Handle "other" interrupts 5347 * @ioa_cfg: ioa config struct 5348 * @int_reg: interrupt register 5349 * 5350 * Return value: 5351 * IRQ_NONE / IRQ_HANDLED 5352 **/ 5353 static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg, 5354 u32 int_reg) 5355 { 5356 irqreturn_t rc = IRQ_HANDLED; 5357 u32 int_mask_reg; 5358 5359 int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg32); 5360 int_reg &= ~int_mask_reg; 5361 5362 /* If an interrupt on the adapter did not occur, ignore it. 5363 * Or in the case of SIS 64, check for a stage change interrupt. 5364 */ 5365 if ((int_reg & IPR_PCII_OPER_INTERRUPTS) == 0) { 5366 if (ioa_cfg->sis64) { 5367 int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg); 5368 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg; 5369 if (int_reg & IPR_PCII_IPL_STAGE_CHANGE) { 5370 5371 /* clear stage change */ 5372 writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.clr_interrupt_reg); 5373 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg; 5374 list_del(&ioa_cfg->reset_cmd->queue); 5375 del_timer(&ioa_cfg->reset_cmd->timer); 5376 ipr_reset_ioa_job(ioa_cfg->reset_cmd); 5377 return IRQ_HANDLED; 5378 } 5379 } 5380 5381 return IRQ_NONE; 5382 } 5383 5384 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) { 5385 /* Mask the interrupt */ 5386 writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.set_interrupt_mask_reg); 5387 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg); 5388 5389 list_del(&ioa_cfg->reset_cmd->queue); 5390 del_timer(&ioa_cfg->reset_cmd->timer); 5391 ipr_reset_ioa_job(ioa_cfg->reset_cmd); 5392 } else if ((int_reg & IPR_PCII_HRRQ_UPDATED) == int_reg) { 5393 if (ioa_cfg->clear_isr) { 5394 if (ipr_debug && printk_ratelimit()) 5395 dev_err(&ioa_cfg->pdev->dev, 5396 "Spurious interrupt detected. 0x%08X\n", int_reg); 5397 writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg32); 5398 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32); 5399 return IRQ_NONE; 5400 } 5401 } else { 5402 if (int_reg & IPR_PCII_IOA_UNIT_CHECKED) 5403 ioa_cfg->ioa_unit_checked = 1; 5404 else if (int_reg & IPR_PCII_NO_HOST_RRQ) 5405 dev_err(&ioa_cfg->pdev->dev, 5406 "No Host RRQ. 0x%08X\n", int_reg); 5407 else 5408 dev_err(&ioa_cfg->pdev->dev, 5409 "Permanent IOA failure. 0x%08X\n", int_reg); 5410 5411 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state) 5412 ioa_cfg->sdt_state = GET_DUMP; 5413 5414 ipr_mask_and_clear_interrupts(ioa_cfg, ~0); 5415 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE); 5416 } 5417 5418 return rc; 5419 } 5420 5421 /** 5422 * ipr_isr_eh - Interrupt service routine error handler 5423 * @ioa_cfg: ioa config struct 5424 * @msg: message to log 5425 * 5426 * Return value: 5427 * none 5428 **/ 5429 static void ipr_isr_eh(struct ipr_ioa_cfg *ioa_cfg, char *msg, u16 number) 5430 { 5431 ioa_cfg->errors_logged++; 5432 dev_err(&ioa_cfg->pdev->dev, "%s %d\n", msg, number); 5433 5434 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state) 5435 ioa_cfg->sdt_state = GET_DUMP; 5436 5437 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE); 5438 } 5439 5440 static int ipr_process_hrrq(struct ipr_hrr_queue *hrr_queue, int budget, 5441 struct list_head *doneq) 5442 { 5443 u32 ioasc; 5444 u16 cmd_index; 5445 struct ipr_cmnd *ipr_cmd; 5446 struct ipr_ioa_cfg *ioa_cfg = hrr_queue->ioa_cfg; 5447 int num_hrrq = 0; 5448 5449 /* If interrupts are disabled, ignore the interrupt */ 5450 if (!hrr_queue->allow_interrupts) 5451 return 0; 5452 5453 while ((be32_to_cpu(*hrr_queue->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) == 5454 hrr_queue->toggle_bit) { 5455 5456 cmd_index = (be32_to_cpu(*hrr_queue->hrrq_curr) & 5457 IPR_HRRQ_REQ_RESP_HANDLE_MASK) >> 5458 IPR_HRRQ_REQ_RESP_HANDLE_SHIFT; 5459 5460 if (unlikely(cmd_index > hrr_queue->max_cmd_id || 5461 cmd_index < hrr_queue->min_cmd_id)) { 5462 ipr_isr_eh(ioa_cfg, 5463 "Invalid response handle from IOA: ", 5464 cmd_index); 5465 break; 5466 } 5467 5468 ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index]; 5469 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); 5470 5471 ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, ioasc); 5472 5473 list_move_tail(&ipr_cmd->queue, doneq); 5474 5475 if (hrr_queue->hrrq_curr < hrr_queue->hrrq_end) { 5476 hrr_queue->hrrq_curr++; 5477 } else { 5478 hrr_queue->hrrq_curr = hrr_queue->hrrq_start; 5479 hrr_queue->toggle_bit ^= 1u; 5480 } 5481 num_hrrq++; 5482 if (budget > 0 && num_hrrq >= budget) 5483 break; 5484 } 5485 5486 return num_hrrq; 5487 } 5488 5489 static int ipr_iopoll(struct blk_iopoll *iop, int budget) 5490 { 5491 struct ipr_ioa_cfg *ioa_cfg; 5492 struct ipr_hrr_queue *hrrq; 5493 struct ipr_cmnd *ipr_cmd, *temp; 5494 unsigned long hrrq_flags; 5495 int completed_ops; 5496 LIST_HEAD(doneq); 5497 5498 hrrq = container_of(iop, struct ipr_hrr_queue, iopoll); 5499 ioa_cfg = hrrq->ioa_cfg; 5500 5501 spin_lock_irqsave(hrrq->lock, hrrq_flags); 5502 completed_ops = ipr_process_hrrq(hrrq, budget, &doneq); 5503 5504 if (completed_ops < budget) 5505 blk_iopoll_complete(iop); 5506 spin_unlock_irqrestore(hrrq->lock, hrrq_flags); 5507 5508 list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) { 5509 list_del(&ipr_cmd->queue); 5510 del_timer(&ipr_cmd->timer); 5511 ipr_cmd->fast_done(ipr_cmd); 5512 } 5513 5514 return completed_ops; 5515 } 5516 5517 /** 5518 * ipr_isr - Interrupt service routine 5519 * @irq: irq number 5520 * @devp: pointer to ioa config struct 5521 * 5522 * Return value: 5523 * IRQ_NONE / IRQ_HANDLED 5524 **/ 5525 static irqreturn_t ipr_isr(int irq, void *devp) 5526 { 5527 struct ipr_hrr_queue *hrrq = (struct ipr_hrr_queue *)devp; 5528 struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg; 5529 unsigned long hrrq_flags = 0; 5530 u32 int_reg = 0; 5531 int num_hrrq = 0; 5532 int irq_none = 0; 5533 struct ipr_cmnd *ipr_cmd, *temp; 5534 irqreturn_t rc = IRQ_NONE; 5535 LIST_HEAD(doneq); 5536 5537 spin_lock_irqsave(hrrq->lock, hrrq_flags); 5538 /* If interrupts are disabled, ignore the interrupt */ 5539 if (!hrrq->allow_interrupts) { 5540 spin_unlock_irqrestore(hrrq->lock, hrrq_flags); 5541 return IRQ_NONE; 5542 } 5543 5544 while (1) { 5545 if (ipr_process_hrrq(hrrq, -1, &doneq)) { 5546 rc = IRQ_HANDLED; 5547 5548 if (!ioa_cfg->clear_isr) 5549 break; 5550 5551 /* Clear the PCI interrupt */ 5552 num_hrrq = 0; 5553 do { 5554 writel(IPR_PCII_HRRQ_UPDATED, 5555 ioa_cfg->regs.clr_interrupt_reg32); 5556 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32); 5557 } while (int_reg & IPR_PCII_HRRQ_UPDATED && 5558 num_hrrq++ < IPR_MAX_HRRQ_RETRIES); 5559 5560 } else if (rc == IRQ_NONE && irq_none == 0) { 5561 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32); 5562 irq_none++; 5563 } else if (num_hrrq == IPR_MAX_HRRQ_RETRIES && 5564 int_reg & IPR_PCII_HRRQ_UPDATED) { 5565 ipr_isr_eh(ioa_cfg, 5566 "Error clearing HRRQ: ", num_hrrq); 5567 rc = IRQ_HANDLED; 5568 break; 5569 } else 5570 break; 5571 } 5572 5573 if (unlikely(rc == IRQ_NONE)) 5574 rc = ipr_handle_other_interrupt(ioa_cfg, int_reg); 5575 5576 spin_unlock_irqrestore(hrrq->lock, hrrq_flags); 5577 list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) { 5578 list_del(&ipr_cmd->queue); 5579 del_timer(&ipr_cmd->timer); 5580 ipr_cmd->fast_done(ipr_cmd); 5581 } 5582 return rc; 5583 } 5584 5585 /** 5586 * ipr_isr_mhrrq - Interrupt service routine 5587 * @irq: irq number 5588 * @devp: pointer to ioa config struct 5589 * 5590 * Return value: 5591 * IRQ_NONE / IRQ_HANDLED 5592 **/ 5593 static irqreturn_t ipr_isr_mhrrq(int irq, void *devp) 5594 { 5595 struct ipr_hrr_queue *hrrq = (struct ipr_hrr_queue *)devp; 5596 struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg; 5597 unsigned long hrrq_flags = 0; 5598 struct ipr_cmnd *ipr_cmd, *temp; 5599 irqreturn_t rc = IRQ_NONE; 5600 LIST_HEAD(doneq); 5601 5602 spin_lock_irqsave(hrrq->lock, hrrq_flags); 5603 5604 /* If interrupts are disabled, ignore the interrupt */ 5605 if (!hrrq->allow_interrupts) { 5606 spin_unlock_irqrestore(hrrq->lock, hrrq_flags); 5607 return IRQ_NONE; 5608 } 5609 5610 if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) { 5611 if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) == 5612 hrrq->toggle_bit) { 5613 if (!blk_iopoll_sched_prep(&hrrq->iopoll)) 5614 blk_iopoll_sched(&hrrq->iopoll); 5615 spin_unlock_irqrestore(hrrq->lock, hrrq_flags); 5616 return IRQ_HANDLED; 5617 } 5618 } else { 5619 if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) == 5620 hrrq->toggle_bit) 5621 5622 if (ipr_process_hrrq(hrrq, -1, &doneq)) 5623 rc = IRQ_HANDLED; 5624 } 5625 5626 spin_unlock_irqrestore(hrrq->lock, hrrq_flags); 5627 5628 list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) { 5629 list_del(&ipr_cmd->queue); 5630 del_timer(&ipr_cmd->timer); 5631 ipr_cmd->fast_done(ipr_cmd); 5632 } 5633 return rc; 5634 } 5635 5636 /** 5637 * ipr_build_ioadl64 - Build a scatter/gather list and map the buffer 5638 * @ioa_cfg: ioa config struct 5639 * @ipr_cmd: ipr command struct 5640 * 5641 * Return value: 5642 * 0 on success / -1 on failure 5643 **/ 5644 static int ipr_build_ioadl64(struct ipr_ioa_cfg *ioa_cfg, 5645 struct ipr_cmnd *ipr_cmd) 5646 { 5647 int i, nseg; 5648 struct scatterlist *sg; 5649 u32 length; 5650 u32 ioadl_flags = 0; 5651 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd; 5652 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; 5653 struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64; 5654 5655 length = scsi_bufflen(scsi_cmd); 5656 if (!length) 5657 return 0; 5658 5659 nseg = scsi_dma_map(scsi_cmd); 5660 if (nseg < 0) { 5661 if (printk_ratelimit()) 5662 dev_err(&ioa_cfg->pdev->dev, "scsi_dma_map failed!\n"); 5663 return -1; 5664 } 5665 5666 ipr_cmd->dma_use_sg = nseg; 5667 5668 ioarcb->data_transfer_length = cpu_to_be32(length); 5669 ioarcb->ioadl_len = 5670 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg); 5671 5672 if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) { 5673 ioadl_flags = IPR_IOADL_FLAGS_WRITE; 5674 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ; 5675 } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) 5676 ioadl_flags = IPR_IOADL_FLAGS_READ; 5677 5678 scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) { 5679 ioadl64[i].flags = cpu_to_be32(ioadl_flags); 5680 ioadl64[i].data_len = cpu_to_be32(sg_dma_len(sg)); 5681 ioadl64[i].address = cpu_to_be64(sg_dma_address(sg)); 5682 } 5683 5684 ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST); 5685 return 0; 5686 } 5687 5688 /** 5689 * ipr_build_ioadl - Build a scatter/gather list and map the buffer 5690 * @ioa_cfg: ioa config struct 5691 * @ipr_cmd: ipr command struct 5692 * 5693 * Return value: 5694 * 0 on success / -1 on failure 5695 **/ 5696 static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg, 5697 struct ipr_cmnd *ipr_cmd) 5698 { 5699 int i, nseg; 5700 struct scatterlist *sg; 5701 u32 length; 5702 u32 ioadl_flags = 0; 5703 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd; 5704 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; 5705 struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl; 5706 5707 length = scsi_bufflen(scsi_cmd); 5708 if (!length) 5709 return 0; 5710 5711 nseg = scsi_dma_map(scsi_cmd); 5712 if (nseg < 0) { 5713 dev_err(&ioa_cfg->pdev->dev, "scsi_dma_map failed!\n"); 5714 return -1; 5715 } 5716 5717 ipr_cmd->dma_use_sg = nseg; 5718 5719 if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) { 5720 ioadl_flags = IPR_IOADL_FLAGS_WRITE; 5721 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ; 5722 ioarcb->data_transfer_length = cpu_to_be32(length); 5723 ioarcb->ioadl_len = 5724 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg); 5725 } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) { 5726 ioadl_flags = IPR_IOADL_FLAGS_READ; 5727 ioarcb->read_data_transfer_length = cpu_to_be32(length); 5728 ioarcb->read_ioadl_len = 5729 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg); 5730 } 5731 5732 if (ipr_cmd->dma_use_sg <= ARRAY_SIZE(ioarcb->u.add_data.u.ioadl)) { 5733 ioadl = ioarcb->u.add_data.u.ioadl; 5734 ioarcb->write_ioadl_addr = cpu_to_be32((ipr_cmd->dma_addr) + 5735 offsetof(struct ipr_ioarcb, u.add_data)); 5736 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr; 5737 } 5738 5739 scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) { 5740 ioadl[i].flags_and_data_len = 5741 cpu_to_be32(ioadl_flags | sg_dma_len(sg)); 5742 ioadl[i].address = cpu_to_be32(sg_dma_address(sg)); 5743 } 5744 5745 ioadl[i-1].flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST); 5746 return 0; 5747 } 5748 5749 /** 5750 * ipr_erp_done - Process completion of ERP for a device 5751 * @ipr_cmd: ipr command struct 5752 * 5753 * This function copies the sense buffer into the scsi_cmd 5754 * struct and pushes the scsi_done function. 5755 * 5756 * Return value: 5757 * nothing 5758 **/ 5759 static void ipr_erp_done(struct ipr_cmnd *ipr_cmd) 5760 { 5761 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd; 5762 struct ipr_resource_entry *res = scsi_cmd->device->hostdata; 5763 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); 5764 5765 if (IPR_IOASC_SENSE_KEY(ioasc) > 0) { 5766 scsi_cmd->result |= (DID_ERROR << 16); 5767 scmd_printk(KERN_ERR, scsi_cmd, 5768 "Request Sense failed with IOASC: 0x%08X\n", ioasc); 5769 } else { 5770 memcpy(scsi_cmd->sense_buffer, ipr_cmd->sense_buffer, 5771 SCSI_SENSE_BUFFERSIZE); 5772 } 5773 5774 if (res) { 5775 if (!ipr_is_naca_model(res)) 5776 res->needs_sync_complete = 1; 5777 res->in_erp = 0; 5778 } 5779 scsi_dma_unmap(ipr_cmd->scsi_cmd); 5780 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); 5781 scsi_cmd->scsi_done(scsi_cmd); 5782 } 5783 5784 /** 5785 * ipr_reinit_ipr_cmnd_for_erp - Re-initialize a cmnd block to be used for ERP 5786 * @ipr_cmd: ipr command struct 5787 * 5788 * Return value: 5789 * none 5790 **/ 5791 static void ipr_reinit_ipr_cmnd_for_erp(struct ipr_cmnd *ipr_cmd) 5792 { 5793 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; 5794 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa; 5795 dma_addr_t dma_addr = ipr_cmd->dma_addr; 5796 5797 memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt)); 5798 ioarcb->data_transfer_length = 0; 5799 ioarcb->read_data_transfer_length = 0; 5800 ioarcb->ioadl_len = 0; 5801 ioarcb->read_ioadl_len = 0; 5802 ioasa->hdr.ioasc = 0; 5803 ioasa->hdr.residual_data_len = 0; 5804 5805 if (ipr_cmd->ioa_cfg->sis64) 5806 ioarcb->u.sis64_addr_data.data_ioadl_addr = 5807 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64)); 5808 else { 5809 ioarcb->write_ioadl_addr = 5810 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl)); 5811 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr; 5812 } 5813 } 5814 5815 /** 5816 * ipr_erp_request_sense - Send request sense to a device 5817 * @ipr_cmd: ipr command struct 5818 * 5819 * This function sends a request sense to a device as a result 5820 * of a check condition. 5821 * 5822 * Return value: 5823 * nothing 5824 **/ 5825 static void ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd) 5826 { 5827 struct ipr_cmd_pkt *cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt; 5828 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); 5829 5830 if (IPR_IOASC_SENSE_KEY(ioasc) > 0) { 5831 ipr_erp_done(ipr_cmd); 5832 return; 5833 } 5834 5835 ipr_reinit_ipr_cmnd_for_erp(ipr_cmd); 5836 5837 cmd_pkt->request_type = IPR_RQTYPE_SCSICDB; 5838 cmd_pkt->cdb[0] = REQUEST_SENSE; 5839 cmd_pkt->cdb[4] = SCSI_SENSE_BUFFERSIZE; 5840 cmd_pkt->flags_hi |= IPR_FLAGS_HI_SYNC_OVERRIDE; 5841 cmd_pkt->flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK; 5842 cmd_pkt->timeout = cpu_to_be16(IPR_REQUEST_SENSE_TIMEOUT / HZ); 5843 5844 ipr_init_ioadl(ipr_cmd, ipr_cmd->sense_buffer_dma, 5845 SCSI_SENSE_BUFFERSIZE, IPR_IOADL_FLAGS_READ_LAST); 5846 5847 ipr_do_req(ipr_cmd, ipr_erp_done, ipr_timeout, 5848 IPR_REQUEST_SENSE_TIMEOUT * 2); 5849 } 5850 5851 /** 5852 * ipr_erp_cancel_all - Send cancel all to a device 5853 * @ipr_cmd: ipr command struct 5854 * 5855 * This function sends a cancel all to a device to clear the 5856 * queue. If we are running TCQ on the device, QERR is set to 1, 5857 * which means all outstanding ops have been dropped on the floor. 5858 * Cancel all will return them to us. 5859 * 5860 * Return value: 5861 * nothing 5862 **/ 5863 static void ipr_erp_cancel_all(struct ipr_cmnd *ipr_cmd) 5864 { 5865 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd; 5866 struct ipr_resource_entry *res = scsi_cmd->device->hostdata; 5867 struct ipr_cmd_pkt *cmd_pkt; 5868 5869 res->in_erp = 1; 5870 5871 ipr_reinit_ipr_cmnd_for_erp(ipr_cmd); 5872 5873 if (!scsi_cmd->device->simple_tags) { 5874 ipr_erp_request_sense(ipr_cmd); 5875 return; 5876 } 5877 5878 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt; 5879 cmd_pkt->request_type = IPR_RQTYPE_IOACMD; 5880 cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS; 5881 5882 ipr_do_req(ipr_cmd, ipr_erp_request_sense, ipr_timeout, 5883 IPR_CANCEL_ALL_TIMEOUT); 5884 } 5885 5886 /** 5887 * ipr_dump_ioasa - Dump contents of IOASA 5888 * @ioa_cfg: ioa config struct 5889 * @ipr_cmd: ipr command struct 5890 * @res: resource entry struct 5891 * 5892 * This function is invoked by the interrupt handler when ops 5893 * fail. It will log the IOASA if appropriate. Only called 5894 * for GPDD ops. 5895 * 5896 * Return value: 5897 * none 5898 **/ 5899 static void ipr_dump_ioasa(struct ipr_ioa_cfg *ioa_cfg, 5900 struct ipr_cmnd *ipr_cmd, struct ipr_resource_entry *res) 5901 { 5902 int i; 5903 u16 data_len; 5904 u32 ioasc, fd_ioasc; 5905 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa; 5906 __be32 *ioasa_data = (__be32 *)ioasa; 5907 int error_index; 5908 5909 ioasc = be32_to_cpu(ioasa->hdr.ioasc) & IPR_IOASC_IOASC_MASK; 5910 fd_ioasc = be32_to_cpu(ioasa->hdr.fd_ioasc) & IPR_IOASC_IOASC_MASK; 5911 5912 if (0 == ioasc) 5913 return; 5914 5915 if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL) 5916 return; 5917 5918 if (ioasc == IPR_IOASC_BUS_WAS_RESET && fd_ioasc) 5919 error_index = ipr_get_error(fd_ioasc); 5920 else 5921 error_index = ipr_get_error(ioasc); 5922 5923 if (ioa_cfg->log_level < IPR_MAX_LOG_LEVEL) { 5924 /* Don't log an error if the IOA already logged one */ 5925 if (ioasa->hdr.ilid != 0) 5926 return; 5927 5928 if (!ipr_is_gscsi(res)) 5929 return; 5930 5931 if (ipr_error_table[error_index].log_ioasa == 0) 5932 return; 5933 } 5934 5935 ipr_res_err(ioa_cfg, res, "%s\n", ipr_error_table[error_index].error); 5936 5937 data_len = be16_to_cpu(ioasa->hdr.ret_stat_len); 5938 if (ioa_cfg->sis64 && sizeof(struct ipr_ioasa64) < data_len) 5939 data_len = sizeof(struct ipr_ioasa64); 5940 else if (!ioa_cfg->sis64 && sizeof(struct ipr_ioasa) < data_len) 5941 data_len = sizeof(struct ipr_ioasa); 5942 5943 ipr_err("IOASA Dump:\n"); 5944 5945 for (i = 0; i < data_len / 4; i += 4) { 5946 ipr_err("%08X: %08X %08X %08X %08X\n", i*4, 5947 be32_to_cpu(ioasa_data[i]), 5948 be32_to_cpu(ioasa_data[i+1]), 5949 be32_to_cpu(ioasa_data[i+2]), 5950 be32_to_cpu(ioasa_data[i+3])); 5951 } 5952 } 5953 5954 /** 5955 * ipr_gen_sense - Generate SCSI sense data from an IOASA 5956 * @ioasa: IOASA 5957 * @sense_buf: sense data buffer 5958 * 5959 * Return value: 5960 * none 5961 **/ 5962 static void ipr_gen_sense(struct ipr_cmnd *ipr_cmd) 5963 { 5964 u32 failing_lba; 5965 u8 *sense_buf = ipr_cmd->scsi_cmd->sense_buffer; 5966 struct ipr_resource_entry *res = ipr_cmd->scsi_cmd->device->hostdata; 5967 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa; 5968 u32 ioasc = be32_to_cpu(ioasa->hdr.ioasc); 5969 5970 memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE); 5971 5972 if (ioasc >= IPR_FIRST_DRIVER_IOASC) 5973 return; 5974 5975 ipr_cmd->scsi_cmd->result = SAM_STAT_CHECK_CONDITION; 5976 5977 if (ipr_is_vset_device(res) && 5978 ioasc == IPR_IOASC_MED_DO_NOT_REALLOC && 5979 ioasa->u.vset.failing_lba_hi != 0) { 5980 sense_buf[0] = 0x72; 5981 sense_buf[1] = IPR_IOASC_SENSE_KEY(ioasc); 5982 sense_buf[2] = IPR_IOASC_SENSE_CODE(ioasc); 5983 sense_buf[3] = IPR_IOASC_SENSE_QUAL(ioasc); 5984 5985 sense_buf[7] = 12; 5986 sense_buf[8] = 0; 5987 sense_buf[9] = 0x0A; 5988 sense_buf[10] = 0x80; 5989 5990 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_hi); 5991 5992 sense_buf[12] = (failing_lba & 0xff000000) >> 24; 5993 sense_buf[13] = (failing_lba & 0x00ff0000) >> 16; 5994 sense_buf[14] = (failing_lba & 0x0000ff00) >> 8; 5995 sense_buf[15] = failing_lba & 0x000000ff; 5996 5997 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo); 5998 5999 sense_buf[16] = (failing_lba & 0xff000000) >> 24; 6000 sense_buf[17] = (failing_lba & 0x00ff0000) >> 16; 6001 sense_buf[18] = (failing_lba & 0x0000ff00) >> 8; 6002 sense_buf[19] = failing_lba & 0x000000ff; 6003 } else { 6004 sense_buf[0] = 0x70; 6005 sense_buf[2] = IPR_IOASC_SENSE_KEY(ioasc); 6006 sense_buf[12] = IPR_IOASC_SENSE_CODE(ioasc); 6007 sense_buf[13] = IPR_IOASC_SENSE_QUAL(ioasc); 6008 6009 /* Illegal request */ 6010 if ((IPR_IOASC_SENSE_KEY(ioasc) == 0x05) && 6011 (be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_FIELD_POINTER_VALID)) { 6012 sense_buf[7] = 10; /* additional length */ 6013 6014 /* IOARCB was in error */ 6015 if (IPR_IOASC_SENSE_CODE(ioasc) == 0x24) 6016 sense_buf[15] = 0xC0; 6017 else /* Parameter data was invalid */ 6018 sense_buf[15] = 0x80; 6019 6020 sense_buf[16] = 6021 ((IPR_FIELD_POINTER_MASK & 6022 be32_to_cpu(ioasa->hdr.ioasc_specific)) >> 8) & 0xff; 6023 sense_buf[17] = 6024 (IPR_FIELD_POINTER_MASK & 6025 be32_to_cpu(ioasa->hdr.ioasc_specific)) & 0xff; 6026 } else { 6027 if (ioasc == IPR_IOASC_MED_DO_NOT_REALLOC) { 6028 if (ipr_is_vset_device(res)) 6029 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo); 6030 else 6031 failing_lba = be32_to_cpu(ioasa->u.dasd.failing_lba); 6032 6033 sense_buf[0] |= 0x80; /* Or in the Valid bit */ 6034 sense_buf[3] = (failing_lba & 0xff000000) >> 24; 6035 sense_buf[4] = (failing_lba & 0x00ff0000) >> 16; 6036 sense_buf[5] = (failing_lba & 0x0000ff00) >> 8; 6037 sense_buf[6] = failing_lba & 0x000000ff; 6038 } 6039 6040 sense_buf[7] = 6; /* additional length */ 6041 } 6042 } 6043 } 6044 6045 /** 6046 * ipr_get_autosense - Copy autosense data to sense buffer 6047 * @ipr_cmd: ipr command struct 6048 * 6049 * This function copies the autosense buffer to the buffer 6050 * in the scsi_cmd, if there is autosense available. 6051 * 6052 * Return value: 6053 * 1 if autosense was available / 0 if not 6054 **/ 6055 static int ipr_get_autosense(struct ipr_cmnd *ipr_cmd) 6056 { 6057 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa; 6058 struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64; 6059 6060 if ((be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_AUTOSENSE_VALID) == 0) 6061 return 0; 6062 6063 if (ipr_cmd->ioa_cfg->sis64) 6064 memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa64->auto_sense.data, 6065 min_t(u16, be16_to_cpu(ioasa64->auto_sense.auto_sense_len), 6066 SCSI_SENSE_BUFFERSIZE)); 6067 else 6068 memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa->auto_sense.data, 6069 min_t(u16, be16_to_cpu(ioasa->auto_sense.auto_sense_len), 6070 SCSI_SENSE_BUFFERSIZE)); 6071 return 1; 6072 } 6073 6074 /** 6075 * ipr_erp_start - Process an error response for a SCSI op 6076 * @ioa_cfg: ioa config struct 6077 * @ipr_cmd: ipr command struct 6078 * 6079 * This function determines whether or not to initiate ERP 6080 * on the affected device. 6081 * 6082 * Return value: 6083 * nothing 6084 **/ 6085 static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg, 6086 struct ipr_cmnd *ipr_cmd) 6087 { 6088 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd; 6089 struct ipr_resource_entry *res = scsi_cmd->device->hostdata; 6090 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); 6091 u32 masked_ioasc = ioasc & IPR_IOASC_IOASC_MASK; 6092 6093 if (!res) { 6094 ipr_scsi_eh_done(ipr_cmd); 6095 return; 6096 } 6097 6098 if (!ipr_is_gscsi(res) && masked_ioasc != IPR_IOASC_HW_DEV_BUS_STATUS) 6099 ipr_gen_sense(ipr_cmd); 6100 6101 ipr_dump_ioasa(ioa_cfg, ipr_cmd, res); 6102 6103 switch (masked_ioasc) { 6104 case IPR_IOASC_ABORTED_CMD_TERM_BY_HOST: 6105 if (ipr_is_naca_model(res)) 6106 scsi_cmd->result |= (DID_ABORT << 16); 6107 else 6108 scsi_cmd->result |= (DID_IMM_RETRY << 16); 6109 break; 6110 case IPR_IOASC_IR_RESOURCE_HANDLE: 6111 case IPR_IOASC_IR_NO_CMDS_TO_2ND_IOA: 6112 scsi_cmd->result |= (DID_NO_CONNECT << 16); 6113 break; 6114 case IPR_IOASC_HW_SEL_TIMEOUT: 6115 scsi_cmd->result |= (DID_NO_CONNECT << 16); 6116 if (!ipr_is_naca_model(res)) 6117 res->needs_sync_complete = 1; 6118 break; 6119 case IPR_IOASC_SYNC_REQUIRED: 6120 if (!res->in_erp) 6121 res->needs_sync_complete = 1; 6122 scsi_cmd->result |= (DID_IMM_RETRY << 16); 6123 break; 6124 case IPR_IOASC_MED_DO_NOT_REALLOC: /* prevent retries */ 6125 case IPR_IOASA_IR_DUAL_IOA_DISABLED: 6126 scsi_cmd->result |= (DID_PASSTHROUGH << 16); 6127 break; 6128 case IPR_IOASC_BUS_WAS_RESET: 6129 case IPR_IOASC_BUS_WAS_RESET_BY_OTHER: 6130 /* 6131 * Report the bus reset and ask for a retry. The device 6132 * will give CC/UA the next command. 6133 */ 6134 if (!res->resetting_device) 6135 scsi_report_bus_reset(ioa_cfg->host, scsi_cmd->device->channel); 6136 scsi_cmd->result |= (DID_ERROR << 16); 6137 if (!ipr_is_naca_model(res)) 6138 res->needs_sync_complete = 1; 6139 break; 6140 case IPR_IOASC_HW_DEV_BUS_STATUS: 6141 scsi_cmd->result |= IPR_IOASC_SENSE_STATUS(ioasc); 6142 if (IPR_IOASC_SENSE_STATUS(ioasc) == SAM_STAT_CHECK_CONDITION) { 6143 if (!ipr_get_autosense(ipr_cmd)) { 6144 if (!ipr_is_naca_model(res)) { 6145 ipr_erp_cancel_all(ipr_cmd); 6146 return; 6147 } 6148 } 6149 } 6150 if (!ipr_is_naca_model(res)) 6151 res->needs_sync_complete = 1; 6152 break; 6153 case IPR_IOASC_NR_INIT_CMD_REQUIRED: 6154 break; 6155 default: 6156 if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR) 6157 scsi_cmd->result |= (DID_ERROR << 16); 6158 if (!ipr_is_vset_device(res) && !ipr_is_naca_model(res)) 6159 res->needs_sync_complete = 1; 6160 break; 6161 } 6162 6163 scsi_dma_unmap(ipr_cmd->scsi_cmd); 6164 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); 6165 scsi_cmd->scsi_done(scsi_cmd); 6166 } 6167 6168 /** 6169 * ipr_scsi_done - mid-layer done function 6170 * @ipr_cmd: ipr command struct 6171 * 6172 * This function is invoked by the interrupt handler for 6173 * ops generated by the SCSI mid-layer 6174 * 6175 * Return value: 6176 * none 6177 **/ 6178 static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd) 6179 { 6180 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 6181 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd; 6182 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); 6183 unsigned long hrrq_flags; 6184 6185 scsi_set_resid(scsi_cmd, be32_to_cpu(ipr_cmd->s.ioasa.hdr.residual_data_len)); 6186 6187 if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) { 6188 scsi_dma_unmap(scsi_cmd); 6189 6190 spin_lock_irqsave(ipr_cmd->hrrq->lock, hrrq_flags); 6191 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); 6192 scsi_cmd->scsi_done(scsi_cmd); 6193 spin_unlock_irqrestore(ipr_cmd->hrrq->lock, hrrq_flags); 6194 } else { 6195 spin_lock_irqsave(ipr_cmd->hrrq->lock, hrrq_flags); 6196 ipr_erp_start(ioa_cfg, ipr_cmd); 6197 spin_unlock_irqrestore(ipr_cmd->hrrq->lock, hrrq_flags); 6198 } 6199 } 6200 6201 /** 6202 * ipr_queuecommand - Queue a mid-layer request 6203 * @shost: scsi host struct 6204 * @scsi_cmd: scsi command struct 6205 * 6206 * This function queues a request generated by the mid-layer. 6207 * 6208 * Return value: 6209 * 0 on success 6210 * SCSI_MLQUEUE_DEVICE_BUSY if device is busy 6211 * SCSI_MLQUEUE_HOST_BUSY if host is busy 6212 **/ 6213 static int ipr_queuecommand(struct Scsi_Host *shost, 6214 struct scsi_cmnd *scsi_cmd) 6215 { 6216 struct ipr_ioa_cfg *ioa_cfg; 6217 struct ipr_resource_entry *res; 6218 struct ipr_ioarcb *ioarcb; 6219 struct ipr_cmnd *ipr_cmd; 6220 unsigned long hrrq_flags, lock_flags; 6221 int rc; 6222 struct ipr_hrr_queue *hrrq; 6223 int hrrq_id; 6224 6225 ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; 6226 6227 scsi_cmd->result = (DID_OK << 16); 6228 res = scsi_cmd->device->hostdata; 6229 6230 if (ipr_is_gata(res) && res->sata_port) { 6231 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 6232 rc = ata_sas_queuecmd(scsi_cmd, res->sata_port->ap); 6233 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 6234 return rc; 6235 } 6236 6237 hrrq_id = ipr_get_hrrq_index(ioa_cfg); 6238 hrrq = &ioa_cfg->hrrq[hrrq_id]; 6239 6240 spin_lock_irqsave(hrrq->lock, hrrq_flags); 6241 /* 6242 * We are currently blocking all devices due to a host reset 6243 * We have told the host to stop giving us new requests, but 6244 * ERP ops don't count. FIXME 6245 */ 6246 if (unlikely(!hrrq->allow_cmds && !hrrq->ioa_is_dead && !hrrq->removing_ioa)) { 6247 spin_unlock_irqrestore(hrrq->lock, hrrq_flags); 6248 return SCSI_MLQUEUE_HOST_BUSY; 6249 } 6250 6251 /* 6252 * FIXME - Create scsi_set_host_offline interface 6253 * and the ioa_is_dead check can be removed 6254 */ 6255 if (unlikely(hrrq->ioa_is_dead || hrrq->removing_ioa || !res)) { 6256 spin_unlock_irqrestore(hrrq->lock, hrrq_flags); 6257 goto err_nodev; 6258 } 6259 6260 ipr_cmd = __ipr_get_free_ipr_cmnd(hrrq); 6261 if (ipr_cmd == NULL) { 6262 spin_unlock_irqrestore(hrrq->lock, hrrq_flags); 6263 return SCSI_MLQUEUE_HOST_BUSY; 6264 } 6265 spin_unlock_irqrestore(hrrq->lock, hrrq_flags); 6266 6267 ipr_init_ipr_cmnd(ipr_cmd, ipr_scsi_done); 6268 ioarcb = &ipr_cmd->ioarcb; 6269 6270 memcpy(ioarcb->cmd_pkt.cdb, scsi_cmd->cmnd, scsi_cmd->cmd_len); 6271 ipr_cmd->scsi_cmd = scsi_cmd; 6272 ipr_cmd->done = ipr_scsi_eh_done; 6273 6274 if (ipr_is_gscsi(res) || ipr_is_vset_device(res)) { 6275 if (scsi_cmd->underflow == 0) 6276 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK; 6277 6278 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC; 6279 if (ipr_is_gscsi(res) && res->reset_occurred) { 6280 res->reset_occurred = 0; 6281 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_DELAY_AFTER_RST; 6282 } 6283 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_ALIGNED_BFR; 6284 if (scsi_cmd->flags & SCMD_TAGGED) 6285 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_SIMPLE_TASK; 6286 else 6287 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_UNTAGGED_TASK; 6288 } 6289 6290 if (scsi_cmd->cmnd[0] >= 0xC0 && 6291 (!ipr_is_gscsi(res) || scsi_cmd->cmnd[0] == IPR_QUERY_RSRC_STATE)) { 6292 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD; 6293 } 6294 6295 if (ioa_cfg->sis64) 6296 rc = ipr_build_ioadl64(ioa_cfg, ipr_cmd); 6297 else 6298 rc = ipr_build_ioadl(ioa_cfg, ipr_cmd); 6299 6300 spin_lock_irqsave(hrrq->lock, hrrq_flags); 6301 if (unlikely(rc || (!hrrq->allow_cmds && !hrrq->ioa_is_dead))) { 6302 list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q); 6303 spin_unlock_irqrestore(hrrq->lock, hrrq_flags); 6304 if (!rc) 6305 scsi_dma_unmap(scsi_cmd); 6306 return SCSI_MLQUEUE_HOST_BUSY; 6307 } 6308 6309 if (unlikely(hrrq->ioa_is_dead)) { 6310 list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q); 6311 spin_unlock_irqrestore(hrrq->lock, hrrq_flags); 6312 scsi_dma_unmap(scsi_cmd); 6313 goto err_nodev; 6314 } 6315 6316 ioarcb->res_handle = res->res_handle; 6317 if (res->needs_sync_complete) { 6318 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_SYNC_COMPLETE; 6319 res->needs_sync_complete = 0; 6320 } 6321 list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_pending_q); 6322 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res)); 6323 ipr_send_command(ipr_cmd); 6324 spin_unlock_irqrestore(hrrq->lock, hrrq_flags); 6325 return 0; 6326 6327 err_nodev: 6328 spin_lock_irqsave(hrrq->lock, hrrq_flags); 6329 memset(scsi_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE); 6330 scsi_cmd->result = (DID_NO_CONNECT << 16); 6331 scsi_cmd->scsi_done(scsi_cmd); 6332 spin_unlock_irqrestore(hrrq->lock, hrrq_flags); 6333 return 0; 6334 } 6335 6336 /** 6337 * ipr_ioctl - IOCTL handler 6338 * @sdev: scsi device struct 6339 * @cmd: IOCTL cmd 6340 * @arg: IOCTL arg 6341 * 6342 * Return value: 6343 * 0 on success / other on failure 6344 **/ 6345 static int ipr_ioctl(struct scsi_device *sdev, int cmd, void __user *arg) 6346 { 6347 struct ipr_resource_entry *res; 6348 6349 res = (struct ipr_resource_entry *)sdev->hostdata; 6350 if (res && ipr_is_gata(res)) { 6351 if (cmd == HDIO_GET_IDENTITY) 6352 return -ENOTTY; 6353 return ata_sas_scsi_ioctl(res->sata_port->ap, sdev, cmd, arg); 6354 } 6355 6356 return -EINVAL; 6357 } 6358 6359 /** 6360 * ipr_info - Get information about the card/driver 6361 * @scsi_host: scsi host struct 6362 * 6363 * Return value: 6364 * pointer to buffer with description string 6365 **/ 6366 static const char *ipr_ioa_info(struct Scsi_Host *host) 6367 { 6368 static char buffer[512]; 6369 struct ipr_ioa_cfg *ioa_cfg; 6370 unsigned long lock_flags = 0; 6371 6372 ioa_cfg = (struct ipr_ioa_cfg *) host->hostdata; 6373 6374 spin_lock_irqsave(host->host_lock, lock_flags); 6375 sprintf(buffer, "IBM %X Storage Adapter", ioa_cfg->type); 6376 spin_unlock_irqrestore(host->host_lock, lock_flags); 6377 6378 return buffer; 6379 } 6380 6381 static struct scsi_host_template driver_template = { 6382 .module = THIS_MODULE, 6383 .name = "IPR", 6384 .info = ipr_ioa_info, 6385 .ioctl = ipr_ioctl, 6386 .queuecommand = ipr_queuecommand, 6387 .eh_abort_handler = ipr_eh_abort, 6388 .eh_device_reset_handler = ipr_eh_dev_reset, 6389 .eh_host_reset_handler = ipr_eh_host_reset, 6390 .slave_alloc = ipr_slave_alloc, 6391 .slave_configure = ipr_slave_configure, 6392 .slave_destroy = ipr_slave_destroy, 6393 .scan_finished = ipr_scan_finished, 6394 .target_alloc = ipr_target_alloc, 6395 .target_destroy = ipr_target_destroy, 6396 .change_queue_depth = ipr_change_queue_depth, 6397 .bios_param = ipr_biosparam, 6398 .can_queue = IPR_MAX_COMMANDS, 6399 .this_id = -1, 6400 .sg_tablesize = IPR_MAX_SGLIST, 6401 .max_sectors = IPR_IOA_MAX_SECTORS, 6402 .cmd_per_lun = IPR_MAX_CMD_PER_LUN, 6403 .use_clustering = ENABLE_CLUSTERING, 6404 .shost_attrs = ipr_ioa_attrs, 6405 .sdev_attrs = ipr_dev_attrs, 6406 .proc_name = IPR_NAME, 6407 .use_blk_tags = 1, 6408 }; 6409 6410 /** 6411 * ipr_ata_phy_reset - libata phy_reset handler 6412 * @ap: ata port to reset 6413 * 6414 **/ 6415 static void ipr_ata_phy_reset(struct ata_port *ap) 6416 { 6417 unsigned long flags; 6418 struct ipr_sata_port *sata_port = ap->private_data; 6419 struct ipr_resource_entry *res = sata_port->res; 6420 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg; 6421 int rc; 6422 6423 ENTER; 6424 spin_lock_irqsave(ioa_cfg->host->host_lock, flags); 6425 while (ioa_cfg->in_reset_reload) { 6426 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags); 6427 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); 6428 spin_lock_irqsave(ioa_cfg->host->host_lock, flags); 6429 } 6430 6431 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) 6432 goto out_unlock; 6433 6434 rc = ipr_device_reset(ioa_cfg, res); 6435 6436 if (rc) { 6437 ap->link.device[0].class = ATA_DEV_NONE; 6438 goto out_unlock; 6439 } 6440 6441 ap->link.device[0].class = res->ata_class; 6442 if (ap->link.device[0].class == ATA_DEV_UNKNOWN) 6443 ap->link.device[0].class = ATA_DEV_NONE; 6444 6445 out_unlock: 6446 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags); 6447 LEAVE; 6448 } 6449 6450 /** 6451 * ipr_ata_post_internal - Cleanup after an internal command 6452 * @qc: ATA queued command 6453 * 6454 * Return value: 6455 * none 6456 **/ 6457 static void ipr_ata_post_internal(struct ata_queued_cmd *qc) 6458 { 6459 struct ipr_sata_port *sata_port = qc->ap->private_data; 6460 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg; 6461 struct ipr_cmnd *ipr_cmd; 6462 struct ipr_hrr_queue *hrrq; 6463 unsigned long flags; 6464 6465 spin_lock_irqsave(ioa_cfg->host->host_lock, flags); 6466 while (ioa_cfg->in_reset_reload) { 6467 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags); 6468 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); 6469 spin_lock_irqsave(ioa_cfg->host->host_lock, flags); 6470 } 6471 6472 for_each_hrrq(hrrq, ioa_cfg) { 6473 spin_lock(&hrrq->_lock); 6474 list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) { 6475 if (ipr_cmd->qc == qc) { 6476 ipr_device_reset(ioa_cfg, sata_port->res); 6477 break; 6478 } 6479 } 6480 spin_unlock(&hrrq->_lock); 6481 } 6482 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags); 6483 } 6484 6485 /** 6486 * ipr_copy_sata_tf - Copy a SATA taskfile to an IOA data structure 6487 * @regs: destination 6488 * @tf: source ATA taskfile 6489 * 6490 * Return value: 6491 * none 6492 **/ 6493 static void ipr_copy_sata_tf(struct ipr_ioarcb_ata_regs *regs, 6494 struct ata_taskfile *tf) 6495 { 6496 regs->feature = tf->feature; 6497 regs->nsect = tf->nsect; 6498 regs->lbal = tf->lbal; 6499 regs->lbam = tf->lbam; 6500 regs->lbah = tf->lbah; 6501 regs->device = tf->device; 6502 regs->command = tf->command; 6503 regs->hob_feature = tf->hob_feature; 6504 regs->hob_nsect = tf->hob_nsect; 6505 regs->hob_lbal = tf->hob_lbal; 6506 regs->hob_lbam = tf->hob_lbam; 6507 regs->hob_lbah = tf->hob_lbah; 6508 regs->ctl = tf->ctl; 6509 } 6510 6511 /** 6512 * ipr_sata_done - done function for SATA commands 6513 * @ipr_cmd: ipr command struct 6514 * 6515 * This function is invoked by the interrupt handler for 6516 * ops generated by the SCSI mid-layer to SATA devices 6517 * 6518 * Return value: 6519 * none 6520 **/ 6521 static void ipr_sata_done(struct ipr_cmnd *ipr_cmd) 6522 { 6523 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 6524 struct ata_queued_cmd *qc = ipr_cmd->qc; 6525 struct ipr_sata_port *sata_port = qc->ap->private_data; 6526 struct ipr_resource_entry *res = sata_port->res; 6527 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); 6528 6529 spin_lock(&ipr_cmd->hrrq->_lock); 6530 if (ipr_cmd->ioa_cfg->sis64) 6531 memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata, 6532 sizeof(struct ipr_ioasa_gata)); 6533 else 6534 memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata, 6535 sizeof(struct ipr_ioasa_gata)); 6536 ipr_dump_ioasa(ioa_cfg, ipr_cmd, res); 6537 6538 if (be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc_specific) & IPR_ATA_DEVICE_WAS_RESET) 6539 scsi_report_device_reset(ioa_cfg->host, res->bus, res->target); 6540 6541 if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR) 6542 qc->err_mask |= __ac_err_mask(sata_port->ioasa.status); 6543 else 6544 qc->err_mask |= ac_err_mask(sata_port->ioasa.status); 6545 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); 6546 spin_unlock(&ipr_cmd->hrrq->_lock); 6547 ata_qc_complete(qc); 6548 } 6549 6550 /** 6551 * ipr_build_ata_ioadl64 - Build an ATA scatter/gather list 6552 * @ipr_cmd: ipr command struct 6553 * @qc: ATA queued command 6554 * 6555 **/ 6556 static void ipr_build_ata_ioadl64(struct ipr_cmnd *ipr_cmd, 6557 struct ata_queued_cmd *qc) 6558 { 6559 u32 ioadl_flags = 0; 6560 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; 6561 struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ata_ioadl.ioadl64; 6562 struct ipr_ioadl64_desc *last_ioadl64 = NULL; 6563 int len = qc->nbytes; 6564 struct scatterlist *sg; 6565 unsigned int si; 6566 dma_addr_t dma_addr = ipr_cmd->dma_addr; 6567 6568 if (len == 0) 6569 return; 6570 6571 if (qc->dma_dir == DMA_TO_DEVICE) { 6572 ioadl_flags = IPR_IOADL_FLAGS_WRITE; 6573 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ; 6574 } else if (qc->dma_dir == DMA_FROM_DEVICE) 6575 ioadl_flags = IPR_IOADL_FLAGS_READ; 6576 6577 ioarcb->data_transfer_length = cpu_to_be32(len); 6578 ioarcb->ioadl_len = 6579 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg); 6580 ioarcb->u.sis64_addr_data.data_ioadl_addr = 6581 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ata_ioadl.ioadl64)); 6582 6583 for_each_sg(qc->sg, sg, qc->n_elem, si) { 6584 ioadl64->flags = cpu_to_be32(ioadl_flags); 6585 ioadl64->data_len = cpu_to_be32(sg_dma_len(sg)); 6586 ioadl64->address = cpu_to_be64(sg_dma_address(sg)); 6587 6588 last_ioadl64 = ioadl64; 6589 ioadl64++; 6590 } 6591 6592 if (likely(last_ioadl64)) 6593 last_ioadl64->flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST); 6594 } 6595 6596 /** 6597 * ipr_build_ata_ioadl - Build an ATA scatter/gather list 6598 * @ipr_cmd: ipr command struct 6599 * @qc: ATA queued command 6600 * 6601 **/ 6602 static void ipr_build_ata_ioadl(struct ipr_cmnd *ipr_cmd, 6603 struct ata_queued_cmd *qc) 6604 { 6605 u32 ioadl_flags = 0; 6606 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; 6607 struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl; 6608 struct ipr_ioadl_desc *last_ioadl = NULL; 6609 int len = qc->nbytes; 6610 struct scatterlist *sg; 6611 unsigned int si; 6612 6613 if (len == 0) 6614 return; 6615 6616 if (qc->dma_dir == DMA_TO_DEVICE) { 6617 ioadl_flags = IPR_IOADL_FLAGS_WRITE; 6618 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ; 6619 ioarcb->data_transfer_length = cpu_to_be32(len); 6620 ioarcb->ioadl_len = 6621 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg); 6622 } else if (qc->dma_dir == DMA_FROM_DEVICE) { 6623 ioadl_flags = IPR_IOADL_FLAGS_READ; 6624 ioarcb->read_data_transfer_length = cpu_to_be32(len); 6625 ioarcb->read_ioadl_len = 6626 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg); 6627 } 6628 6629 for_each_sg(qc->sg, sg, qc->n_elem, si) { 6630 ioadl->flags_and_data_len = cpu_to_be32(ioadl_flags | sg_dma_len(sg)); 6631 ioadl->address = cpu_to_be32(sg_dma_address(sg)); 6632 6633 last_ioadl = ioadl; 6634 ioadl++; 6635 } 6636 6637 if (likely(last_ioadl)) 6638 last_ioadl->flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST); 6639 } 6640 6641 /** 6642 * ipr_qc_defer - Get a free ipr_cmd 6643 * @qc: queued command 6644 * 6645 * Return value: 6646 * 0 if success 6647 **/ 6648 static int ipr_qc_defer(struct ata_queued_cmd *qc) 6649 { 6650 struct ata_port *ap = qc->ap; 6651 struct ipr_sata_port *sata_port = ap->private_data; 6652 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg; 6653 struct ipr_cmnd *ipr_cmd; 6654 struct ipr_hrr_queue *hrrq; 6655 int hrrq_id; 6656 6657 hrrq_id = ipr_get_hrrq_index(ioa_cfg); 6658 hrrq = &ioa_cfg->hrrq[hrrq_id]; 6659 6660 qc->lldd_task = NULL; 6661 spin_lock(&hrrq->_lock); 6662 if (unlikely(hrrq->ioa_is_dead)) { 6663 spin_unlock(&hrrq->_lock); 6664 return 0; 6665 } 6666 6667 if (unlikely(!hrrq->allow_cmds)) { 6668 spin_unlock(&hrrq->_lock); 6669 return ATA_DEFER_LINK; 6670 } 6671 6672 ipr_cmd = __ipr_get_free_ipr_cmnd(hrrq); 6673 if (ipr_cmd == NULL) { 6674 spin_unlock(&hrrq->_lock); 6675 return ATA_DEFER_LINK; 6676 } 6677 6678 qc->lldd_task = ipr_cmd; 6679 spin_unlock(&hrrq->_lock); 6680 return 0; 6681 } 6682 6683 /** 6684 * ipr_qc_issue - Issue a SATA qc to a device 6685 * @qc: queued command 6686 * 6687 * Return value: 6688 * 0 if success 6689 **/ 6690 static unsigned int ipr_qc_issue(struct ata_queued_cmd *qc) 6691 { 6692 struct ata_port *ap = qc->ap; 6693 struct ipr_sata_port *sata_port = ap->private_data; 6694 struct ipr_resource_entry *res = sata_port->res; 6695 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg; 6696 struct ipr_cmnd *ipr_cmd; 6697 struct ipr_ioarcb *ioarcb; 6698 struct ipr_ioarcb_ata_regs *regs; 6699 6700 if (qc->lldd_task == NULL) 6701 ipr_qc_defer(qc); 6702 6703 ipr_cmd = qc->lldd_task; 6704 if (ipr_cmd == NULL) 6705 return AC_ERR_SYSTEM; 6706 6707 qc->lldd_task = NULL; 6708 spin_lock(&ipr_cmd->hrrq->_lock); 6709 if (unlikely(!ipr_cmd->hrrq->allow_cmds || 6710 ipr_cmd->hrrq->ioa_is_dead)) { 6711 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); 6712 spin_unlock(&ipr_cmd->hrrq->_lock); 6713 return AC_ERR_SYSTEM; 6714 } 6715 6716 ipr_init_ipr_cmnd(ipr_cmd, ipr_lock_and_done); 6717 ioarcb = &ipr_cmd->ioarcb; 6718 6719 if (ioa_cfg->sis64) { 6720 regs = &ipr_cmd->i.ata_ioadl.regs; 6721 ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb)); 6722 } else 6723 regs = &ioarcb->u.add_data.u.regs; 6724 6725 memset(regs, 0, sizeof(*regs)); 6726 ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(*regs)); 6727 6728 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q); 6729 ipr_cmd->qc = qc; 6730 ipr_cmd->done = ipr_sata_done; 6731 ipr_cmd->ioarcb.res_handle = res->res_handle; 6732 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_ATA_PASSTHRU; 6733 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC; 6734 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK; 6735 ipr_cmd->dma_use_sg = qc->n_elem; 6736 6737 if (ioa_cfg->sis64) 6738 ipr_build_ata_ioadl64(ipr_cmd, qc); 6739 else 6740 ipr_build_ata_ioadl(ipr_cmd, qc); 6741 6742 regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION; 6743 ipr_copy_sata_tf(regs, &qc->tf); 6744 memcpy(ioarcb->cmd_pkt.cdb, qc->cdb, IPR_MAX_CDB_LEN); 6745 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res)); 6746 6747 switch (qc->tf.protocol) { 6748 case ATA_PROT_NODATA: 6749 case ATA_PROT_PIO: 6750 break; 6751 6752 case ATA_PROT_DMA: 6753 regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA; 6754 break; 6755 6756 case ATAPI_PROT_PIO: 6757 case ATAPI_PROT_NODATA: 6758 regs->flags |= IPR_ATA_FLAG_PACKET_CMD; 6759 break; 6760 6761 case ATAPI_PROT_DMA: 6762 regs->flags |= IPR_ATA_FLAG_PACKET_CMD; 6763 regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA; 6764 break; 6765 6766 default: 6767 WARN_ON(1); 6768 spin_unlock(&ipr_cmd->hrrq->_lock); 6769 return AC_ERR_INVALID; 6770 } 6771 6772 ipr_send_command(ipr_cmd); 6773 spin_unlock(&ipr_cmd->hrrq->_lock); 6774 6775 return 0; 6776 } 6777 6778 /** 6779 * ipr_qc_fill_rtf - Read result TF 6780 * @qc: ATA queued command 6781 * 6782 * Return value: 6783 * true 6784 **/ 6785 static bool ipr_qc_fill_rtf(struct ata_queued_cmd *qc) 6786 { 6787 struct ipr_sata_port *sata_port = qc->ap->private_data; 6788 struct ipr_ioasa_gata *g = &sata_port->ioasa; 6789 struct ata_taskfile *tf = &qc->result_tf; 6790 6791 tf->feature = g->error; 6792 tf->nsect = g->nsect; 6793 tf->lbal = g->lbal; 6794 tf->lbam = g->lbam; 6795 tf->lbah = g->lbah; 6796 tf->device = g->device; 6797 tf->command = g->status; 6798 tf->hob_nsect = g->hob_nsect; 6799 tf->hob_lbal = g->hob_lbal; 6800 tf->hob_lbam = g->hob_lbam; 6801 tf->hob_lbah = g->hob_lbah; 6802 6803 return true; 6804 } 6805 6806 static struct ata_port_operations ipr_sata_ops = { 6807 .phy_reset = ipr_ata_phy_reset, 6808 .hardreset = ipr_sata_reset, 6809 .post_internal_cmd = ipr_ata_post_internal, 6810 .qc_prep = ata_noop_qc_prep, 6811 .qc_defer = ipr_qc_defer, 6812 .qc_issue = ipr_qc_issue, 6813 .qc_fill_rtf = ipr_qc_fill_rtf, 6814 .port_start = ata_sas_port_start, 6815 .port_stop = ata_sas_port_stop 6816 }; 6817 6818 static struct ata_port_info sata_port_info = { 6819 .flags = ATA_FLAG_SATA | ATA_FLAG_PIO_DMA, 6820 .pio_mask = ATA_PIO4_ONLY, 6821 .mwdma_mask = ATA_MWDMA2, 6822 .udma_mask = ATA_UDMA6, 6823 .port_ops = &ipr_sata_ops 6824 }; 6825 6826 #ifdef CONFIG_PPC_PSERIES 6827 static const u16 ipr_blocked_processors[] = { 6828 PVR_NORTHSTAR, 6829 PVR_PULSAR, 6830 PVR_POWER4, 6831 PVR_ICESTAR, 6832 PVR_SSTAR, 6833 PVR_POWER4p, 6834 PVR_630, 6835 PVR_630p 6836 }; 6837 6838 /** 6839 * ipr_invalid_adapter - Determine if this adapter is supported on this hardware 6840 * @ioa_cfg: ioa cfg struct 6841 * 6842 * Adapters that use Gemstone revision < 3.1 do not work reliably on 6843 * certain pSeries hardware. This function determines if the given 6844 * adapter is in one of these confgurations or not. 6845 * 6846 * Return value: 6847 * 1 if adapter is not supported / 0 if adapter is supported 6848 **/ 6849 static int ipr_invalid_adapter(struct ipr_ioa_cfg *ioa_cfg) 6850 { 6851 int i; 6852 6853 if ((ioa_cfg->type == 0x5702) && (ioa_cfg->pdev->revision < 4)) { 6854 for (i = 0; i < ARRAY_SIZE(ipr_blocked_processors); i++) { 6855 if (pvr_version_is(ipr_blocked_processors[i])) 6856 return 1; 6857 } 6858 } 6859 return 0; 6860 } 6861 #else 6862 #define ipr_invalid_adapter(ioa_cfg) 0 6863 #endif 6864 6865 /** 6866 * ipr_ioa_bringdown_done - IOA bring down completion. 6867 * @ipr_cmd: ipr command struct 6868 * 6869 * This function processes the completion of an adapter bring down. 6870 * It wakes any reset sleepers. 6871 * 6872 * Return value: 6873 * IPR_RC_JOB_RETURN 6874 **/ 6875 static int ipr_ioa_bringdown_done(struct ipr_cmnd *ipr_cmd) 6876 { 6877 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 6878 int i; 6879 6880 ENTER; 6881 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) { 6882 ipr_trace; 6883 spin_unlock_irq(ioa_cfg->host->host_lock); 6884 scsi_unblock_requests(ioa_cfg->host); 6885 spin_lock_irq(ioa_cfg->host->host_lock); 6886 } 6887 6888 ioa_cfg->in_reset_reload = 0; 6889 ioa_cfg->reset_retries = 0; 6890 for (i = 0; i < ioa_cfg->hrrq_num; i++) { 6891 spin_lock(&ioa_cfg->hrrq[i]._lock); 6892 ioa_cfg->hrrq[i].ioa_is_dead = 1; 6893 spin_unlock(&ioa_cfg->hrrq[i]._lock); 6894 } 6895 wmb(); 6896 6897 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); 6898 wake_up_all(&ioa_cfg->reset_wait_q); 6899 LEAVE; 6900 6901 return IPR_RC_JOB_RETURN; 6902 } 6903 6904 /** 6905 * ipr_ioa_reset_done - IOA reset completion. 6906 * @ipr_cmd: ipr command struct 6907 * 6908 * This function processes the completion of an adapter reset. 6909 * It schedules any necessary mid-layer add/removes and 6910 * wakes any reset sleepers. 6911 * 6912 * Return value: 6913 * IPR_RC_JOB_RETURN 6914 **/ 6915 static int ipr_ioa_reset_done(struct ipr_cmnd *ipr_cmd) 6916 { 6917 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 6918 struct ipr_resource_entry *res; 6919 struct ipr_hostrcb *hostrcb, *temp; 6920 int i = 0, j; 6921 6922 ENTER; 6923 ioa_cfg->in_reset_reload = 0; 6924 for (j = 0; j < ioa_cfg->hrrq_num; j++) { 6925 spin_lock(&ioa_cfg->hrrq[j]._lock); 6926 ioa_cfg->hrrq[j].allow_cmds = 1; 6927 spin_unlock(&ioa_cfg->hrrq[j]._lock); 6928 } 6929 wmb(); 6930 ioa_cfg->reset_cmd = NULL; 6931 ioa_cfg->doorbell |= IPR_RUNTIME_RESET; 6932 6933 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) { 6934 if (res->add_to_ml || res->del_from_ml) { 6935 ipr_trace; 6936 break; 6937 } 6938 } 6939 schedule_work(&ioa_cfg->work_q); 6940 6941 list_for_each_entry_safe(hostrcb, temp, &ioa_cfg->hostrcb_free_q, queue) { 6942 list_del(&hostrcb->queue); 6943 if (i++ < IPR_NUM_LOG_HCAMS) 6944 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb); 6945 else 6946 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb); 6947 } 6948 6949 scsi_report_bus_reset(ioa_cfg->host, IPR_VSET_BUS); 6950 dev_info(&ioa_cfg->pdev->dev, "IOA initialized.\n"); 6951 6952 ioa_cfg->reset_retries = 0; 6953 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); 6954 wake_up_all(&ioa_cfg->reset_wait_q); 6955 6956 spin_unlock(ioa_cfg->host->host_lock); 6957 scsi_unblock_requests(ioa_cfg->host); 6958 spin_lock(ioa_cfg->host->host_lock); 6959 6960 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) 6961 scsi_block_requests(ioa_cfg->host); 6962 6963 schedule_work(&ioa_cfg->work_q); 6964 LEAVE; 6965 return IPR_RC_JOB_RETURN; 6966 } 6967 6968 /** 6969 * ipr_set_sup_dev_dflt - Initialize a Set Supported Device buffer 6970 * @supported_dev: supported device struct 6971 * @vpids: vendor product id struct 6972 * 6973 * Return value: 6974 * none 6975 **/ 6976 static void ipr_set_sup_dev_dflt(struct ipr_supported_device *supported_dev, 6977 struct ipr_std_inq_vpids *vpids) 6978 { 6979 memset(supported_dev, 0, sizeof(struct ipr_supported_device)); 6980 memcpy(&supported_dev->vpids, vpids, sizeof(struct ipr_std_inq_vpids)); 6981 supported_dev->num_records = 1; 6982 supported_dev->data_length = 6983 cpu_to_be16(sizeof(struct ipr_supported_device)); 6984 supported_dev->reserved = 0; 6985 } 6986 6987 /** 6988 * ipr_set_supported_devs - Send Set Supported Devices for a device 6989 * @ipr_cmd: ipr command struct 6990 * 6991 * This function sends a Set Supported Devices to the adapter 6992 * 6993 * Return value: 6994 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN 6995 **/ 6996 static int ipr_set_supported_devs(struct ipr_cmnd *ipr_cmd) 6997 { 6998 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 6999 struct ipr_supported_device *supp_dev = &ioa_cfg->vpd_cbs->supp_dev; 7000 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; 7001 struct ipr_resource_entry *res = ipr_cmd->u.res; 7002 7003 ipr_cmd->job_step = ipr_ioa_reset_done; 7004 7005 list_for_each_entry_continue(res, &ioa_cfg->used_res_q, queue) { 7006 if (!ipr_is_scsi_disk(res)) 7007 continue; 7008 7009 ipr_cmd->u.res = res; 7010 ipr_set_sup_dev_dflt(supp_dev, &res->std_inq_data.vpids); 7011 7012 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE); 7013 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ; 7014 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD; 7015 7016 ioarcb->cmd_pkt.cdb[0] = IPR_SET_SUPPORTED_DEVICES; 7017 ioarcb->cmd_pkt.cdb[1] = IPR_SET_ALL_SUPPORTED_DEVICES; 7018 ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_supported_device) >> 8) & 0xff; 7019 ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_supported_device) & 0xff; 7020 7021 ipr_init_ioadl(ipr_cmd, 7022 ioa_cfg->vpd_cbs_dma + 7023 offsetof(struct ipr_misc_cbs, supp_dev), 7024 sizeof(struct ipr_supported_device), 7025 IPR_IOADL_FLAGS_WRITE_LAST); 7026 7027 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, 7028 IPR_SET_SUP_DEVICE_TIMEOUT); 7029 7030 if (!ioa_cfg->sis64) 7031 ipr_cmd->job_step = ipr_set_supported_devs; 7032 LEAVE; 7033 return IPR_RC_JOB_RETURN; 7034 } 7035 7036 LEAVE; 7037 return IPR_RC_JOB_CONTINUE; 7038 } 7039 7040 /** 7041 * ipr_get_mode_page - Locate specified mode page 7042 * @mode_pages: mode page buffer 7043 * @page_code: page code to find 7044 * @len: minimum required length for mode page 7045 * 7046 * Return value: 7047 * pointer to mode page / NULL on failure 7048 **/ 7049 static void *ipr_get_mode_page(struct ipr_mode_pages *mode_pages, 7050 u32 page_code, u32 len) 7051 { 7052 struct ipr_mode_page_hdr *mode_hdr; 7053 u32 page_length; 7054 u32 length; 7055 7056 if (!mode_pages || (mode_pages->hdr.length == 0)) 7057 return NULL; 7058 7059 length = (mode_pages->hdr.length + 1) - 4 - mode_pages->hdr.block_desc_len; 7060 mode_hdr = (struct ipr_mode_page_hdr *) 7061 (mode_pages->data + mode_pages->hdr.block_desc_len); 7062 7063 while (length) { 7064 if (IPR_GET_MODE_PAGE_CODE(mode_hdr) == page_code) { 7065 if (mode_hdr->page_length >= (len - sizeof(struct ipr_mode_page_hdr))) 7066 return mode_hdr; 7067 break; 7068 } else { 7069 page_length = (sizeof(struct ipr_mode_page_hdr) + 7070 mode_hdr->page_length); 7071 length -= page_length; 7072 mode_hdr = (struct ipr_mode_page_hdr *) 7073 ((unsigned long)mode_hdr + page_length); 7074 } 7075 } 7076 return NULL; 7077 } 7078 7079 /** 7080 * ipr_check_term_power - Check for term power errors 7081 * @ioa_cfg: ioa config struct 7082 * @mode_pages: IOAFP mode pages buffer 7083 * 7084 * Check the IOAFP's mode page 28 for term power errors 7085 * 7086 * Return value: 7087 * nothing 7088 **/ 7089 static void ipr_check_term_power(struct ipr_ioa_cfg *ioa_cfg, 7090 struct ipr_mode_pages *mode_pages) 7091 { 7092 int i; 7093 int entry_length; 7094 struct ipr_dev_bus_entry *bus; 7095 struct ipr_mode_page28 *mode_page; 7096 7097 mode_page = ipr_get_mode_page(mode_pages, 0x28, 7098 sizeof(struct ipr_mode_page28)); 7099 7100 entry_length = mode_page->entry_length; 7101 7102 bus = mode_page->bus; 7103 7104 for (i = 0; i < mode_page->num_entries; i++) { 7105 if (bus->flags & IPR_SCSI_ATTR_NO_TERM_PWR) { 7106 dev_err(&ioa_cfg->pdev->dev, 7107 "Term power is absent on scsi bus %d\n", 7108 bus->res_addr.bus); 7109 } 7110 7111 bus = (struct ipr_dev_bus_entry *)((char *)bus + entry_length); 7112 } 7113 } 7114 7115 /** 7116 * ipr_scsi_bus_speed_limit - Limit the SCSI speed based on SES table 7117 * @ioa_cfg: ioa config struct 7118 * 7119 * Looks through the config table checking for SES devices. If 7120 * the SES device is in the SES table indicating a maximum SCSI 7121 * bus speed, the speed is limited for the bus. 7122 * 7123 * Return value: 7124 * none 7125 **/ 7126 static void ipr_scsi_bus_speed_limit(struct ipr_ioa_cfg *ioa_cfg) 7127 { 7128 u32 max_xfer_rate; 7129 int i; 7130 7131 for (i = 0; i < IPR_MAX_NUM_BUSES; i++) { 7132 max_xfer_rate = ipr_get_max_scsi_speed(ioa_cfg, i, 7133 ioa_cfg->bus_attr[i].bus_width); 7134 7135 if (max_xfer_rate < ioa_cfg->bus_attr[i].max_xfer_rate) 7136 ioa_cfg->bus_attr[i].max_xfer_rate = max_xfer_rate; 7137 } 7138 } 7139 7140 /** 7141 * ipr_modify_ioafp_mode_page_28 - Modify IOAFP Mode Page 28 7142 * @ioa_cfg: ioa config struct 7143 * @mode_pages: mode page 28 buffer 7144 * 7145 * Updates mode page 28 based on driver configuration 7146 * 7147 * Return value: 7148 * none 7149 **/ 7150 static void ipr_modify_ioafp_mode_page_28(struct ipr_ioa_cfg *ioa_cfg, 7151 struct ipr_mode_pages *mode_pages) 7152 { 7153 int i, entry_length; 7154 struct ipr_dev_bus_entry *bus; 7155 struct ipr_bus_attributes *bus_attr; 7156 struct ipr_mode_page28 *mode_page; 7157 7158 mode_page = ipr_get_mode_page(mode_pages, 0x28, 7159 sizeof(struct ipr_mode_page28)); 7160 7161 entry_length = mode_page->entry_length; 7162 7163 /* Loop for each device bus entry */ 7164 for (i = 0, bus = mode_page->bus; 7165 i < mode_page->num_entries; 7166 i++, bus = (struct ipr_dev_bus_entry *)((u8 *)bus + entry_length)) { 7167 if (bus->res_addr.bus > IPR_MAX_NUM_BUSES) { 7168 dev_err(&ioa_cfg->pdev->dev, 7169 "Invalid resource address reported: 0x%08X\n", 7170 IPR_GET_PHYS_LOC(bus->res_addr)); 7171 continue; 7172 } 7173 7174 bus_attr = &ioa_cfg->bus_attr[i]; 7175 bus->extended_reset_delay = IPR_EXTENDED_RESET_DELAY; 7176 bus->bus_width = bus_attr->bus_width; 7177 bus->max_xfer_rate = cpu_to_be32(bus_attr->max_xfer_rate); 7178 bus->flags &= ~IPR_SCSI_ATTR_QAS_MASK; 7179 if (bus_attr->qas_enabled) 7180 bus->flags |= IPR_SCSI_ATTR_ENABLE_QAS; 7181 else 7182 bus->flags |= IPR_SCSI_ATTR_DISABLE_QAS; 7183 } 7184 } 7185 7186 /** 7187 * ipr_build_mode_select - Build a mode select command 7188 * @ipr_cmd: ipr command struct 7189 * @res_handle: resource handle to send command to 7190 * @parm: Byte 2 of Mode Sense command 7191 * @dma_addr: DMA buffer address 7192 * @xfer_len: data transfer length 7193 * 7194 * Return value: 7195 * none 7196 **/ 7197 static void ipr_build_mode_select(struct ipr_cmnd *ipr_cmd, 7198 __be32 res_handle, u8 parm, 7199 dma_addr_t dma_addr, u8 xfer_len) 7200 { 7201 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; 7202 7203 ioarcb->res_handle = res_handle; 7204 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB; 7205 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ; 7206 ioarcb->cmd_pkt.cdb[0] = MODE_SELECT; 7207 ioarcb->cmd_pkt.cdb[1] = parm; 7208 ioarcb->cmd_pkt.cdb[4] = xfer_len; 7209 7210 ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_WRITE_LAST); 7211 } 7212 7213 /** 7214 * ipr_ioafp_mode_select_page28 - Issue Mode Select Page 28 to IOA 7215 * @ipr_cmd: ipr command struct 7216 * 7217 * This function sets up the SCSI bus attributes and sends 7218 * a Mode Select for Page 28 to activate them. 7219 * 7220 * Return value: 7221 * IPR_RC_JOB_RETURN 7222 **/ 7223 static int ipr_ioafp_mode_select_page28(struct ipr_cmnd *ipr_cmd) 7224 { 7225 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 7226 struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages; 7227 int length; 7228 7229 ENTER; 7230 ipr_scsi_bus_speed_limit(ioa_cfg); 7231 ipr_check_term_power(ioa_cfg, mode_pages); 7232 ipr_modify_ioafp_mode_page_28(ioa_cfg, mode_pages); 7233 length = mode_pages->hdr.length + 1; 7234 mode_pages->hdr.length = 0; 7235 7236 ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11, 7237 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages), 7238 length); 7239 7240 ipr_cmd->job_step = ipr_set_supported_devs; 7241 ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next, 7242 struct ipr_resource_entry, queue); 7243 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT); 7244 7245 LEAVE; 7246 return IPR_RC_JOB_RETURN; 7247 } 7248 7249 /** 7250 * ipr_build_mode_sense - Builds a mode sense command 7251 * @ipr_cmd: ipr command struct 7252 * @res: resource entry struct 7253 * @parm: Byte 2 of mode sense command 7254 * @dma_addr: DMA address of mode sense buffer 7255 * @xfer_len: Size of DMA buffer 7256 * 7257 * Return value: 7258 * none 7259 **/ 7260 static void ipr_build_mode_sense(struct ipr_cmnd *ipr_cmd, 7261 __be32 res_handle, 7262 u8 parm, dma_addr_t dma_addr, u8 xfer_len) 7263 { 7264 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; 7265 7266 ioarcb->res_handle = res_handle; 7267 ioarcb->cmd_pkt.cdb[0] = MODE_SENSE; 7268 ioarcb->cmd_pkt.cdb[2] = parm; 7269 ioarcb->cmd_pkt.cdb[4] = xfer_len; 7270 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB; 7271 7272 ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST); 7273 } 7274 7275 /** 7276 * ipr_reset_cmd_failed - Handle failure of IOA reset command 7277 * @ipr_cmd: ipr command struct 7278 * 7279 * This function handles the failure of an IOA bringup command. 7280 * 7281 * Return value: 7282 * IPR_RC_JOB_RETURN 7283 **/ 7284 static int ipr_reset_cmd_failed(struct ipr_cmnd *ipr_cmd) 7285 { 7286 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 7287 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); 7288 7289 dev_err(&ioa_cfg->pdev->dev, 7290 "0x%02X failed with IOASC: 0x%08X\n", 7291 ipr_cmd->ioarcb.cmd_pkt.cdb[0], ioasc); 7292 7293 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE); 7294 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); 7295 return IPR_RC_JOB_RETURN; 7296 } 7297 7298 /** 7299 * ipr_reset_mode_sense_failed - Handle failure of IOAFP mode sense 7300 * @ipr_cmd: ipr command struct 7301 * 7302 * This function handles the failure of a Mode Sense to the IOAFP. 7303 * Some adapters do not handle all mode pages. 7304 * 7305 * Return value: 7306 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN 7307 **/ 7308 static int ipr_reset_mode_sense_failed(struct ipr_cmnd *ipr_cmd) 7309 { 7310 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 7311 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); 7312 7313 if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) { 7314 ipr_cmd->job_step = ipr_set_supported_devs; 7315 ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next, 7316 struct ipr_resource_entry, queue); 7317 return IPR_RC_JOB_CONTINUE; 7318 } 7319 7320 return ipr_reset_cmd_failed(ipr_cmd); 7321 } 7322 7323 /** 7324 * ipr_ioafp_mode_sense_page28 - Issue Mode Sense Page 28 to IOA 7325 * @ipr_cmd: ipr command struct 7326 * 7327 * This function send a Page 28 mode sense to the IOA to 7328 * retrieve SCSI bus attributes. 7329 * 7330 * Return value: 7331 * IPR_RC_JOB_RETURN 7332 **/ 7333 static int ipr_ioafp_mode_sense_page28(struct ipr_cmnd *ipr_cmd) 7334 { 7335 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 7336 7337 ENTER; 7338 ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 7339 0x28, ioa_cfg->vpd_cbs_dma + 7340 offsetof(struct ipr_misc_cbs, mode_pages), 7341 sizeof(struct ipr_mode_pages)); 7342 7343 ipr_cmd->job_step = ipr_ioafp_mode_select_page28; 7344 ipr_cmd->job_step_failed = ipr_reset_mode_sense_failed; 7345 7346 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT); 7347 7348 LEAVE; 7349 return IPR_RC_JOB_RETURN; 7350 } 7351 7352 /** 7353 * ipr_ioafp_mode_select_page24 - Issue Mode Select to IOA 7354 * @ipr_cmd: ipr command struct 7355 * 7356 * This function enables dual IOA RAID support if possible. 7357 * 7358 * Return value: 7359 * IPR_RC_JOB_RETURN 7360 **/ 7361 static int ipr_ioafp_mode_select_page24(struct ipr_cmnd *ipr_cmd) 7362 { 7363 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 7364 struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages; 7365 struct ipr_mode_page24 *mode_page; 7366 int length; 7367 7368 ENTER; 7369 mode_page = ipr_get_mode_page(mode_pages, 0x24, 7370 sizeof(struct ipr_mode_page24)); 7371 7372 if (mode_page) 7373 mode_page->flags |= IPR_ENABLE_DUAL_IOA_AF; 7374 7375 length = mode_pages->hdr.length + 1; 7376 mode_pages->hdr.length = 0; 7377 7378 ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11, 7379 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages), 7380 length); 7381 7382 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28; 7383 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT); 7384 7385 LEAVE; 7386 return IPR_RC_JOB_RETURN; 7387 } 7388 7389 /** 7390 * ipr_reset_mode_sense_page24_failed - Handle failure of IOAFP mode sense 7391 * @ipr_cmd: ipr command struct 7392 * 7393 * This function handles the failure of a Mode Sense to the IOAFP. 7394 * Some adapters do not handle all mode pages. 7395 * 7396 * Return value: 7397 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN 7398 **/ 7399 static int ipr_reset_mode_sense_page24_failed(struct ipr_cmnd *ipr_cmd) 7400 { 7401 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); 7402 7403 if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) { 7404 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28; 7405 return IPR_RC_JOB_CONTINUE; 7406 } 7407 7408 return ipr_reset_cmd_failed(ipr_cmd); 7409 } 7410 7411 /** 7412 * ipr_ioafp_mode_sense_page24 - Issue Page 24 Mode Sense to IOA 7413 * @ipr_cmd: ipr command struct 7414 * 7415 * This function send a mode sense to the IOA to retrieve 7416 * the IOA Advanced Function Control mode page. 7417 * 7418 * Return value: 7419 * IPR_RC_JOB_RETURN 7420 **/ 7421 static int ipr_ioafp_mode_sense_page24(struct ipr_cmnd *ipr_cmd) 7422 { 7423 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 7424 7425 ENTER; 7426 ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 7427 0x24, ioa_cfg->vpd_cbs_dma + 7428 offsetof(struct ipr_misc_cbs, mode_pages), 7429 sizeof(struct ipr_mode_pages)); 7430 7431 ipr_cmd->job_step = ipr_ioafp_mode_select_page24; 7432 ipr_cmd->job_step_failed = ipr_reset_mode_sense_page24_failed; 7433 7434 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT); 7435 7436 LEAVE; 7437 return IPR_RC_JOB_RETURN; 7438 } 7439 7440 /** 7441 * ipr_init_res_table - Initialize the resource table 7442 * @ipr_cmd: ipr command struct 7443 * 7444 * This function looks through the existing resource table, comparing 7445 * it with the config table. This function will take care of old/new 7446 * devices and schedule adding/removing them from the mid-layer 7447 * as appropriate. 7448 * 7449 * Return value: 7450 * IPR_RC_JOB_CONTINUE 7451 **/ 7452 static int ipr_init_res_table(struct ipr_cmnd *ipr_cmd) 7453 { 7454 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 7455 struct ipr_resource_entry *res, *temp; 7456 struct ipr_config_table_entry_wrapper cfgtew; 7457 int entries, found, flag, i; 7458 LIST_HEAD(old_res); 7459 7460 ENTER; 7461 if (ioa_cfg->sis64) 7462 flag = ioa_cfg->u.cfg_table64->hdr64.flags; 7463 else 7464 flag = ioa_cfg->u.cfg_table->hdr.flags; 7465 7466 if (flag & IPR_UCODE_DOWNLOAD_REQ) 7467 dev_err(&ioa_cfg->pdev->dev, "Microcode download required\n"); 7468 7469 list_for_each_entry_safe(res, temp, &ioa_cfg->used_res_q, queue) 7470 list_move_tail(&res->queue, &old_res); 7471 7472 if (ioa_cfg->sis64) 7473 entries = be16_to_cpu(ioa_cfg->u.cfg_table64->hdr64.num_entries); 7474 else 7475 entries = ioa_cfg->u.cfg_table->hdr.num_entries; 7476 7477 for (i = 0; i < entries; i++) { 7478 if (ioa_cfg->sis64) 7479 cfgtew.u.cfgte64 = &ioa_cfg->u.cfg_table64->dev[i]; 7480 else 7481 cfgtew.u.cfgte = &ioa_cfg->u.cfg_table->dev[i]; 7482 found = 0; 7483 7484 list_for_each_entry_safe(res, temp, &old_res, queue) { 7485 if (ipr_is_same_device(res, &cfgtew)) { 7486 list_move_tail(&res->queue, &ioa_cfg->used_res_q); 7487 found = 1; 7488 break; 7489 } 7490 } 7491 7492 if (!found) { 7493 if (list_empty(&ioa_cfg->free_res_q)) { 7494 dev_err(&ioa_cfg->pdev->dev, "Too many devices attached\n"); 7495 break; 7496 } 7497 7498 found = 1; 7499 res = list_entry(ioa_cfg->free_res_q.next, 7500 struct ipr_resource_entry, queue); 7501 list_move_tail(&res->queue, &ioa_cfg->used_res_q); 7502 ipr_init_res_entry(res, &cfgtew); 7503 res->add_to_ml = 1; 7504 } else if (res->sdev && (ipr_is_vset_device(res) || ipr_is_scsi_disk(res))) 7505 res->sdev->allow_restart = 1; 7506 7507 if (found) 7508 ipr_update_res_entry(res, &cfgtew); 7509 } 7510 7511 list_for_each_entry_safe(res, temp, &old_res, queue) { 7512 if (res->sdev) { 7513 res->del_from_ml = 1; 7514 res->res_handle = IPR_INVALID_RES_HANDLE; 7515 list_move_tail(&res->queue, &ioa_cfg->used_res_q); 7516 } 7517 } 7518 7519 list_for_each_entry_safe(res, temp, &old_res, queue) { 7520 ipr_clear_res_target(res); 7521 list_move_tail(&res->queue, &ioa_cfg->free_res_q); 7522 } 7523 7524 if (ioa_cfg->dual_raid && ipr_dual_ioa_raid) 7525 ipr_cmd->job_step = ipr_ioafp_mode_sense_page24; 7526 else 7527 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28; 7528 7529 LEAVE; 7530 return IPR_RC_JOB_CONTINUE; 7531 } 7532 7533 /** 7534 * ipr_ioafp_query_ioa_cfg - Send a Query IOA Config to the adapter. 7535 * @ipr_cmd: ipr command struct 7536 * 7537 * This function sends a Query IOA Configuration command 7538 * to the adapter to retrieve the IOA configuration table. 7539 * 7540 * Return value: 7541 * IPR_RC_JOB_RETURN 7542 **/ 7543 static int ipr_ioafp_query_ioa_cfg(struct ipr_cmnd *ipr_cmd) 7544 { 7545 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 7546 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; 7547 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data; 7548 struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap; 7549 7550 ENTER; 7551 if (cap->cap & IPR_CAP_DUAL_IOA_RAID) 7552 ioa_cfg->dual_raid = 1; 7553 dev_info(&ioa_cfg->pdev->dev, "Adapter firmware version: %02X%02X%02X%02X\n", 7554 ucode_vpd->major_release, ucode_vpd->card_type, 7555 ucode_vpd->minor_release[0], ucode_vpd->minor_release[1]); 7556 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD; 7557 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE); 7558 7559 ioarcb->cmd_pkt.cdb[0] = IPR_QUERY_IOA_CONFIG; 7560 ioarcb->cmd_pkt.cdb[6] = (ioa_cfg->cfg_table_size >> 16) & 0xff; 7561 ioarcb->cmd_pkt.cdb[7] = (ioa_cfg->cfg_table_size >> 8) & 0xff; 7562 ioarcb->cmd_pkt.cdb[8] = ioa_cfg->cfg_table_size & 0xff; 7563 7564 ipr_init_ioadl(ipr_cmd, ioa_cfg->cfg_table_dma, ioa_cfg->cfg_table_size, 7565 IPR_IOADL_FLAGS_READ_LAST); 7566 7567 ipr_cmd->job_step = ipr_init_res_table; 7568 7569 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT); 7570 7571 LEAVE; 7572 return IPR_RC_JOB_RETURN; 7573 } 7574 7575 /** 7576 * ipr_ioafp_inquiry - Send an Inquiry to the adapter. 7577 * @ipr_cmd: ipr command struct 7578 * 7579 * This utility function sends an inquiry to the adapter. 7580 * 7581 * Return value: 7582 * none 7583 **/ 7584 static void ipr_ioafp_inquiry(struct ipr_cmnd *ipr_cmd, u8 flags, u8 page, 7585 dma_addr_t dma_addr, u8 xfer_len) 7586 { 7587 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; 7588 7589 ENTER; 7590 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB; 7591 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE); 7592 7593 ioarcb->cmd_pkt.cdb[0] = INQUIRY; 7594 ioarcb->cmd_pkt.cdb[1] = flags; 7595 ioarcb->cmd_pkt.cdb[2] = page; 7596 ioarcb->cmd_pkt.cdb[4] = xfer_len; 7597 7598 ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST); 7599 7600 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT); 7601 LEAVE; 7602 } 7603 7604 /** 7605 * ipr_inquiry_page_supported - Is the given inquiry page supported 7606 * @page0: inquiry page 0 buffer 7607 * @page: page code. 7608 * 7609 * This function determines if the specified inquiry page is supported. 7610 * 7611 * Return value: 7612 * 1 if page is supported / 0 if not 7613 **/ 7614 static int ipr_inquiry_page_supported(struct ipr_inquiry_page0 *page0, u8 page) 7615 { 7616 int i; 7617 7618 for (i = 0; i < min_t(u8, page0->len, IPR_INQUIRY_PAGE0_ENTRIES); i++) 7619 if (page0->page[i] == page) 7620 return 1; 7621 7622 return 0; 7623 } 7624 7625 /** 7626 * ipr_ioafp_cap_inquiry - Send a Page 0xD0 Inquiry to the adapter. 7627 * @ipr_cmd: ipr command struct 7628 * 7629 * This function sends a Page 0xD0 inquiry to the adapter 7630 * to retrieve adapter capabilities. 7631 * 7632 * Return value: 7633 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN 7634 **/ 7635 static int ipr_ioafp_cap_inquiry(struct ipr_cmnd *ipr_cmd) 7636 { 7637 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 7638 struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data; 7639 struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap; 7640 7641 ENTER; 7642 ipr_cmd->job_step = ipr_ioafp_query_ioa_cfg; 7643 memset(cap, 0, sizeof(*cap)); 7644 7645 if (ipr_inquiry_page_supported(page0, 0xD0)) { 7646 ipr_ioafp_inquiry(ipr_cmd, 1, 0xD0, 7647 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, cap), 7648 sizeof(struct ipr_inquiry_cap)); 7649 return IPR_RC_JOB_RETURN; 7650 } 7651 7652 LEAVE; 7653 return IPR_RC_JOB_CONTINUE; 7654 } 7655 7656 /** 7657 * ipr_ioafp_page3_inquiry - Send a Page 3 Inquiry to the adapter. 7658 * @ipr_cmd: ipr command struct 7659 * 7660 * This function sends a Page 3 inquiry to the adapter 7661 * to retrieve software VPD information. 7662 * 7663 * Return value: 7664 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN 7665 **/ 7666 static int ipr_ioafp_page3_inquiry(struct ipr_cmnd *ipr_cmd) 7667 { 7668 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 7669 7670 ENTER; 7671 7672 ipr_cmd->job_step = ipr_ioafp_cap_inquiry; 7673 7674 ipr_ioafp_inquiry(ipr_cmd, 1, 3, 7675 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page3_data), 7676 sizeof(struct ipr_inquiry_page3)); 7677 7678 LEAVE; 7679 return IPR_RC_JOB_RETURN; 7680 } 7681 7682 /** 7683 * ipr_ioafp_page0_inquiry - Send a Page 0 Inquiry to the adapter. 7684 * @ipr_cmd: ipr command struct 7685 * 7686 * This function sends a Page 0 inquiry to the adapter 7687 * to retrieve supported inquiry pages. 7688 * 7689 * Return value: 7690 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN 7691 **/ 7692 static int ipr_ioafp_page0_inquiry(struct ipr_cmnd *ipr_cmd) 7693 { 7694 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 7695 char type[5]; 7696 7697 ENTER; 7698 7699 /* Grab the type out of the VPD and store it away */ 7700 memcpy(type, ioa_cfg->vpd_cbs->ioa_vpd.std_inq_data.vpids.product_id, 4); 7701 type[4] = '\0'; 7702 ioa_cfg->type = simple_strtoul((char *)type, NULL, 16); 7703 7704 if (ipr_invalid_adapter(ioa_cfg)) { 7705 dev_err(&ioa_cfg->pdev->dev, 7706 "Adapter not supported in this hardware configuration.\n"); 7707 7708 if (!ipr_testmode) { 7709 ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES; 7710 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE); 7711 list_add_tail(&ipr_cmd->queue, 7712 &ioa_cfg->hrrq->hrrq_free_q); 7713 return IPR_RC_JOB_RETURN; 7714 } 7715 } 7716 7717 ipr_cmd->job_step = ipr_ioafp_page3_inquiry; 7718 7719 ipr_ioafp_inquiry(ipr_cmd, 1, 0, 7720 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page0_data), 7721 sizeof(struct ipr_inquiry_page0)); 7722 7723 LEAVE; 7724 return IPR_RC_JOB_RETURN; 7725 } 7726 7727 /** 7728 * ipr_ioafp_std_inquiry - Send a Standard Inquiry to the adapter. 7729 * @ipr_cmd: ipr command struct 7730 * 7731 * This function sends a standard inquiry to the adapter. 7732 * 7733 * Return value: 7734 * IPR_RC_JOB_RETURN 7735 **/ 7736 static int ipr_ioafp_std_inquiry(struct ipr_cmnd *ipr_cmd) 7737 { 7738 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 7739 7740 ENTER; 7741 ipr_cmd->job_step = ipr_ioafp_page0_inquiry; 7742 7743 ipr_ioafp_inquiry(ipr_cmd, 0, 0, 7744 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, ioa_vpd), 7745 sizeof(struct ipr_ioa_vpd)); 7746 7747 LEAVE; 7748 return IPR_RC_JOB_RETURN; 7749 } 7750 7751 /** 7752 * ipr_ioafp_identify_hrrq - Send Identify Host RRQ. 7753 * @ipr_cmd: ipr command struct 7754 * 7755 * This function send an Identify Host Request Response Queue 7756 * command to establish the HRRQ with the adapter. 7757 * 7758 * Return value: 7759 * IPR_RC_JOB_RETURN 7760 **/ 7761 static int ipr_ioafp_identify_hrrq(struct ipr_cmnd *ipr_cmd) 7762 { 7763 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 7764 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; 7765 struct ipr_hrr_queue *hrrq; 7766 7767 ENTER; 7768 ipr_cmd->job_step = ipr_ioafp_std_inquiry; 7769 dev_info(&ioa_cfg->pdev->dev, "Starting IOA initialization sequence.\n"); 7770 7771 if (ioa_cfg->identify_hrrq_index < ioa_cfg->hrrq_num) { 7772 hrrq = &ioa_cfg->hrrq[ioa_cfg->identify_hrrq_index]; 7773 7774 ioarcb->cmd_pkt.cdb[0] = IPR_ID_HOST_RR_Q; 7775 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE); 7776 7777 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD; 7778 if (ioa_cfg->sis64) 7779 ioarcb->cmd_pkt.cdb[1] = 0x1; 7780 7781 if (ioa_cfg->nvectors == 1) 7782 ioarcb->cmd_pkt.cdb[1] &= ~IPR_ID_HRRQ_SELE_ENABLE; 7783 else 7784 ioarcb->cmd_pkt.cdb[1] |= IPR_ID_HRRQ_SELE_ENABLE; 7785 7786 ioarcb->cmd_pkt.cdb[2] = 7787 ((u64) hrrq->host_rrq_dma >> 24) & 0xff; 7788 ioarcb->cmd_pkt.cdb[3] = 7789 ((u64) hrrq->host_rrq_dma >> 16) & 0xff; 7790 ioarcb->cmd_pkt.cdb[4] = 7791 ((u64) hrrq->host_rrq_dma >> 8) & 0xff; 7792 ioarcb->cmd_pkt.cdb[5] = 7793 ((u64) hrrq->host_rrq_dma) & 0xff; 7794 ioarcb->cmd_pkt.cdb[7] = 7795 ((sizeof(u32) * hrrq->size) >> 8) & 0xff; 7796 ioarcb->cmd_pkt.cdb[8] = 7797 (sizeof(u32) * hrrq->size) & 0xff; 7798 7799 if (ioarcb->cmd_pkt.cdb[1] & IPR_ID_HRRQ_SELE_ENABLE) 7800 ioarcb->cmd_pkt.cdb[9] = 7801 ioa_cfg->identify_hrrq_index; 7802 7803 if (ioa_cfg->sis64) { 7804 ioarcb->cmd_pkt.cdb[10] = 7805 ((u64) hrrq->host_rrq_dma >> 56) & 0xff; 7806 ioarcb->cmd_pkt.cdb[11] = 7807 ((u64) hrrq->host_rrq_dma >> 48) & 0xff; 7808 ioarcb->cmd_pkt.cdb[12] = 7809 ((u64) hrrq->host_rrq_dma >> 40) & 0xff; 7810 ioarcb->cmd_pkt.cdb[13] = 7811 ((u64) hrrq->host_rrq_dma >> 32) & 0xff; 7812 } 7813 7814 if (ioarcb->cmd_pkt.cdb[1] & IPR_ID_HRRQ_SELE_ENABLE) 7815 ioarcb->cmd_pkt.cdb[14] = 7816 ioa_cfg->identify_hrrq_index; 7817 7818 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, 7819 IPR_INTERNAL_TIMEOUT); 7820 7821 if (++ioa_cfg->identify_hrrq_index < ioa_cfg->hrrq_num) 7822 ipr_cmd->job_step = ipr_ioafp_identify_hrrq; 7823 7824 LEAVE; 7825 return IPR_RC_JOB_RETURN; 7826 } 7827 7828 LEAVE; 7829 return IPR_RC_JOB_CONTINUE; 7830 } 7831 7832 /** 7833 * ipr_reset_timer_done - Adapter reset timer function 7834 * @ipr_cmd: ipr command struct 7835 * 7836 * Description: This function is used in adapter reset processing 7837 * for timing events. If the reset_cmd pointer in the IOA 7838 * config struct is not this adapter's we are doing nested 7839 * resets and fail_all_ops will take care of freeing the 7840 * command block. 7841 * 7842 * Return value: 7843 * none 7844 **/ 7845 static void ipr_reset_timer_done(struct ipr_cmnd *ipr_cmd) 7846 { 7847 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 7848 unsigned long lock_flags = 0; 7849 7850 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 7851 7852 if (ioa_cfg->reset_cmd == ipr_cmd) { 7853 list_del(&ipr_cmd->queue); 7854 ipr_cmd->done(ipr_cmd); 7855 } 7856 7857 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 7858 } 7859 7860 /** 7861 * ipr_reset_start_timer - Start a timer for adapter reset job 7862 * @ipr_cmd: ipr command struct 7863 * @timeout: timeout value 7864 * 7865 * Description: This function is used in adapter reset processing 7866 * for timing events. If the reset_cmd pointer in the IOA 7867 * config struct is not this adapter's we are doing nested 7868 * resets and fail_all_ops will take care of freeing the 7869 * command block. 7870 * 7871 * Return value: 7872 * none 7873 **/ 7874 static void ipr_reset_start_timer(struct ipr_cmnd *ipr_cmd, 7875 unsigned long timeout) 7876 { 7877 7878 ENTER; 7879 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q); 7880 ipr_cmd->done = ipr_reset_ioa_job; 7881 7882 ipr_cmd->timer.data = (unsigned long) ipr_cmd; 7883 ipr_cmd->timer.expires = jiffies + timeout; 7884 ipr_cmd->timer.function = (void (*)(unsigned long))ipr_reset_timer_done; 7885 add_timer(&ipr_cmd->timer); 7886 } 7887 7888 /** 7889 * ipr_init_ioa_mem - Initialize ioa_cfg control block 7890 * @ioa_cfg: ioa cfg struct 7891 * 7892 * Return value: 7893 * nothing 7894 **/ 7895 static void ipr_init_ioa_mem(struct ipr_ioa_cfg *ioa_cfg) 7896 { 7897 struct ipr_hrr_queue *hrrq; 7898 7899 for_each_hrrq(hrrq, ioa_cfg) { 7900 spin_lock(&hrrq->_lock); 7901 memset(hrrq->host_rrq, 0, sizeof(u32) * hrrq->size); 7902 7903 /* Initialize Host RRQ pointers */ 7904 hrrq->hrrq_start = hrrq->host_rrq; 7905 hrrq->hrrq_end = &hrrq->host_rrq[hrrq->size - 1]; 7906 hrrq->hrrq_curr = hrrq->hrrq_start; 7907 hrrq->toggle_bit = 1; 7908 spin_unlock(&hrrq->_lock); 7909 } 7910 wmb(); 7911 7912 ioa_cfg->identify_hrrq_index = 0; 7913 if (ioa_cfg->hrrq_num == 1) 7914 atomic_set(&ioa_cfg->hrrq_index, 0); 7915 else 7916 atomic_set(&ioa_cfg->hrrq_index, 1); 7917 7918 /* Zero out config table */ 7919 memset(ioa_cfg->u.cfg_table, 0, ioa_cfg->cfg_table_size); 7920 } 7921 7922 /** 7923 * ipr_reset_next_stage - Process IPL stage change based on feedback register. 7924 * @ipr_cmd: ipr command struct 7925 * 7926 * Return value: 7927 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN 7928 **/ 7929 static int ipr_reset_next_stage(struct ipr_cmnd *ipr_cmd) 7930 { 7931 unsigned long stage, stage_time; 7932 u32 feedback; 7933 volatile u32 int_reg; 7934 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 7935 u64 maskval = 0; 7936 7937 feedback = readl(ioa_cfg->regs.init_feedback_reg); 7938 stage = feedback & IPR_IPL_INIT_STAGE_MASK; 7939 stage_time = feedback & IPR_IPL_INIT_STAGE_TIME_MASK; 7940 7941 ipr_dbg("IPL stage = 0x%lx, IPL stage time = %ld\n", stage, stage_time); 7942 7943 /* sanity check the stage_time value */ 7944 if (stage_time == 0) 7945 stage_time = IPR_IPL_INIT_DEFAULT_STAGE_TIME; 7946 else if (stage_time < IPR_IPL_INIT_MIN_STAGE_TIME) 7947 stage_time = IPR_IPL_INIT_MIN_STAGE_TIME; 7948 else if (stage_time > IPR_LONG_OPERATIONAL_TIMEOUT) 7949 stage_time = IPR_LONG_OPERATIONAL_TIMEOUT; 7950 7951 if (stage == IPR_IPL_INIT_STAGE_UNKNOWN) { 7952 writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.set_interrupt_mask_reg); 7953 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg); 7954 stage_time = ioa_cfg->transop_timeout; 7955 ipr_cmd->job_step = ipr_ioafp_identify_hrrq; 7956 } else if (stage == IPR_IPL_INIT_STAGE_TRANSOP) { 7957 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32); 7958 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) { 7959 ipr_cmd->job_step = ipr_ioafp_identify_hrrq; 7960 maskval = IPR_PCII_IPL_STAGE_CHANGE; 7961 maskval = (maskval << 32) | IPR_PCII_IOA_TRANS_TO_OPER; 7962 writeq(maskval, ioa_cfg->regs.set_interrupt_mask_reg); 7963 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg); 7964 return IPR_RC_JOB_CONTINUE; 7965 } 7966 } 7967 7968 ipr_cmd->timer.data = (unsigned long) ipr_cmd; 7969 ipr_cmd->timer.expires = jiffies + stage_time * HZ; 7970 ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout; 7971 ipr_cmd->done = ipr_reset_ioa_job; 7972 add_timer(&ipr_cmd->timer); 7973 7974 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q); 7975 7976 return IPR_RC_JOB_RETURN; 7977 } 7978 7979 /** 7980 * ipr_reset_enable_ioa - Enable the IOA following a reset. 7981 * @ipr_cmd: ipr command struct 7982 * 7983 * This function reinitializes some control blocks and 7984 * enables destructive diagnostics on the adapter. 7985 * 7986 * Return value: 7987 * IPR_RC_JOB_RETURN 7988 **/ 7989 static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd) 7990 { 7991 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 7992 volatile u32 int_reg; 7993 volatile u64 maskval; 7994 int i; 7995 7996 ENTER; 7997 ipr_cmd->job_step = ipr_ioafp_identify_hrrq; 7998 ipr_init_ioa_mem(ioa_cfg); 7999 8000 for (i = 0; i < ioa_cfg->hrrq_num; i++) { 8001 spin_lock(&ioa_cfg->hrrq[i]._lock); 8002 ioa_cfg->hrrq[i].allow_interrupts = 1; 8003 spin_unlock(&ioa_cfg->hrrq[i]._lock); 8004 } 8005 wmb(); 8006 if (ioa_cfg->sis64) { 8007 /* Set the adapter to the correct endian mode. */ 8008 writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg); 8009 int_reg = readl(ioa_cfg->regs.endian_swap_reg); 8010 } 8011 8012 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32); 8013 8014 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) { 8015 writel((IPR_PCII_ERROR_INTERRUPTS | IPR_PCII_HRRQ_UPDATED), 8016 ioa_cfg->regs.clr_interrupt_mask_reg32); 8017 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg); 8018 return IPR_RC_JOB_CONTINUE; 8019 } 8020 8021 /* Enable destructive diagnostics on IOA */ 8022 writel(ioa_cfg->doorbell, ioa_cfg->regs.set_uproc_interrupt_reg32); 8023 8024 if (ioa_cfg->sis64) { 8025 maskval = IPR_PCII_IPL_STAGE_CHANGE; 8026 maskval = (maskval << 32) | IPR_PCII_OPER_INTERRUPTS; 8027 writeq(maskval, ioa_cfg->regs.clr_interrupt_mask_reg); 8028 } else 8029 writel(IPR_PCII_OPER_INTERRUPTS, ioa_cfg->regs.clr_interrupt_mask_reg32); 8030 8031 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg); 8032 8033 dev_info(&ioa_cfg->pdev->dev, "Initializing IOA.\n"); 8034 8035 if (ioa_cfg->sis64) { 8036 ipr_cmd->job_step = ipr_reset_next_stage; 8037 return IPR_RC_JOB_CONTINUE; 8038 } 8039 8040 ipr_cmd->timer.data = (unsigned long) ipr_cmd; 8041 ipr_cmd->timer.expires = jiffies + (ioa_cfg->transop_timeout * HZ); 8042 ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout; 8043 ipr_cmd->done = ipr_reset_ioa_job; 8044 add_timer(&ipr_cmd->timer); 8045 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q); 8046 8047 LEAVE; 8048 return IPR_RC_JOB_RETURN; 8049 } 8050 8051 /** 8052 * ipr_reset_wait_for_dump - Wait for a dump to timeout. 8053 * @ipr_cmd: ipr command struct 8054 * 8055 * This function is invoked when an adapter dump has run out 8056 * of processing time. 8057 * 8058 * Return value: 8059 * IPR_RC_JOB_CONTINUE 8060 **/ 8061 static int ipr_reset_wait_for_dump(struct ipr_cmnd *ipr_cmd) 8062 { 8063 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 8064 8065 if (ioa_cfg->sdt_state == GET_DUMP) 8066 ioa_cfg->sdt_state = WAIT_FOR_DUMP; 8067 else if (ioa_cfg->sdt_state == READ_DUMP) 8068 ioa_cfg->sdt_state = ABORT_DUMP; 8069 8070 ioa_cfg->dump_timeout = 1; 8071 ipr_cmd->job_step = ipr_reset_alert; 8072 8073 return IPR_RC_JOB_CONTINUE; 8074 } 8075 8076 /** 8077 * ipr_unit_check_no_data - Log a unit check/no data error log 8078 * @ioa_cfg: ioa config struct 8079 * 8080 * Logs an error indicating the adapter unit checked, but for some 8081 * reason, we were unable to fetch the unit check buffer. 8082 * 8083 * Return value: 8084 * nothing 8085 **/ 8086 static void ipr_unit_check_no_data(struct ipr_ioa_cfg *ioa_cfg) 8087 { 8088 ioa_cfg->errors_logged++; 8089 dev_err(&ioa_cfg->pdev->dev, "IOA unit check with no data\n"); 8090 } 8091 8092 /** 8093 * ipr_get_unit_check_buffer - Get the unit check buffer from the IOA 8094 * @ioa_cfg: ioa config struct 8095 * 8096 * Fetches the unit check buffer from the adapter by clocking the data 8097 * through the mailbox register. 8098 * 8099 * Return value: 8100 * nothing 8101 **/ 8102 static void ipr_get_unit_check_buffer(struct ipr_ioa_cfg *ioa_cfg) 8103 { 8104 unsigned long mailbox; 8105 struct ipr_hostrcb *hostrcb; 8106 struct ipr_uc_sdt sdt; 8107 int rc, length; 8108 u32 ioasc; 8109 8110 mailbox = readl(ioa_cfg->ioa_mailbox); 8111 8112 if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(mailbox)) { 8113 ipr_unit_check_no_data(ioa_cfg); 8114 return; 8115 } 8116 8117 memset(&sdt, 0, sizeof(struct ipr_uc_sdt)); 8118 rc = ipr_get_ldump_data_section(ioa_cfg, mailbox, (__be32 *) &sdt, 8119 (sizeof(struct ipr_uc_sdt)) / sizeof(__be32)); 8120 8121 if (rc || !(sdt.entry[0].flags & IPR_SDT_VALID_ENTRY) || 8122 ((be32_to_cpu(sdt.hdr.state) != IPR_FMT3_SDT_READY_TO_USE) && 8123 (be32_to_cpu(sdt.hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) { 8124 ipr_unit_check_no_data(ioa_cfg); 8125 return; 8126 } 8127 8128 /* Find length of the first sdt entry (UC buffer) */ 8129 if (be32_to_cpu(sdt.hdr.state) == IPR_FMT3_SDT_READY_TO_USE) 8130 length = be32_to_cpu(sdt.entry[0].end_token); 8131 else 8132 length = (be32_to_cpu(sdt.entry[0].end_token) - 8133 be32_to_cpu(sdt.entry[0].start_token)) & 8134 IPR_FMT2_MBX_ADDR_MASK; 8135 8136 hostrcb = list_entry(ioa_cfg->hostrcb_free_q.next, 8137 struct ipr_hostrcb, queue); 8138 list_del(&hostrcb->queue); 8139 memset(&hostrcb->hcam, 0, sizeof(hostrcb->hcam)); 8140 8141 rc = ipr_get_ldump_data_section(ioa_cfg, 8142 be32_to_cpu(sdt.entry[0].start_token), 8143 (__be32 *)&hostrcb->hcam, 8144 min(length, (int)sizeof(hostrcb->hcam)) / sizeof(__be32)); 8145 8146 if (!rc) { 8147 ipr_handle_log_data(ioa_cfg, hostrcb); 8148 ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc); 8149 if (ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED && 8150 ioa_cfg->sdt_state == GET_DUMP) 8151 ioa_cfg->sdt_state = WAIT_FOR_DUMP; 8152 } else 8153 ipr_unit_check_no_data(ioa_cfg); 8154 8155 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q); 8156 } 8157 8158 /** 8159 * ipr_reset_get_unit_check_job - Call to get the unit check buffer. 8160 * @ipr_cmd: ipr command struct 8161 * 8162 * Description: This function will call to get the unit check buffer. 8163 * 8164 * Return value: 8165 * IPR_RC_JOB_RETURN 8166 **/ 8167 static int ipr_reset_get_unit_check_job(struct ipr_cmnd *ipr_cmd) 8168 { 8169 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 8170 8171 ENTER; 8172 ioa_cfg->ioa_unit_checked = 0; 8173 ipr_get_unit_check_buffer(ioa_cfg); 8174 ipr_cmd->job_step = ipr_reset_alert; 8175 ipr_reset_start_timer(ipr_cmd, 0); 8176 8177 LEAVE; 8178 return IPR_RC_JOB_RETURN; 8179 } 8180 8181 /** 8182 * ipr_reset_restore_cfg_space - Restore PCI config space. 8183 * @ipr_cmd: ipr command struct 8184 * 8185 * Description: This function restores the saved PCI config space of 8186 * the adapter, fails all outstanding ops back to the callers, and 8187 * fetches the dump/unit check if applicable to this reset. 8188 * 8189 * Return value: 8190 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN 8191 **/ 8192 static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd) 8193 { 8194 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 8195 u32 int_reg; 8196 8197 ENTER; 8198 ioa_cfg->pdev->state_saved = true; 8199 pci_restore_state(ioa_cfg->pdev); 8200 8201 if (ipr_set_pcix_cmd_reg(ioa_cfg)) { 8202 ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR); 8203 return IPR_RC_JOB_CONTINUE; 8204 } 8205 8206 ipr_fail_all_ops(ioa_cfg); 8207 8208 if (ioa_cfg->sis64) { 8209 /* Set the adapter to the correct endian mode. */ 8210 writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg); 8211 int_reg = readl(ioa_cfg->regs.endian_swap_reg); 8212 } 8213 8214 if (ioa_cfg->ioa_unit_checked) { 8215 if (ioa_cfg->sis64) { 8216 ipr_cmd->job_step = ipr_reset_get_unit_check_job; 8217 ipr_reset_start_timer(ipr_cmd, IPR_DUMP_DELAY_TIMEOUT); 8218 return IPR_RC_JOB_RETURN; 8219 } else { 8220 ioa_cfg->ioa_unit_checked = 0; 8221 ipr_get_unit_check_buffer(ioa_cfg); 8222 ipr_cmd->job_step = ipr_reset_alert; 8223 ipr_reset_start_timer(ipr_cmd, 0); 8224 return IPR_RC_JOB_RETURN; 8225 } 8226 } 8227 8228 if (ioa_cfg->in_ioa_bringdown) { 8229 ipr_cmd->job_step = ipr_ioa_bringdown_done; 8230 } else { 8231 ipr_cmd->job_step = ipr_reset_enable_ioa; 8232 8233 if (GET_DUMP == ioa_cfg->sdt_state) { 8234 ioa_cfg->sdt_state = READ_DUMP; 8235 ioa_cfg->dump_timeout = 0; 8236 if (ioa_cfg->sis64) 8237 ipr_reset_start_timer(ipr_cmd, IPR_SIS64_DUMP_TIMEOUT); 8238 else 8239 ipr_reset_start_timer(ipr_cmd, IPR_SIS32_DUMP_TIMEOUT); 8240 ipr_cmd->job_step = ipr_reset_wait_for_dump; 8241 schedule_work(&ioa_cfg->work_q); 8242 return IPR_RC_JOB_RETURN; 8243 } 8244 } 8245 8246 LEAVE; 8247 return IPR_RC_JOB_CONTINUE; 8248 } 8249 8250 /** 8251 * ipr_reset_bist_done - BIST has completed on the adapter. 8252 * @ipr_cmd: ipr command struct 8253 * 8254 * Description: Unblock config space and resume the reset process. 8255 * 8256 * Return value: 8257 * IPR_RC_JOB_CONTINUE 8258 **/ 8259 static int ipr_reset_bist_done(struct ipr_cmnd *ipr_cmd) 8260 { 8261 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 8262 8263 ENTER; 8264 if (ioa_cfg->cfg_locked) 8265 pci_cfg_access_unlock(ioa_cfg->pdev); 8266 ioa_cfg->cfg_locked = 0; 8267 ipr_cmd->job_step = ipr_reset_restore_cfg_space; 8268 LEAVE; 8269 return IPR_RC_JOB_CONTINUE; 8270 } 8271 8272 /** 8273 * ipr_reset_start_bist - Run BIST on the adapter. 8274 * @ipr_cmd: ipr command struct 8275 * 8276 * Description: This function runs BIST on the adapter, then delays 2 seconds. 8277 * 8278 * Return value: 8279 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN 8280 **/ 8281 static int ipr_reset_start_bist(struct ipr_cmnd *ipr_cmd) 8282 { 8283 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 8284 int rc = PCIBIOS_SUCCESSFUL; 8285 8286 ENTER; 8287 if (ioa_cfg->ipr_chip->bist_method == IPR_MMIO) 8288 writel(IPR_UPROCI_SIS64_START_BIST, 8289 ioa_cfg->regs.set_uproc_interrupt_reg32); 8290 else 8291 rc = pci_write_config_byte(ioa_cfg->pdev, PCI_BIST, PCI_BIST_START); 8292 8293 if (rc == PCIBIOS_SUCCESSFUL) { 8294 ipr_cmd->job_step = ipr_reset_bist_done; 8295 ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT); 8296 rc = IPR_RC_JOB_RETURN; 8297 } else { 8298 if (ioa_cfg->cfg_locked) 8299 pci_cfg_access_unlock(ipr_cmd->ioa_cfg->pdev); 8300 ioa_cfg->cfg_locked = 0; 8301 ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR); 8302 rc = IPR_RC_JOB_CONTINUE; 8303 } 8304 8305 LEAVE; 8306 return rc; 8307 } 8308 8309 /** 8310 * ipr_reset_slot_reset_done - Clear PCI reset to the adapter 8311 * @ipr_cmd: ipr command struct 8312 * 8313 * Description: This clears PCI reset to the adapter and delays two seconds. 8314 * 8315 * Return value: 8316 * IPR_RC_JOB_RETURN 8317 **/ 8318 static int ipr_reset_slot_reset_done(struct ipr_cmnd *ipr_cmd) 8319 { 8320 ENTER; 8321 ipr_cmd->job_step = ipr_reset_bist_done; 8322 ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT); 8323 LEAVE; 8324 return IPR_RC_JOB_RETURN; 8325 } 8326 8327 /** 8328 * ipr_reset_reset_work - Pulse a PCIe fundamental reset 8329 * @work: work struct 8330 * 8331 * Description: This pulses warm reset to a slot. 8332 * 8333 **/ 8334 static void ipr_reset_reset_work(struct work_struct *work) 8335 { 8336 struct ipr_cmnd *ipr_cmd = container_of(work, struct ipr_cmnd, work); 8337 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 8338 struct pci_dev *pdev = ioa_cfg->pdev; 8339 unsigned long lock_flags = 0; 8340 8341 ENTER; 8342 pci_set_pcie_reset_state(pdev, pcie_warm_reset); 8343 msleep(jiffies_to_msecs(IPR_PCI_RESET_TIMEOUT)); 8344 pci_set_pcie_reset_state(pdev, pcie_deassert_reset); 8345 8346 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 8347 if (ioa_cfg->reset_cmd == ipr_cmd) 8348 ipr_reset_ioa_job(ipr_cmd); 8349 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 8350 LEAVE; 8351 } 8352 8353 /** 8354 * ipr_reset_slot_reset - Reset the PCI slot of the adapter. 8355 * @ipr_cmd: ipr command struct 8356 * 8357 * Description: This asserts PCI reset to the adapter. 8358 * 8359 * Return value: 8360 * IPR_RC_JOB_RETURN 8361 **/ 8362 static int ipr_reset_slot_reset(struct ipr_cmnd *ipr_cmd) 8363 { 8364 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 8365 8366 ENTER; 8367 INIT_WORK(&ipr_cmd->work, ipr_reset_reset_work); 8368 queue_work(ioa_cfg->reset_work_q, &ipr_cmd->work); 8369 ipr_cmd->job_step = ipr_reset_slot_reset_done; 8370 LEAVE; 8371 return IPR_RC_JOB_RETURN; 8372 } 8373 8374 /** 8375 * ipr_reset_block_config_access_wait - Wait for permission to block config access 8376 * @ipr_cmd: ipr command struct 8377 * 8378 * Description: This attempts to block config access to the IOA. 8379 * 8380 * Return value: 8381 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN 8382 **/ 8383 static int ipr_reset_block_config_access_wait(struct ipr_cmnd *ipr_cmd) 8384 { 8385 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 8386 int rc = IPR_RC_JOB_CONTINUE; 8387 8388 if (pci_cfg_access_trylock(ioa_cfg->pdev)) { 8389 ioa_cfg->cfg_locked = 1; 8390 ipr_cmd->job_step = ioa_cfg->reset; 8391 } else { 8392 if (ipr_cmd->u.time_left) { 8393 rc = IPR_RC_JOB_RETURN; 8394 ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT; 8395 ipr_reset_start_timer(ipr_cmd, 8396 IPR_CHECK_FOR_RESET_TIMEOUT); 8397 } else { 8398 ipr_cmd->job_step = ioa_cfg->reset; 8399 dev_err(&ioa_cfg->pdev->dev, 8400 "Timed out waiting to lock config access. Resetting anyway.\n"); 8401 } 8402 } 8403 8404 return rc; 8405 } 8406 8407 /** 8408 * ipr_reset_block_config_access - Block config access to the IOA 8409 * @ipr_cmd: ipr command struct 8410 * 8411 * Description: This attempts to block config access to the IOA 8412 * 8413 * Return value: 8414 * IPR_RC_JOB_CONTINUE 8415 **/ 8416 static int ipr_reset_block_config_access(struct ipr_cmnd *ipr_cmd) 8417 { 8418 ipr_cmd->ioa_cfg->cfg_locked = 0; 8419 ipr_cmd->job_step = ipr_reset_block_config_access_wait; 8420 ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT; 8421 return IPR_RC_JOB_CONTINUE; 8422 } 8423 8424 /** 8425 * ipr_reset_allowed - Query whether or not IOA can be reset 8426 * @ioa_cfg: ioa config struct 8427 * 8428 * Return value: 8429 * 0 if reset not allowed / non-zero if reset is allowed 8430 **/ 8431 static int ipr_reset_allowed(struct ipr_ioa_cfg *ioa_cfg) 8432 { 8433 volatile u32 temp_reg; 8434 8435 temp_reg = readl(ioa_cfg->regs.sense_interrupt_reg); 8436 return ((temp_reg & IPR_PCII_CRITICAL_OPERATION) == 0); 8437 } 8438 8439 /** 8440 * ipr_reset_wait_to_start_bist - Wait for permission to reset IOA. 8441 * @ipr_cmd: ipr command struct 8442 * 8443 * Description: This function waits for adapter permission to run BIST, 8444 * then runs BIST. If the adapter does not give permission after a 8445 * reasonable time, we will reset the adapter anyway. The impact of 8446 * resetting the adapter without warning the adapter is the risk of 8447 * losing the persistent error log on the adapter. If the adapter is 8448 * reset while it is writing to the flash on the adapter, the flash 8449 * segment will have bad ECC and be zeroed. 8450 * 8451 * Return value: 8452 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN 8453 **/ 8454 static int ipr_reset_wait_to_start_bist(struct ipr_cmnd *ipr_cmd) 8455 { 8456 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 8457 int rc = IPR_RC_JOB_RETURN; 8458 8459 if (!ipr_reset_allowed(ioa_cfg) && ipr_cmd->u.time_left) { 8460 ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT; 8461 ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT); 8462 } else { 8463 ipr_cmd->job_step = ipr_reset_block_config_access; 8464 rc = IPR_RC_JOB_CONTINUE; 8465 } 8466 8467 return rc; 8468 } 8469 8470 /** 8471 * ipr_reset_alert - Alert the adapter of a pending reset 8472 * @ipr_cmd: ipr command struct 8473 * 8474 * Description: This function alerts the adapter that it will be reset. 8475 * If memory space is not currently enabled, proceed directly 8476 * to running BIST on the adapter. The timer must always be started 8477 * so we guarantee we do not run BIST from ipr_isr. 8478 * 8479 * Return value: 8480 * IPR_RC_JOB_RETURN 8481 **/ 8482 static int ipr_reset_alert(struct ipr_cmnd *ipr_cmd) 8483 { 8484 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 8485 u16 cmd_reg; 8486 int rc; 8487 8488 ENTER; 8489 rc = pci_read_config_word(ioa_cfg->pdev, PCI_COMMAND, &cmd_reg); 8490 8491 if ((rc == PCIBIOS_SUCCESSFUL) && (cmd_reg & PCI_COMMAND_MEMORY)) { 8492 ipr_mask_and_clear_interrupts(ioa_cfg, ~0); 8493 writel(IPR_UPROCI_RESET_ALERT, ioa_cfg->regs.set_uproc_interrupt_reg32); 8494 ipr_cmd->job_step = ipr_reset_wait_to_start_bist; 8495 } else { 8496 ipr_cmd->job_step = ipr_reset_block_config_access; 8497 } 8498 8499 ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT; 8500 ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT); 8501 8502 LEAVE; 8503 return IPR_RC_JOB_RETURN; 8504 } 8505 8506 /** 8507 * ipr_reset_quiesce_done - Complete IOA disconnect 8508 * @ipr_cmd: ipr command struct 8509 * 8510 * Description: Freeze the adapter to complete quiesce processing 8511 * 8512 * Return value: 8513 * IPR_RC_JOB_CONTINUE 8514 **/ 8515 static int ipr_reset_quiesce_done(struct ipr_cmnd *ipr_cmd) 8516 { 8517 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 8518 8519 ENTER; 8520 ipr_cmd->job_step = ipr_ioa_bringdown_done; 8521 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER); 8522 LEAVE; 8523 return IPR_RC_JOB_CONTINUE; 8524 } 8525 8526 /** 8527 * ipr_reset_cancel_hcam_done - Check for outstanding commands 8528 * @ipr_cmd: ipr command struct 8529 * 8530 * Description: Ensure nothing is outstanding to the IOA and 8531 * proceed with IOA disconnect. Otherwise reset the IOA. 8532 * 8533 * Return value: 8534 * IPR_RC_JOB_RETURN / IPR_RC_JOB_CONTINUE 8535 **/ 8536 static int ipr_reset_cancel_hcam_done(struct ipr_cmnd *ipr_cmd) 8537 { 8538 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 8539 struct ipr_cmnd *loop_cmd; 8540 struct ipr_hrr_queue *hrrq; 8541 int rc = IPR_RC_JOB_CONTINUE; 8542 int count = 0; 8543 8544 ENTER; 8545 ipr_cmd->job_step = ipr_reset_quiesce_done; 8546 8547 for_each_hrrq(hrrq, ioa_cfg) { 8548 spin_lock(&hrrq->_lock); 8549 list_for_each_entry(loop_cmd, &hrrq->hrrq_pending_q, queue) { 8550 count++; 8551 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE); 8552 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); 8553 rc = IPR_RC_JOB_RETURN; 8554 break; 8555 } 8556 spin_unlock(&hrrq->_lock); 8557 8558 if (count) 8559 break; 8560 } 8561 8562 LEAVE; 8563 return rc; 8564 } 8565 8566 /** 8567 * ipr_reset_cancel_hcam - Cancel outstanding HCAMs 8568 * @ipr_cmd: ipr command struct 8569 * 8570 * Description: Cancel any oustanding HCAMs to the IOA. 8571 * 8572 * Return value: 8573 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN 8574 **/ 8575 static int ipr_reset_cancel_hcam(struct ipr_cmnd *ipr_cmd) 8576 { 8577 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 8578 int rc = IPR_RC_JOB_CONTINUE; 8579 struct ipr_cmd_pkt *cmd_pkt; 8580 struct ipr_cmnd *hcam_cmd; 8581 struct ipr_hrr_queue *hrrq = &ioa_cfg->hrrq[IPR_INIT_HRRQ]; 8582 8583 ENTER; 8584 ipr_cmd->job_step = ipr_reset_cancel_hcam_done; 8585 8586 if (!hrrq->ioa_is_dead) { 8587 if (!list_empty(&ioa_cfg->hostrcb_pending_q)) { 8588 list_for_each_entry(hcam_cmd, &hrrq->hrrq_pending_q, queue) { 8589 if (hcam_cmd->ioarcb.cmd_pkt.cdb[0] != IPR_HOST_CONTROLLED_ASYNC) 8590 continue; 8591 8592 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE); 8593 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD; 8594 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt; 8595 cmd_pkt->request_type = IPR_RQTYPE_IOACMD; 8596 cmd_pkt->cdb[0] = IPR_CANCEL_REQUEST; 8597 cmd_pkt->cdb[1] = IPR_CANCEL_64BIT_IOARCB; 8598 cmd_pkt->cdb[10] = ((u64) hcam_cmd->dma_addr >> 56) & 0xff; 8599 cmd_pkt->cdb[11] = ((u64) hcam_cmd->dma_addr >> 48) & 0xff; 8600 cmd_pkt->cdb[12] = ((u64) hcam_cmd->dma_addr >> 40) & 0xff; 8601 cmd_pkt->cdb[13] = ((u64) hcam_cmd->dma_addr >> 32) & 0xff; 8602 cmd_pkt->cdb[2] = ((u64) hcam_cmd->dma_addr >> 24) & 0xff; 8603 cmd_pkt->cdb[3] = ((u64) hcam_cmd->dma_addr >> 16) & 0xff; 8604 cmd_pkt->cdb[4] = ((u64) hcam_cmd->dma_addr >> 8) & 0xff; 8605 cmd_pkt->cdb[5] = ((u64) hcam_cmd->dma_addr) & 0xff; 8606 8607 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, 8608 IPR_CANCEL_TIMEOUT); 8609 8610 rc = IPR_RC_JOB_RETURN; 8611 ipr_cmd->job_step = ipr_reset_cancel_hcam; 8612 break; 8613 } 8614 } 8615 } else 8616 ipr_cmd->job_step = ipr_reset_alert; 8617 8618 LEAVE; 8619 return rc; 8620 } 8621 8622 /** 8623 * ipr_reset_ucode_download_done - Microcode download completion 8624 * @ipr_cmd: ipr command struct 8625 * 8626 * Description: This function unmaps the microcode download buffer. 8627 * 8628 * Return value: 8629 * IPR_RC_JOB_CONTINUE 8630 **/ 8631 static int ipr_reset_ucode_download_done(struct ipr_cmnd *ipr_cmd) 8632 { 8633 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 8634 struct ipr_sglist *sglist = ioa_cfg->ucode_sglist; 8635 8636 dma_unmap_sg(&ioa_cfg->pdev->dev, sglist->scatterlist, 8637 sglist->num_sg, DMA_TO_DEVICE); 8638 8639 ipr_cmd->job_step = ipr_reset_alert; 8640 return IPR_RC_JOB_CONTINUE; 8641 } 8642 8643 /** 8644 * ipr_reset_ucode_download - Download microcode to the adapter 8645 * @ipr_cmd: ipr command struct 8646 * 8647 * Description: This function checks to see if it there is microcode 8648 * to download to the adapter. If there is, a download is performed. 8649 * 8650 * Return value: 8651 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN 8652 **/ 8653 static int ipr_reset_ucode_download(struct ipr_cmnd *ipr_cmd) 8654 { 8655 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 8656 struct ipr_sglist *sglist = ioa_cfg->ucode_sglist; 8657 8658 ENTER; 8659 ipr_cmd->job_step = ipr_reset_alert; 8660 8661 if (!sglist) 8662 return IPR_RC_JOB_CONTINUE; 8663 8664 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE); 8665 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_SCSICDB; 8666 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = WRITE_BUFFER; 8667 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_WR_BUF_DOWNLOAD_AND_SAVE; 8668 ipr_cmd->ioarcb.cmd_pkt.cdb[6] = (sglist->buffer_len & 0xff0000) >> 16; 8669 ipr_cmd->ioarcb.cmd_pkt.cdb[7] = (sglist->buffer_len & 0x00ff00) >> 8; 8670 ipr_cmd->ioarcb.cmd_pkt.cdb[8] = sglist->buffer_len & 0x0000ff; 8671 8672 if (ioa_cfg->sis64) 8673 ipr_build_ucode_ioadl64(ipr_cmd, sglist); 8674 else 8675 ipr_build_ucode_ioadl(ipr_cmd, sglist); 8676 ipr_cmd->job_step = ipr_reset_ucode_download_done; 8677 8678 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, 8679 IPR_WRITE_BUFFER_TIMEOUT); 8680 8681 LEAVE; 8682 return IPR_RC_JOB_RETURN; 8683 } 8684 8685 /** 8686 * ipr_reset_shutdown_ioa - Shutdown the adapter 8687 * @ipr_cmd: ipr command struct 8688 * 8689 * Description: This function issues an adapter shutdown of the 8690 * specified type to the specified adapter as part of the 8691 * adapter reset job. 8692 * 8693 * Return value: 8694 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN 8695 **/ 8696 static int ipr_reset_shutdown_ioa(struct ipr_cmnd *ipr_cmd) 8697 { 8698 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 8699 enum ipr_shutdown_type shutdown_type = ipr_cmd->u.shutdown_type; 8700 unsigned long timeout; 8701 int rc = IPR_RC_JOB_CONTINUE; 8702 8703 ENTER; 8704 if (shutdown_type == IPR_SHUTDOWN_QUIESCE) 8705 ipr_cmd->job_step = ipr_reset_cancel_hcam; 8706 else if (shutdown_type != IPR_SHUTDOWN_NONE && 8707 !ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) { 8708 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE); 8709 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD; 8710 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN; 8711 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = shutdown_type; 8712 8713 if (shutdown_type == IPR_SHUTDOWN_NORMAL) 8714 timeout = IPR_SHUTDOWN_TIMEOUT; 8715 else if (shutdown_type == IPR_SHUTDOWN_PREPARE_FOR_NORMAL) 8716 timeout = IPR_INTERNAL_TIMEOUT; 8717 else if (ioa_cfg->dual_raid && ipr_dual_ioa_raid) 8718 timeout = IPR_DUAL_IOA_ABBR_SHUTDOWN_TO; 8719 else 8720 timeout = IPR_ABBREV_SHUTDOWN_TIMEOUT; 8721 8722 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, timeout); 8723 8724 rc = IPR_RC_JOB_RETURN; 8725 ipr_cmd->job_step = ipr_reset_ucode_download; 8726 } else 8727 ipr_cmd->job_step = ipr_reset_alert; 8728 8729 LEAVE; 8730 return rc; 8731 } 8732 8733 /** 8734 * ipr_reset_ioa_job - Adapter reset job 8735 * @ipr_cmd: ipr command struct 8736 * 8737 * Description: This function is the job router for the adapter reset job. 8738 * 8739 * Return value: 8740 * none 8741 **/ 8742 static void ipr_reset_ioa_job(struct ipr_cmnd *ipr_cmd) 8743 { 8744 u32 rc, ioasc; 8745 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 8746 8747 do { 8748 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); 8749 8750 if (ioa_cfg->reset_cmd != ipr_cmd) { 8751 /* 8752 * We are doing nested adapter resets and this is 8753 * not the current reset job. 8754 */ 8755 list_add_tail(&ipr_cmd->queue, 8756 &ipr_cmd->hrrq->hrrq_free_q); 8757 return; 8758 } 8759 8760 if (IPR_IOASC_SENSE_KEY(ioasc)) { 8761 rc = ipr_cmd->job_step_failed(ipr_cmd); 8762 if (rc == IPR_RC_JOB_RETURN) 8763 return; 8764 } 8765 8766 ipr_reinit_ipr_cmnd(ipr_cmd); 8767 ipr_cmd->job_step_failed = ipr_reset_cmd_failed; 8768 rc = ipr_cmd->job_step(ipr_cmd); 8769 } while (rc == IPR_RC_JOB_CONTINUE); 8770 } 8771 8772 /** 8773 * _ipr_initiate_ioa_reset - Initiate an adapter reset 8774 * @ioa_cfg: ioa config struct 8775 * @job_step: first job step of reset job 8776 * @shutdown_type: shutdown type 8777 * 8778 * Description: This function will initiate the reset of the given adapter 8779 * starting at the selected job step. 8780 * If the caller needs to wait on the completion of the reset, 8781 * the caller must sleep on the reset_wait_q. 8782 * 8783 * Return value: 8784 * none 8785 **/ 8786 static void _ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg, 8787 int (*job_step) (struct ipr_cmnd *), 8788 enum ipr_shutdown_type shutdown_type) 8789 { 8790 struct ipr_cmnd *ipr_cmd; 8791 int i; 8792 8793 ioa_cfg->in_reset_reload = 1; 8794 for (i = 0; i < ioa_cfg->hrrq_num; i++) { 8795 spin_lock(&ioa_cfg->hrrq[i]._lock); 8796 ioa_cfg->hrrq[i].allow_cmds = 0; 8797 spin_unlock(&ioa_cfg->hrrq[i]._lock); 8798 } 8799 wmb(); 8800 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) 8801 scsi_block_requests(ioa_cfg->host); 8802 8803 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg); 8804 ioa_cfg->reset_cmd = ipr_cmd; 8805 ipr_cmd->job_step = job_step; 8806 ipr_cmd->u.shutdown_type = shutdown_type; 8807 8808 ipr_reset_ioa_job(ipr_cmd); 8809 } 8810 8811 /** 8812 * ipr_initiate_ioa_reset - Initiate an adapter reset 8813 * @ioa_cfg: ioa config struct 8814 * @shutdown_type: shutdown type 8815 * 8816 * Description: This function will initiate the reset of the given adapter. 8817 * If the caller needs to wait on the completion of the reset, 8818 * the caller must sleep on the reset_wait_q. 8819 * 8820 * Return value: 8821 * none 8822 **/ 8823 static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg, 8824 enum ipr_shutdown_type shutdown_type) 8825 { 8826 int i; 8827 8828 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) 8829 return; 8830 8831 if (ioa_cfg->in_reset_reload) { 8832 if (ioa_cfg->sdt_state == GET_DUMP) 8833 ioa_cfg->sdt_state = WAIT_FOR_DUMP; 8834 else if (ioa_cfg->sdt_state == READ_DUMP) 8835 ioa_cfg->sdt_state = ABORT_DUMP; 8836 } 8837 8838 if (ioa_cfg->reset_retries++ >= IPR_NUM_RESET_RELOAD_RETRIES) { 8839 dev_err(&ioa_cfg->pdev->dev, 8840 "IOA taken offline - error recovery failed\n"); 8841 8842 ioa_cfg->reset_retries = 0; 8843 for (i = 0; i < ioa_cfg->hrrq_num; i++) { 8844 spin_lock(&ioa_cfg->hrrq[i]._lock); 8845 ioa_cfg->hrrq[i].ioa_is_dead = 1; 8846 spin_unlock(&ioa_cfg->hrrq[i]._lock); 8847 } 8848 wmb(); 8849 8850 if (ioa_cfg->in_ioa_bringdown) { 8851 ioa_cfg->reset_cmd = NULL; 8852 ioa_cfg->in_reset_reload = 0; 8853 ipr_fail_all_ops(ioa_cfg); 8854 wake_up_all(&ioa_cfg->reset_wait_q); 8855 8856 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) { 8857 spin_unlock_irq(ioa_cfg->host->host_lock); 8858 scsi_unblock_requests(ioa_cfg->host); 8859 spin_lock_irq(ioa_cfg->host->host_lock); 8860 } 8861 return; 8862 } else { 8863 ioa_cfg->in_ioa_bringdown = 1; 8864 shutdown_type = IPR_SHUTDOWN_NONE; 8865 } 8866 } 8867 8868 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_shutdown_ioa, 8869 shutdown_type); 8870 } 8871 8872 /** 8873 * ipr_reset_freeze - Hold off all I/O activity 8874 * @ipr_cmd: ipr command struct 8875 * 8876 * Description: If the PCI slot is frozen, hold off all I/O 8877 * activity; then, as soon as the slot is available again, 8878 * initiate an adapter reset. 8879 */ 8880 static int ipr_reset_freeze(struct ipr_cmnd *ipr_cmd) 8881 { 8882 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 8883 int i; 8884 8885 /* Disallow new interrupts, avoid loop */ 8886 for (i = 0; i < ioa_cfg->hrrq_num; i++) { 8887 spin_lock(&ioa_cfg->hrrq[i]._lock); 8888 ioa_cfg->hrrq[i].allow_interrupts = 0; 8889 spin_unlock(&ioa_cfg->hrrq[i]._lock); 8890 } 8891 wmb(); 8892 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q); 8893 ipr_cmd->done = ipr_reset_ioa_job; 8894 return IPR_RC_JOB_RETURN; 8895 } 8896 8897 /** 8898 * ipr_pci_mmio_enabled - Called when MMIO has been re-enabled 8899 * @pdev: PCI device struct 8900 * 8901 * Description: This routine is called to tell us that the MMIO 8902 * access to the IOA has been restored 8903 */ 8904 static pci_ers_result_t ipr_pci_mmio_enabled(struct pci_dev *pdev) 8905 { 8906 unsigned long flags = 0; 8907 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev); 8908 8909 spin_lock_irqsave(ioa_cfg->host->host_lock, flags); 8910 if (!ioa_cfg->probe_done) 8911 pci_save_state(pdev); 8912 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags); 8913 return PCI_ERS_RESULT_NEED_RESET; 8914 } 8915 8916 /** 8917 * ipr_pci_frozen - Called when slot has experienced a PCI bus error. 8918 * @pdev: PCI device struct 8919 * 8920 * Description: This routine is called to tell us that the PCI bus 8921 * is down. Can't do anything here, except put the device driver 8922 * into a holding pattern, waiting for the PCI bus to come back. 8923 */ 8924 static void ipr_pci_frozen(struct pci_dev *pdev) 8925 { 8926 unsigned long flags = 0; 8927 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev); 8928 8929 spin_lock_irqsave(ioa_cfg->host->host_lock, flags); 8930 if (ioa_cfg->probe_done) 8931 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_freeze, IPR_SHUTDOWN_NONE); 8932 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags); 8933 } 8934 8935 /** 8936 * ipr_pci_slot_reset - Called when PCI slot has been reset. 8937 * @pdev: PCI device struct 8938 * 8939 * Description: This routine is called by the pci error recovery 8940 * code after the PCI slot has been reset, just before we 8941 * should resume normal operations. 8942 */ 8943 static pci_ers_result_t ipr_pci_slot_reset(struct pci_dev *pdev) 8944 { 8945 unsigned long flags = 0; 8946 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev); 8947 8948 spin_lock_irqsave(ioa_cfg->host->host_lock, flags); 8949 if (ioa_cfg->probe_done) { 8950 if (ioa_cfg->needs_warm_reset) 8951 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE); 8952 else 8953 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_restore_cfg_space, 8954 IPR_SHUTDOWN_NONE); 8955 } else 8956 wake_up_all(&ioa_cfg->eeh_wait_q); 8957 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags); 8958 return PCI_ERS_RESULT_RECOVERED; 8959 } 8960 8961 /** 8962 * ipr_pci_perm_failure - Called when PCI slot is dead for good. 8963 * @pdev: PCI device struct 8964 * 8965 * Description: This routine is called when the PCI bus has 8966 * permanently failed. 8967 */ 8968 static void ipr_pci_perm_failure(struct pci_dev *pdev) 8969 { 8970 unsigned long flags = 0; 8971 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev); 8972 int i; 8973 8974 spin_lock_irqsave(ioa_cfg->host->host_lock, flags); 8975 if (ioa_cfg->probe_done) { 8976 if (ioa_cfg->sdt_state == WAIT_FOR_DUMP) 8977 ioa_cfg->sdt_state = ABORT_DUMP; 8978 ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES - 1; 8979 ioa_cfg->in_ioa_bringdown = 1; 8980 for (i = 0; i < ioa_cfg->hrrq_num; i++) { 8981 spin_lock(&ioa_cfg->hrrq[i]._lock); 8982 ioa_cfg->hrrq[i].allow_cmds = 0; 8983 spin_unlock(&ioa_cfg->hrrq[i]._lock); 8984 } 8985 wmb(); 8986 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE); 8987 } else 8988 wake_up_all(&ioa_cfg->eeh_wait_q); 8989 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags); 8990 } 8991 8992 /** 8993 * ipr_pci_error_detected - Called when a PCI error is detected. 8994 * @pdev: PCI device struct 8995 * @state: PCI channel state 8996 * 8997 * Description: Called when a PCI error is detected. 8998 * 8999 * Return value: 9000 * PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT 9001 */ 9002 static pci_ers_result_t ipr_pci_error_detected(struct pci_dev *pdev, 9003 pci_channel_state_t state) 9004 { 9005 switch (state) { 9006 case pci_channel_io_frozen: 9007 ipr_pci_frozen(pdev); 9008 return PCI_ERS_RESULT_CAN_RECOVER; 9009 case pci_channel_io_perm_failure: 9010 ipr_pci_perm_failure(pdev); 9011 return PCI_ERS_RESULT_DISCONNECT; 9012 break; 9013 default: 9014 break; 9015 } 9016 return PCI_ERS_RESULT_NEED_RESET; 9017 } 9018 9019 /** 9020 * ipr_probe_ioa_part2 - Initializes IOAs found in ipr_probe_ioa(..) 9021 * @ioa_cfg: ioa cfg struct 9022 * 9023 * Description: This is the second phase of adapter intialization 9024 * This function takes care of initilizing the adapter to the point 9025 * where it can accept new commands. 9026 9027 * Return value: 9028 * 0 on success / -EIO on failure 9029 **/ 9030 static int ipr_probe_ioa_part2(struct ipr_ioa_cfg *ioa_cfg) 9031 { 9032 int rc = 0; 9033 unsigned long host_lock_flags = 0; 9034 9035 ENTER; 9036 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags); 9037 dev_dbg(&ioa_cfg->pdev->dev, "ioa_cfg adx: 0x%p\n", ioa_cfg); 9038 ioa_cfg->probe_done = 1; 9039 if (ioa_cfg->needs_hard_reset) { 9040 ioa_cfg->needs_hard_reset = 0; 9041 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE); 9042 } else 9043 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_enable_ioa, 9044 IPR_SHUTDOWN_NONE); 9045 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags); 9046 9047 LEAVE; 9048 return rc; 9049 } 9050 9051 /** 9052 * ipr_free_cmd_blks - Frees command blocks allocated for an adapter 9053 * @ioa_cfg: ioa config struct 9054 * 9055 * Return value: 9056 * none 9057 **/ 9058 static void ipr_free_cmd_blks(struct ipr_ioa_cfg *ioa_cfg) 9059 { 9060 int i; 9061 9062 if (ioa_cfg->ipr_cmnd_list) { 9063 for (i = 0; i < IPR_NUM_CMD_BLKS; i++) { 9064 if (ioa_cfg->ipr_cmnd_list[i]) 9065 dma_pool_free(ioa_cfg->ipr_cmd_pool, 9066 ioa_cfg->ipr_cmnd_list[i], 9067 ioa_cfg->ipr_cmnd_list_dma[i]); 9068 9069 ioa_cfg->ipr_cmnd_list[i] = NULL; 9070 } 9071 } 9072 9073 if (ioa_cfg->ipr_cmd_pool) 9074 dma_pool_destroy(ioa_cfg->ipr_cmd_pool); 9075 9076 kfree(ioa_cfg->ipr_cmnd_list); 9077 kfree(ioa_cfg->ipr_cmnd_list_dma); 9078 ioa_cfg->ipr_cmnd_list = NULL; 9079 ioa_cfg->ipr_cmnd_list_dma = NULL; 9080 ioa_cfg->ipr_cmd_pool = NULL; 9081 } 9082 9083 /** 9084 * ipr_free_mem - Frees memory allocated for an adapter 9085 * @ioa_cfg: ioa cfg struct 9086 * 9087 * Return value: 9088 * nothing 9089 **/ 9090 static void ipr_free_mem(struct ipr_ioa_cfg *ioa_cfg) 9091 { 9092 int i; 9093 9094 kfree(ioa_cfg->res_entries); 9095 dma_free_coherent(&ioa_cfg->pdev->dev, sizeof(struct ipr_misc_cbs), 9096 ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma); 9097 ipr_free_cmd_blks(ioa_cfg); 9098 9099 for (i = 0; i < ioa_cfg->hrrq_num; i++) 9100 dma_free_coherent(&ioa_cfg->pdev->dev, 9101 sizeof(u32) * ioa_cfg->hrrq[i].size, 9102 ioa_cfg->hrrq[i].host_rrq, 9103 ioa_cfg->hrrq[i].host_rrq_dma); 9104 9105 dma_free_coherent(&ioa_cfg->pdev->dev, ioa_cfg->cfg_table_size, 9106 ioa_cfg->u.cfg_table, ioa_cfg->cfg_table_dma); 9107 9108 for (i = 0; i < IPR_NUM_HCAMS; i++) { 9109 dma_free_coherent(&ioa_cfg->pdev->dev, 9110 sizeof(struct ipr_hostrcb), 9111 ioa_cfg->hostrcb[i], 9112 ioa_cfg->hostrcb_dma[i]); 9113 } 9114 9115 ipr_free_dump(ioa_cfg); 9116 kfree(ioa_cfg->trace); 9117 } 9118 9119 /** 9120 * ipr_free_irqs - Free all allocated IRQs for the adapter. 9121 * @ioa_cfg: ipr cfg struct 9122 * 9123 * This function frees all allocated IRQs for the 9124 * specified adapter. 9125 * 9126 * Return value: 9127 * none 9128 **/ 9129 static void ipr_free_irqs(struct ipr_ioa_cfg *ioa_cfg) 9130 { 9131 struct pci_dev *pdev = ioa_cfg->pdev; 9132 9133 if (ioa_cfg->intr_flag == IPR_USE_MSI || 9134 ioa_cfg->intr_flag == IPR_USE_MSIX) { 9135 int i; 9136 for (i = 0; i < ioa_cfg->nvectors; i++) 9137 free_irq(ioa_cfg->vectors_info[i].vec, 9138 &ioa_cfg->hrrq[i]); 9139 } else 9140 free_irq(pdev->irq, &ioa_cfg->hrrq[0]); 9141 9142 if (ioa_cfg->intr_flag == IPR_USE_MSI) { 9143 pci_disable_msi(pdev); 9144 ioa_cfg->intr_flag &= ~IPR_USE_MSI; 9145 } else if (ioa_cfg->intr_flag == IPR_USE_MSIX) { 9146 pci_disable_msix(pdev); 9147 ioa_cfg->intr_flag &= ~IPR_USE_MSIX; 9148 } 9149 } 9150 9151 /** 9152 * ipr_free_all_resources - Free all allocated resources for an adapter. 9153 * @ipr_cmd: ipr command struct 9154 * 9155 * This function frees all allocated resources for the 9156 * specified adapter. 9157 * 9158 * Return value: 9159 * none 9160 **/ 9161 static void ipr_free_all_resources(struct ipr_ioa_cfg *ioa_cfg) 9162 { 9163 struct pci_dev *pdev = ioa_cfg->pdev; 9164 9165 ENTER; 9166 ipr_free_irqs(ioa_cfg); 9167 if (ioa_cfg->reset_work_q) 9168 destroy_workqueue(ioa_cfg->reset_work_q); 9169 iounmap(ioa_cfg->hdw_dma_regs); 9170 pci_release_regions(pdev); 9171 ipr_free_mem(ioa_cfg); 9172 scsi_host_put(ioa_cfg->host); 9173 pci_disable_device(pdev); 9174 LEAVE; 9175 } 9176 9177 /** 9178 * ipr_alloc_cmd_blks - Allocate command blocks for an adapter 9179 * @ioa_cfg: ioa config struct 9180 * 9181 * Return value: 9182 * 0 on success / -ENOMEM on allocation failure 9183 **/ 9184 static int ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg) 9185 { 9186 struct ipr_cmnd *ipr_cmd; 9187 struct ipr_ioarcb *ioarcb; 9188 dma_addr_t dma_addr; 9189 int i, entries_each_hrrq, hrrq_id = 0; 9190 9191 ioa_cfg->ipr_cmd_pool = dma_pool_create(IPR_NAME, &ioa_cfg->pdev->dev, 9192 sizeof(struct ipr_cmnd), 512, 0); 9193 9194 if (!ioa_cfg->ipr_cmd_pool) 9195 return -ENOMEM; 9196 9197 ioa_cfg->ipr_cmnd_list = kcalloc(IPR_NUM_CMD_BLKS, sizeof(struct ipr_cmnd *), GFP_KERNEL); 9198 ioa_cfg->ipr_cmnd_list_dma = kcalloc(IPR_NUM_CMD_BLKS, sizeof(dma_addr_t), GFP_KERNEL); 9199 9200 if (!ioa_cfg->ipr_cmnd_list || !ioa_cfg->ipr_cmnd_list_dma) { 9201 ipr_free_cmd_blks(ioa_cfg); 9202 return -ENOMEM; 9203 } 9204 9205 for (i = 0; i < ioa_cfg->hrrq_num; i++) { 9206 if (ioa_cfg->hrrq_num > 1) { 9207 if (i == 0) { 9208 entries_each_hrrq = IPR_NUM_INTERNAL_CMD_BLKS; 9209 ioa_cfg->hrrq[i].min_cmd_id = 0; 9210 ioa_cfg->hrrq[i].max_cmd_id = 9211 (entries_each_hrrq - 1); 9212 } else { 9213 entries_each_hrrq = 9214 IPR_NUM_BASE_CMD_BLKS/ 9215 (ioa_cfg->hrrq_num - 1); 9216 ioa_cfg->hrrq[i].min_cmd_id = 9217 IPR_NUM_INTERNAL_CMD_BLKS + 9218 (i - 1) * entries_each_hrrq; 9219 ioa_cfg->hrrq[i].max_cmd_id = 9220 (IPR_NUM_INTERNAL_CMD_BLKS + 9221 i * entries_each_hrrq - 1); 9222 } 9223 } else { 9224 entries_each_hrrq = IPR_NUM_CMD_BLKS; 9225 ioa_cfg->hrrq[i].min_cmd_id = 0; 9226 ioa_cfg->hrrq[i].max_cmd_id = (entries_each_hrrq - 1); 9227 } 9228 ioa_cfg->hrrq[i].size = entries_each_hrrq; 9229 } 9230 9231 BUG_ON(ioa_cfg->hrrq_num == 0); 9232 9233 i = IPR_NUM_CMD_BLKS - 9234 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].max_cmd_id - 1; 9235 if (i > 0) { 9236 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].size += i; 9237 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].max_cmd_id += i; 9238 } 9239 9240 for (i = 0; i < IPR_NUM_CMD_BLKS; i++) { 9241 ipr_cmd = dma_pool_alloc(ioa_cfg->ipr_cmd_pool, GFP_KERNEL, &dma_addr); 9242 9243 if (!ipr_cmd) { 9244 ipr_free_cmd_blks(ioa_cfg); 9245 return -ENOMEM; 9246 } 9247 9248 memset(ipr_cmd, 0, sizeof(*ipr_cmd)); 9249 ioa_cfg->ipr_cmnd_list[i] = ipr_cmd; 9250 ioa_cfg->ipr_cmnd_list_dma[i] = dma_addr; 9251 9252 ioarcb = &ipr_cmd->ioarcb; 9253 ipr_cmd->dma_addr = dma_addr; 9254 if (ioa_cfg->sis64) 9255 ioarcb->a.ioarcb_host_pci_addr64 = cpu_to_be64(dma_addr); 9256 else 9257 ioarcb->a.ioarcb_host_pci_addr = cpu_to_be32(dma_addr); 9258 9259 ioarcb->host_response_handle = cpu_to_be32(i << 2); 9260 if (ioa_cfg->sis64) { 9261 ioarcb->u.sis64_addr_data.data_ioadl_addr = 9262 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64)); 9263 ioarcb->u.sis64_addr_data.ioasa_host_pci_addr = 9264 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, s.ioasa64)); 9265 } else { 9266 ioarcb->write_ioadl_addr = 9267 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl)); 9268 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr; 9269 ioarcb->ioasa_host_pci_addr = 9270 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, s.ioasa)); 9271 } 9272 ioarcb->ioasa_len = cpu_to_be16(sizeof(struct ipr_ioasa)); 9273 ipr_cmd->cmd_index = i; 9274 ipr_cmd->ioa_cfg = ioa_cfg; 9275 ipr_cmd->sense_buffer_dma = dma_addr + 9276 offsetof(struct ipr_cmnd, sense_buffer); 9277 9278 ipr_cmd->ioarcb.cmd_pkt.hrrq_id = hrrq_id; 9279 ipr_cmd->hrrq = &ioa_cfg->hrrq[hrrq_id]; 9280 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); 9281 if (i >= ioa_cfg->hrrq[hrrq_id].max_cmd_id) 9282 hrrq_id++; 9283 } 9284 9285 return 0; 9286 } 9287 9288 /** 9289 * ipr_alloc_mem - Allocate memory for an adapter 9290 * @ioa_cfg: ioa config struct 9291 * 9292 * Return value: 9293 * 0 on success / non-zero for error 9294 **/ 9295 static int ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg) 9296 { 9297 struct pci_dev *pdev = ioa_cfg->pdev; 9298 int i, rc = -ENOMEM; 9299 9300 ENTER; 9301 ioa_cfg->res_entries = kzalloc(sizeof(struct ipr_resource_entry) * 9302 ioa_cfg->max_devs_supported, GFP_KERNEL); 9303 9304 if (!ioa_cfg->res_entries) 9305 goto out; 9306 9307 for (i = 0; i < ioa_cfg->max_devs_supported; i++) { 9308 list_add_tail(&ioa_cfg->res_entries[i].queue, &ioa_cfg->free_res_q); 9309 ioa_cfg->res_entries[i].ioa_cfg = ioa_cfg; 9310 } 9311 9312 ioa_cfg->vpd_cbs = dma_alloc_coherent(&pdev->dev, 9313 sizeof(struct ipr_misc_cbs), 9314 &ioa_cfg->vpd_cbs_dma, 9315 GFP_KERNEL); 9316 9317 if (!ioa_cfg->vpd_cbs) 9318 goto out_free_res_entries; 9319 9320 if (ipr_alloc_cmd_blks(ioa_cfg)) 9321 goto out_free_vpd_cbs; 9322 9323 for (i = 0; i < ioa_cfg->hrrq_num; i++) { 9324 ioa_cfg->hrrq[i].host_rrq = dma_alloc_coherent(&pdev->dev, 9325 sizeof(u32) * ioa_cfg->hrrq[i].size, 9326 &ioa_cfg->hrrq[i].host_rrq_dma, 9327 GFP_KERNEL); 9328 9329 if (!ioa_cfg->hrrq[i].host_rrq) { 9330 while (--i > 0) 9331 dma_free_coherent(&pdev->dev, 9332 sizeof(u32) * ioa_cfg->hrrq[i].size, 9333 ioa_cfg->hrrq[i].host_rrq, 9334 ioa_cfg->hrrq[i].host_rrq_dma); 9335 goto out_ipr_free_cmd_blocks; 9336 } 9337 ioa_cfg->hrrq[i].ioa_cfg = ioa_cfg; 9338 } 9339 9340 ioa_cfg->u.cfg_table = dma_alloc_coherent(&pdev->dev, 9341 ioa_cfg->cfg_table_size, 9342 &ioa_cfg->cfg_table_dma, 9343 GFP_KERNEL); 9344 9345 if (!ioa_cfg->u.cfg_table) 9346 goto out_free_host_rrq; 9347 9348 for (i = 0; i < IPR_NUM_HCAMS; i++) { 9349 ioa_cfg->hostrcb[i] = dma_alloc_coherent(&pdev->dev, 9350 sizeof(struct ipr_hostrcb), 9351 &ioa_cfg->hostrcb_dma[i], 9352 GFP_KERNEL); 9353 9354 if (!ioa_cfg->hostrcb[i]) 9355 goto out_free_hostrcb_dma; 9356 9357 ioa_cfg->hostrcb[i]->hostrcb_dma = 9358 ioa_cfg->hostrcb_dma[i] + offsetof(struct ipr_hostrcb, hcam); 9359 ioa_cfg->hostrcb[i]->ioa_cfg = ioa_cfg; 9360 list_add_tail(&ioa_cfg->hostrcb[i]->queue, &ioa_cfg->hostrcb_free_q); 9361 } 9362 9363 ioa_cfg->trace = kzalloc(sizeof(struct ipr_trace_entry) * 9364 IPR_NUM_TRACE_ENTRIES, GFP_KERNEL); 9365 9366 if (!ioa_cfg->trace) 9367 goto out_free_hostrcb_dma; 9368 9369 rc = 0; 9370 out: 9371 LEAVE; 9372 return rc; 9373 9374 out_free_hostrcb_dma: 9375 while (i-- > 0) { 9376 dma_free_coherent(&pdev->dev, sizeof(struct ipr_hostrcb), 9377 ioa_cfg->hostrcb[i], 9378 ioa_cfg->hostrcb_dma[i]); 9379 } 9380 dma_free_coherent(&pdev->dev, ioa_cfg->cfg_table_size, 9381 ioa_cfg->u.cfg_table, ioa_cfg->cfg_table_dma); 9382 out_free_host_rrq: 9383 for (i = 0; i < ioa_cfg->hrrq_num; i++) { 9384 dma_free_coherent(&pdev->dev, 9385 sizeof(u32) * ioa_cfg->hrrq[i].size, 9386 ioa_cfg->hrrq[i].host_rrq, 9387 ioa_cfg->hrrq[i].host_rrq_dma); 9388 } 9389 out_ipr_free_cmd_blocks: 9390 ipr_free_cmd_blks(ioa_cfg); 9391 out_free_vpd_cbs: 9392 dma_free_coherent(&pdev->dev, sizeof(struct ipr_misc_cbs), 9393 ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma); 9394 out_free_res_entries: 9395 kfree(ioa_cfg->res_entries); 9396 goto out; 9397 } 9398 9399 /** 9400 * ipr_initialize_bus_attr - Initialize SCSI bus attributes to default values 9401 * @ioa_cfg: ioa config struct 9402 * 9403 * Return value: 9404 * none 9405 **/ 9406 static void ipr_initialize_bus_attr(struct ipr_ioa_cfg *ioa_cfg) 9407 { 9408 int i; 9409 9410 for (i = 0; i < IPR_MAX_NUM_BUSES; i++) { 9411 ioa_cfg->bus_attr[i].bus = i; 9412 ioa_cfg->bus_attr[i].qas_enabled = 0; 9413 ioa_cfg->bus_attr[i].bus_width = IPR_DEFAULT_BUS_WIDTH; 9414 if (ipr_max_speed < ARRAY_SIZE(ipr_max_bus_speeds)) 9415 ioa_cfg->bus_attr[i].max_xfer_rate = ipr_max_bus_speeds[ipr_max_speed]; 9416 else 9417 ioa_cfg->bus_attr[i].max_xfer_rate = IPR_U160_SCSI_RATE; 9418 } 9419 } 9420 9421 /** 9422 * ipr_init_regs - Initialize IOA registers 9423 * @ioa_cfg: ioa config struct 9424 * 9425 * Return value: 9426 * none 9427 **/ 9428 static void ipr_init_regs(struct ipr_ioa_cfg *ioa_cfg) 9429 { 9430 const struct ipr_interrupt_offsets *p; 9431 struct ipr_interrupts *t; 9432 void __iomem *base; 9433 9434 p = &ioa_cfg->chip_cfg->regs; 9435 t = &ioa_cfg->regs; 9436 base = ioa_cfg->hdw_dma_regs; 9437 9438 t->set_interrupt_mask_reg = base + p->set_interrupt_mask_reg; 9439 t->clr_interrupt_mask_reg = base + p->clr_interrupt_mask_reg; 9440 t->clr_interrupt_mask_reg32 = base + p->clr_interrupt_mask_reg32; 9441 t->sense_interrupt_mask_reg = base + p->sense_interrupt_mask_reg; 9442 t->sense_interrupt_mask_reg32 = base + p->sense_interrupt_mask_reg32; 9443 t->clr_interrupt_reg = base + p->clr_interrupt_reg; 9444 t->clr_interrupt_reg32 = base + p->clr_interrupt_reg32; 9445 t->sense_interrupt_reg = base + p->sense_interrupt_reg; 9446 t->sense_interrupt_reg32 = base + p->sense_interrupt_reg32; 9447 t->ioarrin_reg = base + p->ioarrin_reg; 9448 t->sense_uproc_interrupt_reg = base + p->sense_uproc_interrupt_reg; 9449 t->sense_uproc_interrupt_reg32 = base + p->sense_uproc_interrupt_reg32; 9450 t->set_uproc_interrupt_reg = base + p->set_uproc_interrupt_reg; 9451 t->set_uproc_interrupt_reg32 = base + p->set_uproc_interrupt_reg32; 9452 t->clr_uproc_interrupt_reg = base + p->clr_uproc_interrupt_reg; 9453 t->clr_uproc_interrupt_reg32 = base + p->clr_uproc_interrupt_reg32; 9454 9455 if (ioa_cfg->sis64) { 9456 t->init_feedback_reg = base + p->init_feedback_reg; 9457 t->dump_addr_reg = base + p->dump_addr_reg; 9458 t->dump_data_reg = base + p->dump_data_reg; 9459 t->endian_swap_reg = base + p->endian_swap_reg; 9460 } 9461 } 9462 9463 /** 9464 * ipr_init_ioa_cfg - Initialize IOA config struct 9465 * @ioa_cfg: ioa config struct 9466 * @host: scsi host struct 9467 * @pdev: PCI dev struct 9468 * 9469 * Return value: 9470 * none 9471 **/ 9472 static void ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg, 9473 struct Scsi_Host *host, struct pci_dev *pdev) 9474 { 9475 int i; 9476 9477 ioa_cfg->host = host; 9478 ioa_cfg->pdev = pdev; 9479 ioa_cfg->log_level = ipr_log_level; 9480 ioa_cfg->doorbell = IPR_DOORBELL; 9481 sprintf(ioa_cfg->eye_catcher, IPR_EYECATCHER); 9482 sprintf(ioa_cfg->trace_start, IPR_TRACE_START_LABEL); 9483 sprintf(ioa_cfg->cfg_table_start, IPR_CFG_TBL_START); 9484 sprintf(ioa_cfg->resource_table_label, IPR_RES_TABLE_LABEL); 9485 sprintf(ioa_cfg->ipr_hcam_label, IPR_HCAM_LABEL); 9486 sprintf(ioa_cfg->ipr_cmd_label, IPR_CMD_LABEL); 9487 9488 INIT_LIST_HEAD(&ioa_cfg->hostrcb_free_q); 9489 INIT_LIST_HEAD(&ioa_cfg->hostrcb_pending_q); 9490 INIT_LIST_HEAD(&ioa_cfg->free_res_q); 9491 INIT_LIST_HEAD(&ioa_cfg->used_res_q); 9492 INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread); 9493 init_waitqueue_head(&ioa_cfg->reset_wait_q); 9494 init_waitqueue_head(&ioa_cfg->msi_wait_q); 9495 init_waitqueue_head(&ioa_cfg->eeh_wait_q); 9496 ioa_cfg->sdt_state = INACTIVE; 9497 9498 ipr_initialize_bus_attr(ioa_cfg); 9499 ioa_cfg->max_devs_supported = ipr_max_devs; 9500 9501 if (ioa_cfg->sis64) { 9502 host->max_id = IPR_MAX_SIS64_TARGETS_PER_BUS; 9503 host->max_lun = IPR_MAX_SIS64_LUNS_PER_TARGET; 9504 if (ipr_max_devs > IPR_MAX_SIS64_DEVS) 9505 ioa_cfg->max_devs_supported = IPR_MAX_SIS64_DEVS; 9506 ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr64) 9507 + ((sizeof(struct ipr_config_table_entry64) 9508 * ioa_cfg->max_devs_supported))); 9509 } else { 9510 host->max_id = IPR_MAX_NUM_TARGETS_PER_BUS; 9511 host->max_lun = IPR_MAX_NUM_LUNS_PER_TARGET; 9512 if (ipr_max_devs > IPR_MAX_PHYSICAL_DEVS) 9513 ioa_cfg->max_devs_supported = IPR_MAX_PHYSICAL_DEVS; 9514 ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr) 9515 + ((sizeof(struct ipr_config_table_entry) 9516 * ioa_cfg->max_devs_supported))); 9517 } 9518 9519 host->max_channel = IPR_VSET_BUS; 9520 host->unique_id = host->host_no; 9521 host->max_cmd_len = IPR_MAX_CDB_LEN; 9522 host->can_queue = ioa_cfg->max_cmds; 9523 pci_set_drvdata(pdev, ioa_cfg); 9524 9525 for (i = 0; i < ARRAY_SIZE(ioa_cfg->hrrq); i++) { 9526 INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_free_q); 9527 INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_pending_q); 9528 spin_lock_init(&ioa_cfg->hrrq[i]._lock); 9529 if (i == 0) 9530 ioa_cfg->hrrq[i].lock = ioa_cfg->host->host_lock; 9531 else 9532 ioa_cfg->hrrq[i].lock = &ioa_cfg->hrrq[i]._lock; 9533 } 9534 } 9535 9536 /** 9537 * ipr_get_chip_info - Find adapter chip information 9538 * @dev_id: PCI device id struct 9539 * 9540 * Return value: 9541 * ptr to chip information on success / NULL on failure 9542 **/ 9543 static const struct ipr_chip_t * 9544 ipr_get_chip_info(const struct pci_device_id *dev_id) 9545 { 9546 int i; 9547 9548 for (i = 0; i < ARRAY_SIZE(ipr_chip); i++) 9549 if (ipr_chip[i].vendor == dev_id->vendor && 9550 ipr_chip[i].device == dev_id->device) 9551 return &ipr_chip[i]; 9552 return NULL; 9553 } 9554 9555 /** 9556 * ipr_wait_for_pci_err_recovery - Wait for any PCI error recovery to complete 9557 * during probe time 9558 * @ioa_cfg: ioa config struct 9559 * 9560 * Return value: 9561 * None 9562 **/ 9563 static void ipr_wait_for_pci_err_recovery(struct ipr_ioa_cfg *ioa_cfg) 9564 { 9565 struct pci_dev *pdev = ioa_cfg->pdev; 9566 9567 if (pci_channel_offline(pdev)) { 9568 wait_event_timeout(ioa_cfg->eeh_wait_q, 9569 !pci_channel_offline(pdev), 9570 IPR_PCI_ERROR_RECOVERY_TIMEOUT); 9571 pci_restore_state(pdev); 9572 } 9573 } 9574 9575 static int ipr_enable_msix(struct ipr_ioa_cfg *ioa_cfg) 9576 { 9577 struct msix_entry entries[IPR_MAX_MSIX_VECTORS]; 9578 int i, vectors; 9579 9580 for (i = 0; i < ARRAY_SIZE(entries); ++i) 9581 entries[i].entry = i; 9582 9583 vectors = pci_enable_msix_range(ioa_cfg->pdev, 9584 entries, 1, ipr_number_of_msix); 9585 if (vectors < 0) { 9586 ipr_wait_for_pci_err_recovery(ioa_cfg); 9587 return vectors; 9588 } 9589 9590 for (i = 0; i < vectors; i++) 9591 ioa_cfg->vectors_info[i].vec = entries[i].vector; 9592 ioa_cfg->nvectors = vectors; 9593 9594 return 0; 9595 } 9596 9597 static int ipr_enable_msi(struct ipr_ioa_cfg *ioa_cfg) 9598 { 9599 int i, vectors; 9600 9601 vectors = pci_enable_msi_range(ioa_cfg->pdev, 1, ipr_number_of_msix); 9602 if (vectors < 0) { 9603 ipr_wait_for_pci_err_recovery(ioa_cfg); 9604 return vectors; 9605 } 9606 9607 for (i = 0; i < vectors; i++) 9608 ioa_cfg->vectors_info[i].vec = ioa_cfg->pdev->irq + i; 9609 ioa_cfg->nvectors = vectors; 9610 9611 return 0; 9612 } 9613 9614 static void name_msi_vectors(struct ipr_ioa_cfg *ioa_cfg) 9615 { 9616 int vec_idx, n = sizeof(ioa_cfg->vectors_info[0].desc) - 1; 9617 9618 for (vec_idx = 0; vec_idx < ioa_cfg->nvectors; vec_idx++) { 9619 snprintf(ioa_cfg->vectors_info[vec_idx].desc, n, 9620 "host%d-%d", ioa_cfg->host->host_no, vec_idx); 9621 ioa_cfg->vectors_info[vec_idx]. 9622 desc[strlen(ioa_cfg->vectors_info[vec_idx].desc)] = 0; 9623 } 9624 } 9625 9626 static int ipr_request_other_msi_irqs(struct ipr_ioa_cfg *ioa_cfg) 9627 { 9628 int i, rc; 9629 9630 for (i = 1; i < ioa_cfg->nvectors; i++) { 9631 rc = request_irq(ioa_cfg->vectors_info[i].vec, 9632 ipr_isr_mhrrq, 9633 0, 9634 ioa_cfg->vectors_info[i].desc, 9635 &ioa_cfg->hrrq[i]); 9636 if (rc) { 9637 while (--i >= 0) 9638 free_irq(ioa_cfg->vectors_info[i].vec, 9639 &ioa_cfg->hrrq[i]); 9640 return rc; 9641 } 9642 } 9643 return 0; 9644 } 9645 9646 /** 9647 * ipr_test_intr - Handle the interrupt generated in ipr_test_msi(). 9648 * @pdev: PCI device struct 9649 * 9650 * Description: Simply set the msi_received flag to 1 indicating that 9651 * Message Signaled Interrupts are supported. 9652 * 9653 * Return value: 9654 * 0 on success / non-zero on failure 9655 **/ 9656 static irqreturn_t ipr_test_intr(int irq, void *devp) 9657 { 9658 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp; 9659 unsigned long lock_flags = 0; 9660 irqreturn_t rc = IRQ_HANDLED; 9661 9662 dev_info(&ioa_cfg->pdev->dev, "Received IRQ : %d\n", irq); 9663 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 9664 9665 ioa_cfg->msi_received = 1; 9666 wake_up(&ioa_cfg->msi_wait_q); 9667 9668 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 9669 return rc; 9670 } 9671 9672 /** 9673 * ipr_test_msi - Test for Message Signaled Interrupt (MSI) support. 9674 * @pdev: PCI device struct 9675 * 9676 * Description: The return value from pci_enable_msi_range() can not always be 9677 * trusted. This routine sets up and initiates a test interrupt to determine 9678 * if the interrupt is received via the ipr_test_intr() service routine. 9679 * If the tests fails, the driver will fall back to LSI. 9680 * 9681 * Return value: 9682 * 0 on success / non-zero on failure 9683 **/ 9684 static int ipr_test_msi(struct ipr_ioa_cfg *ioa_cfg, struct pci_dev *pdev) 9685 { 9686 int rc; 9687 volatile u32 int_reg; 9688 unsigned long lock_flags = 0; 9689 9690 ENTER; 9691 9692 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 9693 init_waitqueue_head(&ioa_cfg->msi_wait_q); 9694 ioa_cfg->msi_received = 0; 9695 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER); 9696 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.clr_interrupt_mask_reg32); 9697 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg); 9698 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 9699 9700 if (ioa_cfg->intr_flag == IPR_USE_MSIX) 9701 rc = request_irq(ioa_cfg->vectors_info[0].vec, ipr_test_intr, 0, IPR_NAME, ioa_cfg); 9702 else 9703 rc = request_irq(pdev->irq, ipr_test_intr, 0, IPR_NAME, ioa_cfg); 9704 if (rc) { 9705 dev_err(&pdev->dev, "Can not assign irq %d\n", pdev->irq); 9706 return rc; 9707 } else if (ipr_debug) 9708 dev_info(&pdev->dev, "IRQ assigned: %d\n", pdev->irq); 9709 9710 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.sense_interrupt_reg32); 9711 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg); 9712 wait_event_timeout(ioa_cfg->msi_wait_q, ioa_cfg->msi_received, HZ); 9713 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 9714 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER); 9715 9716 if (!ioa_cfg->msi_received) { 9717 /* MSI test failed */ 9718 dev_info(&pdev->dev, "MSI test failed. Falling back to LSI.\n"); 9719 rc = -EOPNOTSUPP; 9720 } else if (ipr_debug) 9721 dev_info(&pdev->dev, "MSI test succeeded.\n"); 9722 9723 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 9724 9725 if (ioa_cfg->intr_flag == IPR_USE_MSIX) 9726 free_irq(ioa_cfg->vectors_info[0].vec, ioa_cfg); 9727 else 9728 free_irq(pdev->irq, ioa_cfg); 9729 9730 LEAVE; 9731 9732 return rc; 9733 } 9734 9735 /* ipr_probe_ioa - Allocates memory and does first stage of initialization 9736 * @pdev: PCI device struct 9737 * @dev_id: PCI device id struct 9738 * 9739 * Return value: 9740 * 0 on success / non-zero on failure 9741 **/ 9742 static int ipr_probe_ioa(struct pci_dev *pdev, 9743 const struct pci_device_id *dev_id) 9744 { 9745 struct ipr_ioa_cfg *ioa_cfg; 9746 struct Scsi_Host *host; 9747 unsigned long ipr_regs_pci; 9748 void __iomem *ipr_regs; 9749 int rc = PCIBIOS_SUCCESSFUL; 9750 volatile u32 mask, uproc, interrupts; 9751 unsigned long lock_flags, driver_lock_flags; 9752 9753 ENTER; 9754 9755 dev_info(&pdev->dev, "Found IOA with IRQ: %d\n", pdev->irq); 9756 host = scsi_host_alloc(&driver_template, sizeof(*ioa_cfg)); 9757 9758 if (!host) { 9759 dev_err(&pdev->dev, "call to scsi_host_alloc failed!\n"); 9760 rc = -ENOMEM; 9761 goto out; 9762 } 9763 9764 ioa_cfg = (struct ipr_ioa_cfg *)host->hostdata; 9765 memset(ioa_cfg, 0, sizeof(struct ipr_ioa_cfg)); 9766 ata_host_init(&ioa_cfg->ata_host, &pdev->dev, &ipr_sata_ops); 9767 9768 ioa_cfg->ipr_chip = ipr_get_chip_info(dev_id); 9769 9770 if (!ioa_cfg->ipr_chip) { 9771 dev_err(&pdev->dev, "Unknown adapter chipset 0x%04X 0x%04X\n", 9772 dev_id->vendor, dev_id->device); 9773 goto out_scsi_host_put; 9774 } 9775 9776 /* set SIS 32 or SIS 64 */ 9777 ioa_cfg->sis64 = ioa_cfg->ipr_chip->sis_type == IPR_SIS64 ? 1 : 0; 9778 ioa_cfg->chip_cfg = ioa_cfg->ipr_chip->cfg; 9779 ioa_cfg->clear_isr = ioa_cfg->chip_cfg->clear_isr; 9780 ioa_cfg->max_cmds = ioa_cfg->chip_cfg->max_cmds; 9781 9782 if (ipr_transop_timeout) 9783 ioa_cfg->transop_timeout = ipr_transop_timeout; 9784 else if (dev_id->driver_data & IPR_USE_LONG_TRANSOP_TIMEOUT) 9785 ioa_cfg->transop_timeout = IPR_LONG_OPERATIONAL_TIMEOUT; 9786 else 9787 ioa_cfg->transop_timeout = IPR_OPERATIONAL_TIMEOUT; 9788 9789 ioa_cfg->revid = pdev->revision; 9790 9791 ipr_init_ioa_cfg(ioa_cfg, host, pdev); 9792 9793 ipr_regs_pci = pci_resource_start(pdev, 0); 9794 9795 rc = pci_request_regions(pdev, IPR_NAME); 9796 if (rc < 0) { 9797 dev_err(&pdev->dev, 9798 "Couldn't register memory range of registers\n"); 9799 goto out_scsi_host_put; 9800 } 9801 9802 rc = pci_enable_device(pdev); 9803 9804 if (rc || pci_channel_offline(pdev)) { 9805 if (pci_channel_offline(pdev)) { 9806 ipr_wait_for_pci_err_recovery(ioa_cfg); 9807 rc = pci_enable_device(pdev); 9808 } 9809 9810 if (rc) { 9811 dev_err(&pdev->dev, "Cannot enable adapter\n"); 9812 ipr_wait_for_pci_err_recovery(ioa_cfg); 9813 goto out_release_regions; 9814 } 9815 } 9816 9817 ipr_regs = pci_ioremap_bar(pdev, 0); 9818 9819 if (!ipr_regs) { 9820 dev_err(&pdev->dev, 9821 "Couldn't map memory range of registers\n"); 9822 rc = -ENOMEM; 9823 goto out_disable; 9824 } 9825 9826 ioa_cfg->hdw_dma_regs = ipr_regs; 9827 ioa_cfg->hdw_dma_regs_pci = ipr_regs_pci; 9828 ioa_cfg->ioa_mailbox = ioa_cfg->chip_cfg->mailbox + ipr_regs; 9829 9830 ipr_init_regs(ioa_cfg); 9831 9832 if (ioa_cfg->sis64) { 9833 rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); 9834 if (rc < 0) { 9835 dev_dbg(&pdev->dev, "Failed to set 64 bit DMA mask\n"); 9836 rc = dma_set_mask_and_coherent(&pdev->dev, 9837 DMA_BIT_MASK(32)); 9838 } 9839 } else 9840 rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); 9841 9842 if (rc < 0) { 9843 dev_err(&pdev->dev, "Failed to set DMA mask\n"); 9844 goto cleanup_nomem; 9845 } 9846 9847 rc = pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, 9848 ioa_cfg->chip_cfg->cache_line_size); 9849 9850 if (rc != PCIBIOS_SUCCESSFUL) { 9851 dev_err(&pdev->dev, "Write of cache line size failed\n"); 9852 ipr_wait_for_pci_err_recovery(ioa_cfg); 9853 rc = -EIO; 9854 goto cleanup_nomem; 9855 } 9856 9857 /* Issue MMIO read to ensure card is not in EEH */ 9858 interrupts = readl(ioa_cfg->regs.sense_interrupt_reg); 9859 ipr_wait_for_pci_err_recovery(ioa_cfg); 9860 9861 if (ipr_number_of_msix > IPR_MAX_MSIX_VECTORS) { 9862 dev_err(&pdev->dev, "The max number of MSIX is %d\n", 9863 IPR_MAX_MSIX_VECTORS); 9864 ipr_number_of_msix = IPR_MAX_MSIX_VECTORS; 9865 } 9866 9867 if (ioa_cfg->ipr_chip->intr_type == IPR_USE_MSI && 9868 ipr_enable_msix(ioa_cfg) == 0) 9869 ioa_cfg->intr_flag = IPR_USE_MSIX; 9870 else if (ioa_cfg->ipr_chip->intr_type == IPR_USE_MSI && 9871 ipr_enable_msi(ioa_cfg) == 0) 9872 ioa_cfg->intr_flag = IPR_USE_MSI; 9873 else { 9874 ioa_cfg->intr_flag = IPR_USE_LSI; 9875 ioa_cfg->nvectors = 1; 9876 dev_info(&pdev->dev, "Cannot enable MSI.\n"); 9877 } 9878 9879 pci_set_master(pdev); 9880 9881 if (pci_channel_offline(pdev)) { 9882 ipr_wait_for_pci_err_recovery(ioa_cfg); 9883 pci_set_master(pdev); 9884 if (pci_channel_offline(pdev)) { 9885 rc = -EIO; 9886 goto out_msi_disable; 9887 } 9888 } 9889 9890 if (ioa_cfg->intr_flag == IPR_USE_MSI || 9891 ioa_cfg->intr_flag == IPR_USE_MSIX) { 9892 rc = ipr_test_msi(ioa_cfg, pdev); 9893 if (rc == -EOPNOTSUPP) { 9894 ipr_wait_for_pci_err_recovery(ioa_cfg); 9895 if (ioa_cfg->intr_flag == IPR_USE_MSI) { 9896 ioa_cfg->intr_flag &= ~IPR_USE_MSI; 9897 pci_disable_msi(pdev); 9898 } else if (ioa_cfg->intr_flag == IPR_USE_MSIX) { 9899 ioa_cfg->intr_flag &= ~IPR_USE_MSIX; 9900 pci_disable_msix(pdev); 9901 } 9902 9903 ioa_cfg->intr_flag = IPR_USE_LSI; 9904 ioa_cfg->nvectors = 1; 9905 } 9906 else if (rc) 9907 goto out_msi_disable; 9908 else { 9909 if (ioa_cfg->intr_flag == IPR_USE_MSI) 9910 dev_info(&pdev->dev, 9911 "Request for %d MSIs succeeded with starting IRQ: %d\n", 9912 ioa_cfg->nvectors, pdev->irq); 9913 else if (ioa_cfg->intr_flag == IPR_USE_MSIX) 9914 dev_info(&pdev->dev, 9915 "Request for %d MSIXs succeeded.", 9916 ioa_cfg->nvectors); 9917 } 9918 } 9919 9920 ioa_cfg->hrrq_num = min3(ioa_cfg->nvectors, 9921 (unsigned int)num_online_cpus(), 9922 (unsigned int)IPR_MAX_HRRQ_NUM); 9923 9924 if ((rc = ipr_save_pcix_cmd_reg(ioa_cfg))) 9925 goto out_msi_disable; 9926 9927 if ((rc = ipr_set_pcix_cmd_reg(ioa_cfg))) 9928 goto out_msi_disable; 9929 9930 rc = ipr_alloc_mem(ioa_cfg); 9931 if (rc < 0) { 9932 dev_err(&pdev->dev, 9933 "Couldn't allocate enough memory for device driver!\n"); 9934 goto out_msi_disable; 9935 } 9936 9937 /* Save away PCI config space for use following IOA reset */ 9938 rc = pci_save_state(pdev); 9939 9940 if (rc != PCIBIOS_SUCCESSFUL) { 9941 dev_err(&pdev->dev, "Failed to save PCI config space\n"); 9942 rc = -EIO; 9943 goto cleanup_nolog; 9944 } 9945 9946 /* 9947 * If HRRQ updated interrupt is not masked, or reset alert is set, 9948 * the card is in an unknown state and needs a hard reset 9949 */ 9950 mask = readl(ioa_cfg->regs.sense_interrupt_mask_reg32); 9951 interrupts = readl(ioa_cfg->regs.sense_interrupt_reg32); 9952 uproc = readl(ioa_cfg->regs.sense_uproc_interrupt_reg32); 9953 if ((mask & IPR_PCII_HRRQ_UPDATED) == 0 || (uproc & IPR_UPROCI_RESET_ALERT)) 9954 ioa_cfg->needs_hard_reset = 1; 9955 if ((interrupts & IPR_PCII_ERROR_INTERRUPTS) || reset_devices) 9956 ioa_cfg->needs_hard_reset = 1; 9957 if (interrupts & IPR_PCII_IOA_UNIT_CHECKED) 9958 ioa_cfg->ioa_unit_checked = 1; 9959 9960 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 9961 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER); 9962 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 9963 9964 if (ioa_cfg->intr_flag == IPR_USE_MSI 9965 || ioa_cfg->intr_flag == IPR_USE_MSIX) { 9966 name_msi_vectors(ioa_cfg); 9967 rc = request_irq(ioa_cfg->vectors_info[0].vec, ipr_isr, 9968 0, 9969 ioa_cfg->vectors_info[0].desc, 9970 &ioa_cfg->hrrq[0]); 9971 if (!rc) 9972 rc = ipr_request_other_msi_irqs(ioa_cfg); 9973 } else { 9974 rc = request_irq(pdev->irq, ipr_isr, 9975 IRQF_SHARED, 9976 IPR_NAME, &ioa_cfg->hrrq[0]); 9977 } 9978 if (rc) { 9979 dev_err(&pdev->dev, "Couldn't register IRQ %d! rc=%d\n", 9980 pdev->irq, rc); 9981 goto cleanup_nolog; 9982 } 9983 9984 if ((dev_id->driver_data & IPR_USE_PCI_WARM_RESET) || 9985 (dev_id->device == PCI_DEVICE_ID_IBM_OBSIDIAN_E && !ioa_cfg->revid)) { 9986 ioa_cfg->needs_warm_reset = 1; 9987 ioa_cfg->reset = ipr_reset_slot_reset; 9988 9989 ioa_cfg->reset_work_q = alloc_ordered_workqueue("ipr_reset_%d", 9990 WQ_MEM_RECLAIM, host->host_no); 9991 9992 if (!ioa_cfg->reset_work_q) { 9993 dev_err(&pdev->dev, "Couldn't register reset workqueue\n"); 9994 goto out_free_irq; 9995 } 9996 } else 9997 ioa_cfg->reset = ipr_reset_start_bist; 9998 9999 spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags); 10000 list_add_tail(&ioa_cfg->queue, &ipr_ioa_head); 10001 spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags); 10002 10003 LEAVE; 10004 out: 10005 return rc; 10006 10007 out_free_irq: 10008 ipr_free_irqs(ioa_cfg); 10009 cleanup_nolog: 10010 ipr_free_mem(ioa_cfg); 10011 out_msi_disable: 10012 ipr_wait_for_pci_err_recovery(ioa_cfg); 10013 if (ioa_cfg->intr_flag == IPR_USE_MSI) 10014 pci_disable_msi(pdev); 10015 else if (ioa_cfg->intr_flag == IPR_USE_MSIX) 10016 pci_disable_msix(pdev); 10017 cleanup_nomem: 10018 iounmap(ipr_regs); 10019 out_disable: 10020 pci_disable_device(pdev); 10021 out_release_regions: 10022 pci_release_regions(pdev); 10023 out_scsi_host_put: 10024 scsi_host_put(host); 10025 goto out; 10026 } 10027 10028 /** 10029 * ipr_initiate_ioa_bringdown - Bring down an adapter 10030 * @ioa_cfg: ioa config struct 10031 * @shutdown_type: shutdown type 10032 * 10033 * Description: This function will initiate bringing down the adapter. 10034 * This consists of issuing an IOA shutdown to the adapter 10035 * to flush the cache, and running BIST. 10036 * If the caller needs to wait on the completion of the reset, 10037 * the caller must sleep on the reset_wait_q. 10038 * 10039 * Return value: 10040 * none 10041 **/ 10042 static void ipr_initiate_ioa_bringdown(struct ipr_ioa_cfg *ioa_cfg, 10043 enum ipr_shutdown_type shutdown_type) 10044 { 10045 ENTER; 10046 if (ioa_cfg->sdt_state == WAIT_FOR_DUMP) 10047 ioa_cfg->sdt_state = ABORT_DUMP; 10048 ioa_cfg->reset_retries = 0; 10049 ioa_cfg->in_ioa_bringdown = 1; 10050 ipr_initiate_ioa_reset(ioa_cfg, shutdown_type); 10051 LEAVE; 10052 } 10053 10054 /** 10055 * __ipr_remove - Remove a single adapter 10056 * @pdev: pci device struct 10057 * 10058 * Adapter hot plug remove entry point. 10059 * 10060 * Return value: 10061 * none 10062 **/ 10063 static void __ipr_remove(struct pci_dev *pdev) 10064 { 10065 unsigned long host_lock_flags = 0; 10066 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev); 10067 int i; 10068 unsigned long driver_lock_flags; 10069 ENTER; 10070 10071 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags); 10072 while (ioa_cfg->in_reset_reload) { 10073 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags); 10074 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); 10075 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags); 10076 } 10077 10078 for (i = 0; i < ioa_cfg->hrrq_num; i++) { 10079 spin_lock(&ioa_cfg->hrrq[i]._lock); 10080 ioa_cfg->hrrq[i].removing_ioa = 1; 10081 spin_unlock(&ioa_cfg->hrrq[i]._lock); 10082 } 10083 wmb(); 10084 ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL); 10085 10086 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags); 10087 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); 10088 flush_work(&ioa_cfg->work_q); 10089 if (ioa_cfg->reset_work_q) 10090 flush_workqueue(ioa_cfg->reset_work_q); 10091 INIT_LIST_HEAD(&ioa_cfg->used_res_q); 10092 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags); 10093 10094 spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags); 10095 list_del(&ioa_cfg->queue); 10096 spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags); 10097 10098 if (ioa_cfg->sdt_state == ABORT_DUMP) 10099 ioa_cfg->sdt_state = WAIT_FOR_DUMP; 10100 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags); 10101 10102 ipr_free_all_resources(ioa_cfg); 10103 10104 LEAVE; 10105 } 10106 10107 /** 10108 * ipr_remove - IOA hot plug remove entry point 10109 * @pdev: pci device struct 10110 * 10111 * Adapter hot plug remove entry point. 10112 * 10113 * Return value: 10114 * none 10115 **/ 10116 static void ipr_remove(struct pci_dev *pdev) 10117 { 10118 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev); 10119 10120 ENTER; 10121 10122 ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj, 10123 &ipr_trace_attr); 10124 ipr_remove_dump_file(&ioa_cfg->host->shost_dev.kobj, 10125 &ipr_dump_attr); 10126 scsi_remove_host(ioa_cfg->host); 10127 10128 __ipr_remove(pdev); 10129 10130 LEAVE; 10131 } 10132 10133 /** 10134 * ipr_probe - Adapter hot plug add entry point 10135 * 10136 * Return value: 10137 * 0 on success / non-zero on failure 10138 **/ 10139 static int ipr_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id) 10140 { 10141 struct ipr_ioa_cfg *ioa_cfg; 10142 int rc, i; 10143 10144 rc = ipr_probe_ioa(pdev, dev_id); 10145 10146 if (rc) 10147 return rc; 10148 10149 ioa_cfg = pci_get_drvdata(pdev); 10150 rc = ipr_probe_ioa_part2(ioa_cfg); 10151 10152 if (rc) { 10153 __ipr_remove(pdev); 10154 return rc; 10155 } 10156 10157 rc = scsi_add_host(ioa_cfg->host, &pdev->dev); 10158 10159 if (rc) { 10160 __ipr_remove(pdev); 10161 return rc; 10162 } 10163 10164 rc = ipr_create_trace_file(&ioa_cfg->host->shost_dev.kobj, 10165 &ipr_trace_attr); 10166 10167 if (rc) { 10168 scsi_remove_host(ioa_cfg->host); 10169 __ipr_remove(pdev); 10170 return rc; 10171 } 10172 10173 rc = ipr_create_dump_file(&ioa_cfg->host->shost_dev.kobj, 10174 &ipr_dump_attr); 10175 10176 if (rc) { 10177 ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj, 10178 &ipr_trace_attr); 10179 scsi_remove_host(ioa_cfg->host); 10180 __ipr_remove(pdev); 10181 return rc; 10182 } 10183 10184 scsi_scan_host(ioa_cfg->host); 10185 ioa_cfg->iopoll_weight = ioa_cfg->chip_cfg->iopoll_weight; 10186 10187 if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) { 10188 for (i = 1; i < ioa_cfg->hrrq_num; i++) { 10189 blk_iopoll_init(&ioa_cfg->hrrq[i].iopoll, 10190 ioa_cfg->iopoll_weight, ipr_iopoll); 10191 blk_iopoll_enable(&ioa_cfg->hrrq[i].iopoll); 10192 } 10193 } 10194 10195 schedule_work(&ioa_cfg->work_q); 10196 return 0; 10197 } 10198 10199 /** 10200 * ipr_shutdown - Shutdown handler. 10201 * @pdev: pci device struct 10202 * 10203 * This function is invoked upon system shutdown/reboot. It will issue 10204 * an adapter shutdown to the adapter to flush the write cache. 10205 * 10206 * Return value: 10207 * none 10208 **/ 10209 static void ipr_shutdown(struct pci_dev *pdev) 10210 { 10211 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev); 10212 unsigned long lock_flags = 0; 10213 enum ipr_shutdown_type shutdown_type = IPR_SHUTDOWN_NORMAL; 10214 int i; 10215 10216 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 10217 if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) { 10218 ioa_cfg->iopoll_weight = 0; 10219 for (i = 1; i < ioa_cfg->hrrq_num; i++) 10220 blk_iopoll_disable(&ioa_cfg->hrrq[i].iopoll); 10221 } 10222 10223 while (ioa_cfg->in_reset_reload) { 10224 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 10225 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); 10226 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 10227 } 10228 10229 if (ipr_fast_reboot && system_state == SYSTEM_RESTART && ioa_cfg->sis64) 10230 shutdown_type = IPR_SHUTDOWN_QUIESCE; 10231 10232 ipr_initiate_ioa_bringdown(ioa_cfg, shutdown_type); 10233 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 10234 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); 10235 if (ipr_fast_reboot && system_state == SYSTEM_RESTART && ioa_cfg->sis64) { 10236 ipr_free_irqs(ioa_cfg); 10237 pci_disable_device(ioa_cfg->pdev); 10238 } 10239 } 10240 10241 static struct pci_device_id ipr_pci_table[] = { 10242 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, 10243 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5702, 0, 0, 0 }, 10244 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, 10245 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5703, 0, 0, 0 }, 10246 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, 10247 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573D, 0, 0, 0 }, 10248 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, 10249 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573E, 0, 0, 0 }, 10250 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, 10251 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571B, 0, 0, 0 }, 10252 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, 10253 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572E, 0, 0, 0 }, 10254 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, 10255 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571A, 0, 0, 0 }, 10256 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, 10257 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575B, 0, 0, 10258 IPR_USE_LONG_TRANSOP_TIMEOUT }, 10259 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, 10260 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 }, 10261 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, 10262 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0, 10263 IPR_USE_LONG_TRANSOP_TIMEOUT }, 10264 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, 10265 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0, 10266 IPR_USE_LONG_TRANSOP_TIMEOUT }, 10267 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, 10268 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 }, 10269 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, 10270 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0, 10271 IPR_USE_LONG_TRANSOP_TIMEOUT}, 10272 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, 10273 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0, 10274 IPR_USE_LONG_TRANSOP_TIMEOUT }, 10275 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, 10276 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574E, 0, 0, 10277 IPR_USE_LONG_TRANSOP_TIMEOUT }, 10278 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, 10279 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B3, 0, 0, 0 }, 10280 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, 10281 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CC, 0, 0, 0 }, 10282 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, 10283 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B7, 0, 0, 10284 IPR_USE_LONG_TRANSOP_TIMEOUT | IPR_USE_PCI_WARM_RESET }, 10285 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, 10286 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2780, 0, 0, 0 }, 10287 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, 10288 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571E, 0, 0, 0 }, 10289 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, 10290 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571F, 0, 0, 10291 IPR_USE_LONG_TRANSOP_TIMEOUT }, 10292 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, 10293 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572F, 0, 0, 10294 IPR_USE_LONG_TRANSOP_TIMEOUT }, 10295 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2, 10296 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B5, 0, 0, 0 }, 10297 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2, 10298 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574D, 0, 0, 0 }, 10299 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2, 10300 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B2, 0, 0, 0 }, 10301 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2, 10302 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C0, 0, 0, 0 }, 10303 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2, 10304 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C3, 0, 0, 0 }, 10305 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2, 10306 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C4, 0, 0, 0 }, 10307 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, 10308 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B4, 0, 0, 0 }, 10309 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, 10310 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B1, 0, 0, 0 }, 10311 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, 10312 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C6, 0, 0, 0 }, 10313 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, 10314 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C8, 0, 0, 0 }, 10315 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, 10316 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CE, 0, 0, 0 }, 10317 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, 10318 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D5, 0, 0, 0 }, 10319 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, 10320 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D6, 0, 0, 0 }, 10321 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, 10322 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D7, 0, 0, 0 }, 10323 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, 10324 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D8, 0, 0, 0 }, 10325 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, 10326 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D9, 0, 0, 0 }, 10327 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, 10328 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57DA, 0, 0, 0 }, 10329 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, 10330 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EB, 0, 0, 0 }, 10331 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, 10332 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EC, 0, 0, 0 }, 10333 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, 10334 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57ED, 0, 0, 0 }, 10335 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, 10336 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EE, 0, 0, 0 }, 10337 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, 10338 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EF, 0, 0, 0 }, 10339 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, 10340 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57F0, 0, 0, 0 }, 10341 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, 10342 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CCA, 0, 0, 0 }, 10343 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, 10344 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CD2, 0, 0, 0 }, 10345 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, 10346 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CCD, 0, 0, 0 }, 10347 { } 10348 }; 10349 MODULE_DEVICE_TABLE(pci, ipr_pci_table); 10350 10351 static const struct pci_error_handlers ipr_err_handler = { 10352 .error_detected = ipr_pci_error_detected, 10353 .mmio_enabled = ipr_pci_mmio_enabled, 10354 .slot_reset = ipr_pci_slot_reset, 10355 }; 10356 10357 static struct pci_driver ipr_driver = { 10358 .name = IPR_NAME, 10359 .id_table = ipr_pci_table, 10360 .probe = ipr_probe, 10361 .remove = ipr_remove, 10362 .shutdown = ipr_shutdown, 10363 .err_handler = &ipr_err_handler, 10364 }; 10365 10366 /** 10367 * ipr_halt_done - Shutdown prepare completion 10368 * 10369 * Return value: 10370 * none 10371 **/ 10372 static void ipr_halt_done(struct ipr_cmnd *ipr_cmd) 10373 { 10374 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); 10375 } 10376 10377 /** 10378 * ipr_halt - Issue shutdown prepare to all adapters 10379 * 10380 * Return value: 10381 * NOTIFY_OK on success / NOTIFY_DONE on failure 10382 **/ 10383 static int ipr_halt(struct notifier_block *nb, ulong event, void *buf) 10384 { 10385 struct ipr_cmnd *ipr_cmd; 10386 struct ipr_ioa_cfg *ioa_cfg; 10387 unsigned long flags = 0, driver_lock_flags; 10388 10389 if (event != SYS_RESTART && event != SYS_HALT && event != SYS_POWER_OFF) 10390 return NOTIFY_DONE; 10391 10392 spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags); 10393 10394 list_for_each_entry(ioa_cfg, &ipr_ioa_head, queue) { 10395 spin_lock_irqsave(ioa_cfg->host->host_lock, flags); 10396 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds || 10397 (ipr_fast_reboot && event == SYS_RESTART && ioa_cfg->sis64)) { 10398 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags); 10399 continue; 10400 } 10401 10402 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg); 10403 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE); 10404 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD; 10405 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN; 10406 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_SHUTDOWN_PREPARE_FOR_NORMAL; 10407 10408 ipr_do_req(ipr_cmd, ipr_halt_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT); 10409 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags); 10410 } 10411 spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags); 10412 10413 return NOTIFY_OK; 10414 } 10415 10416 static struct notifier_block ipr_notifier = { 10417 ipr_halt, NULL, 0 10418 }; 10419 10420 /** 10421 * ipr_init - Module entry point 10422 * 10423 * Return value: 10424 * 0 on success / negative value on failure 10425 **/ 10426 static int __init ipr_init(void) 10427 { 10428 ipr_info("IBM Power RAID SCSI Device Driver version: %s %s\n", 10429 IPR_DRIVER_VERSION, IPR_DRIVER_DATE); 10430 10431 register_reboot_notifier(&ipr_notifier); 10432 return pci_register_driver(&ipr_driver); 10433 } 10434 10435 /** 10436 * ipr_exit - Module unload 10437 * 10438 * Module unload entry point. 10439 * 10440 * Return value: 10441 * none 10442 **/ 10443 static void __exit ipr_exit(void) 10444 { 10445 unregister_reboot_notifier(&ipr_notifier); 10446 pci_unregister_driver(&ipr_driver); 10447 } 10448 10449 module_init(ipr_init); 10450 module_exit(ipr_exit); 10451