/*
 *    Disk Array driver for HP Smart Array SAS controllers
 *    Copyright 2000, 2014 Hewlett-Packard Development Company, L.P.
 *
 *    This program is free software; you can redistribute it and/or modify
 *    it under the terms of the GNU General Public License as published by
 *    the Free Software Foundation; version 2 of the License.
 *
 *    This program is distributed in the hope that it will be useful,
 *    but WITHOUT ANY WARRANTY; without even the implied warranty of
 *    MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *    NON INFRINGEMENT.  See the GNU General Public License for more details.
 *
 *    You should have received a copy of the GNU General Public License
 *    along with this program; if not, write to the Free Software
 *    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 *    Questions/Comments/Bugfixes to iss_storagedev@hp.com
 *
 */

#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/pci-aspm.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/fs.h>
#include <linux/timer.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/compat.h>
#include <linux/blktrace_api.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/dma-mapping.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <linux/cciss_ioctl.h>
#include <linux/string.h>
#include <linux/bitmap.h>
#include <linux/atomic.h>
#include <linux/jiffies.h>
#include <linux/percpu-defs.h>
#include <linux/percpu.h>
#include <asm/unaligned.h>
#include <asm/div64.h>
#include "hpsa_cmd.h"
#include "hpsa.h"

/* HPSA_DRIVER_VERSION must be 3 byte values (0-255) separated by '.' */
#define HPSA_DRIVER_VERSION "3.4.4-1"
#define DRIVER_NAME "HP HPSA Driver (v " HPSA_DRIVER_VERSION ")"
#define HPSA "hpsa"

/* How long to wait (in milliseconds) for board to go into simple mode */
#define MAX_CONFIG_WAIT 30000
#define MAX_IOCTL_CONFIG_WAIT 1000

/* define how many times we will try a command because of bus resets */
#define MAX_CMD_RETRIES 3

/* Embedded module documentation macros - see modules.h */
MODULE_AUTHOR("Hewlett-Packard Company");
MODULE_DESCRIPTION("Driver for HP Smart Array Controller version " \
	HPSA_DRIVER_VERSION);
MODULE_SUPPORTED_DEVICE("HP Smart Array Controllers");
MODULE_VERSION(HPSA_DRIVER_VERSION);
MODULE_LICENSE("GPL");

static int hpsa_allow_any;
module_param(hpsa_allow_any, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(hpsa_allow_any,
		"Allow hpsa driver to access unknown HP Smart Array hardware");
static int hpsa_simple_mode;
module_param(hpsa_simple_mode, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(hpsa_simple_mode,
	"Use 'simple mode' rather than 'performant mode'");

/* define the PCI info for the cards we can control */
static const struct pci_device_id hpsa_pci_device_id[] = {
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3241},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3243},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3245},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3247},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3249},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324A},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324B},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3233},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3350},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3351},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3352},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3353},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3354},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3355},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3356},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1921},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1922},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1923},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1924},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1926},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1928},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1929},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21BD},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21BE},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21BF},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C0},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C1},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C2},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C3},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C4},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C5},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C6},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C7},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C8},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C9},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CA},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CB},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CC},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CD},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CE},
	{PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0076},
	{PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0087},
	{PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x007D},
	{PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0088},
	{PCI_VENDOR_ID_HP, 0x333f, 0x103c, 0x333f},
	{PCI_VENDOR_ID_HP, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
		PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0},
	{0,}
};

MODULE_DEVICE_TABLE(pci, hpsa_pci_device_id);

/* board_id = Subsystem Device ID & Vendor ID
 * product = Marketing Name for the board
 * access = Address of the struct of function pointers
 */
static struct board_type products[] = {
	{0x3241103C, "Smart Array P212", &SA5_access},
	{0x3243103C, "Smart Array P410", &SA5_access},
	{0x3245103C, "Smart Array P410i", &SA5_access},
	{0x3247103C, "Smart Array P411", &SA5_access},
	{0x3249103C, "Smart Array P812", &SA5_access},
	{0x324A103C, "Smart Array P712m", &SA5_access},
	{0x324B103C, "Smart Array P711m", &SA5_access},
	{0x3233103C, "HP StorageWorks 1210m", &SA5_access}, /* alias of 333f */
	{0x3350103C, "Smart Array P222", &SA5_access},
	{0x3351103C, "Smart Array P420", &SA5_access},
	{0x3352103C, "Smart Array P421", &SA5_access},
	{0x3353103C, "Smart Array P822", &SA5_access},
	{0x3354103C, "Smart Array P420i", &SA5_access},
	{0x3355103C, "Smart Array P220i", &SA5_access},
	{0x3356103C, "Smart Array P721m", &SA5_access},
	{0x1921103C, "Smart Array P830i", &SA5_access},
	{0x1922103C, "Smart Array P430", &SA5_access},
	{0x1923103C, "Smart Array P431", &SA5_access},
	{0x1924103C, "Smart Array P830", &SA5_access},
	{0x1926103C, "Smart Array P731m", &SA5_access},
	{0x1928103C, "Smart Array P230i", &SA5_access},
	{0x1929103C, "Smart Array P530", &SA5_access},
	{0x21BD103C, "Smart Array", &SA5_access},
	{0x21BE103C, "Smart Array", &SA5_access},
	{0x21BF103C, "Smart Array", &SA5_access},
	{0x21C0103C, "Smart Array", &SA5_access},
	{0x21C1103C, "Smart Array", &SA5_access},
	{0x21C2103C, "Smart Array", &SA5_access},
	{0x21C3103C, "Smart Array", &SA5_access},
	{0x21C4103C, "Smart Array", &SA5_access},
	{0x21C5103C, "Smart Array", &SA5_access},
	{0x21C6103C, "Smart Array", &SA5_access},
	{0x21C7103C, "Smart Array", &SA5_access},
	{0x21C8103C, "Smart Array", &SA5_access},
	{0x21C9103C, "Smart Array", &SA5_access},
	{0x21CA103C, "Smart Array", &SA5_access},
	{0x21CB103C, "Smart Array", &SA5_access},
	{0x21CC103C, "Smart Array", &SA5_access},
	{0x21CD103C, "Smart Array", &SA5_access},
	{0x21CE103C, "Smart Array", &SA5_access},
	{0x00761590, "HP Storage P1224 Array Controller", &SA5_access},
	{0x00871590, "HP Storage P1224e Array Controller", &SA5_access},
	{0x007D1590, "HP Storage P1228 Array Controller", &SA5_access},
	{0x00881590, "HP Storage P1228e Array Controller", &SA5_access},
	{0x333f103c, "HP StorageWorks 1210m Array Controller", &SA5_access},
	{0xFFFF103C, "Unknown Smart Array", &SA5_access},
};
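
/*
 * Note (added): board_id packs the PCI subsystem device ID into the
 * high 16 bits and the subsystem vendor ID into the low 16 bits; the
 * P212 row above, 0x3241103C, pairs with the
 * {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3241} entry of
 * hpsa_pci_device_id[].
 */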

static int number_of_controllers;

static irqreturn_t do_hpsa_intr_intx(int irq, void *dev_id);
static irqreturn_t do_hpsa_intr_msi(int irq, void *dev_id);
static int hpsa_ioctl(struct scsi_device *dev, int cmd, void __user *arg);

#ifdef CONFIG_COMPAT
static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd,
	void __user *arg);
#endif

static void cmd_free(struct ctlr_info *h, struct CommandList *c);
static struct CommandList *cmd_alloc(struct ctlr_info *h);
static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
	void *buff, size_t size, u16 page_code, unsigned char *scsi3addr,
	int cmd_type);
static void hpsa_free_cmd_pool(struct ctlr_info *h);
#define VPD_PAGE (1 << 8)

static int hpsa_scsi_queue_command(struct Scsi_Host *h, struct scsi_cmnd *cmd);
static void hpsa_scan_start(struct Scsi_Host *);
static int hpsa_scan_finished(struct Scsi_Host *sh,
	unsigned long elapsed_time);
static int hpsa_change_queue_depth(struct scsi_device *sdev, int qdepth);

static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd);
static int hpsa_eh_abort_handler(struct scsi_cmnd *scsicmd);
static int hpsa_slave_alloc(struct scsi_device *sdev);
static void hpsa_slave_destroy(struct scsi_device *sdev);

static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno);
static int check_for_unit_attention(struct ctlr_info *h,
	struct CommandList *c);
static void check_ioctl_unit_attention(struct ctlr_info *h,
	struct CommandList *c);
/* performant mode helper functions */
static void calc_bucket_map(int *bucket, int num_buckets,
	int nsgs, int min_blocks, u32 *bucket_map);
static void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h);
static inline u32 next_command(struct ctlr_info *h, u8 q);
static int hpsa_find_cfg_addrs(struct pci_dev *pdev, void __iomem *vaddr,
	u32 *cfg_base_addr, u64 *cfg_base_addr_index,
	u64 *cfg_offset);
static int hpsa_pci_find_memory_BAR(struct pci_dev *pdev,
	unsigned long *memory_bar);
static int hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id);
static int hpsa_wait_for_board_state(struct pci_dev *pdev, void __iomem *vaddr,
	int wait_for_ready);
static inline void finish_cmd(struct CommandList *c);
static void hpsa_wait_for_mode_change_ack(struct ctlr_info *h);
#define BOARD_NOT_READY 0
#define BOARD_READY 1
static void hpsa_drain_accel_commands(struct ctlr_info *h);
static void hpsa_flush_cache(struct ctlr_info *h);
static int hpsa_scsi_ioaccel_queue_command(struct ctlr_info *h,
	struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
	u8 *scsi3addr);

static inline struct ctlr_info *sdev_to_hba(struct scsi_device *sdev)
{
	unsigned long *priv = shost_priv(sdev->host);
	return (struct ctlr_info *) *priv;
}

static inline struct ctlr_info *shost_to_hba(struct Scsi_Host *sh)
{
	unsigned long *priv = shost_priv(sh);
	return (struct ctlr_info *) *priv;
}
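
/*
 * Note (added): the Scsi_Host private area returned by shost_priv()
 * holds a single pointer to this driver's struct ctlr_info; both
 * helpers above just dereference it, so a caller can get from either a
 * scsi_device or a Scsi_Host back to the owning controller, e.g.:
 *
 *	struct ctlr_info *h = sdev_to_hba(sdev);
 *	dev_warn(&h->pdev->dev, HPSA "%d: ...\n", h->ctlr);
 */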
285 */ 286 break; 287 case POWER_OR_RESET: 288 dev_warn(&h->pdev->dev, HPSA "%d: a power on " 289 "or device reset detected\n", h->ctlr); 290 break; 291 case UNIT_ATTENTION_CLEARED: 292 dev_warn(&h->pdev->dev, HPSA "%d: unit attention " 293 "cleared by another initiator\n", h->ctlr); 294 break; 295 default: 296 dev_warn(&h->pdev->dev, HPSA "%d: unknown " 297 "unit attention detected\n", h->ctlr); 298 break; 299 } 300 return 1; 301 } 302 303 static int check_for_busy(struct ctlr_info *h, struct CommandList *c) 304 { 305 if (c->err_info->CommandStatus != CMD_TARGET_STATUS || 306 (c->err_info->ScsiStatus != SAM_STAT_BUSY && 307 c->err_info->ScsiStatus != SAM_STAT_TASK_SET_FULL)) 308 return 0; 309 dev_warn(&h->pdev->dev, HPSA "device busy"); 310 return 1; 311 } 312 313 static ssize_t host_store_hp_ssd_smart_path_status(struct device *dev, 314 struct device_attribute *attr, 315 const char *buf, size_t count) 316 { 317 int status, len; 318 struct ctlr_info *h; 319 struct Scsi_Host *shost = class_to_shost(dev); 320 char tmpbuf[10]; 321 322 if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO)) 323 return -EACCES; 324 len = count > sizeof(tmpbuf) - 1 ? sizeof(tmpbuf) - 1 : count; 325 strncpy(tmpbuf, buf, len); 326 tmpbuf[len] = '\0'; 327 if (sscanf(tmpbuf, "%d", &status) != 1) 328 return -EINVAL; 329 h = shost_to_hba(shost); 330 h->acciopath_status = !!status; 331 dev_warn(&h->pdev->dev, 332 "hpsa: HP SSD Smart Path %s via sysfs update.\n", 333 h->acciopath_status ? "enabled" : "disabled"); 334 return count; 335 } 336 337 static ssize_t host_store_raid_offload_debug(struct device *dev, 338 struct device_attribute *attr, 339 const char *buf, size_t count) 340 { 341 int debug_level, len; 342 struct ctlr_info *h; 343 struct Scsi_Host *shost = class_to_shost(dev); 344 char tmpbuf[10]; 345 346 if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO)) 347 return -EACCES; 348 len = count > sizeof(tmpbuf) - 1 ? 

static ssize_t host_store_raid_offload_debug(struct device *dev,
					 struct device_attribute *attr,
					 const char *buf, size_t count)
{
	int debug_level, len;
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);
	char tmpbuf[10];

	if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
		return -EACCES;
	len = count > sizeof(tmpbuf) - 1 ? sizeof(tmpbuf) - 1 : count;
	strncpy(tmpbuf, buf, len);
	tmpbuf[len] = '\0';
	if (sscanf(tmpbuf, "%d", &debug_level) != 1)
		return -EINVAL;
	if (debug_level < 0)
		debug_level = 0;
	h = shost_to_hba(shost);
	h->raid_offload_debug = debug_level;
	dev_warn(&h->pdev->dev, "hpsa: Set raid_offload_debug level = %d\n",
		h->raid_offload_debug);
	return count;
}

static ssize_t host_store_rescan(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);
	h = shost_to_hba(shost);
	hpsa_scan_start(h->scsi_host);
	return count;
}

static ssize_t host_show_firmware_revision(struct device *dev,
	     struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);
	unsigned char *fwrev;

	h = shost_to_hba(shost);
	if (!h->hba_inquiry_data)
		return 0;
	fwrev = &h->hba_inquiry_data[32];
	return snprintf(buf, 20, "%c%c%c%c\n",
		fwrev[0], fwrev[1], fwrev[2], fwrev[3]);
}

static ssize_t host_show_commands_outstanding(struct device *dev,
	     struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ctlr_info *h = shost_to_hba(shost);

	return snprintf(buf, 20, "%d\n",
			atomic_read(&h->commands_outstanding));
}

static ssize_t host_show_transport_mode(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);

	h = shost_to_hba(shost);
	return snprintf(buf, 20, "%s\n",
		h->transMethod & CFGTBL_Trans_Performant ?
			"performant" : "simple");
}

static ssize_t host_show_hp_ssd_smart_path_status(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);

	h = shost_to_hba(shost);
	return snprintf(buf, 30, "HP SSD Smart Path %s\n",
		(h->acciopath_status == 1) ? "enabled" : "disabled");
}

/* List of controllers which cannot be hard reset on kexec with reset_devices */
static u32 unresettable_controller[] = {
	0x324a103C, /* Smart Array P712m */
	0x324b103C, /* Smart Array P711m */
	0x3223103C, /* Smart Array P800 */
	0x3234103C, /* Smart Array P400 */
	0x3235103C, /* Smart Array P400i */
	0x3211103C, /* Smart Array E200i */
	0x3212103C, /* Smart Array E200 */
	0x3213103C, /* Smart Array E200i */
	0x3214103C, /* Smart Array E200i */
	0x3215103C, /* Smart Array E200i */
	0x3237103C, /* Smart Array E500 */
	0x323D103C, /* Smart Array P700m */
	0x40800E11, /* Smart Array 5i */
	0x409C0E11, /* Smart Array 6400 */
	0x409D0E11, /* Smart Array 6400 EM */
	0x40700E11, /* Smart Array 5300 */
	0x40820E11, /* Smart Array 532 */
	0x40830E11, /* Smart Array 5312 */
	0x409A0E11, /* Smart Array 641 */
	0x409B0E11, /* Smart Array 642 */
	0x40910E11, /* Smart Array 6i */
};

/* List of controllers which cannot even be soft reset */
static u32 soft_unresettable_controller[] = {
	0x40800E11, /* Smart Array 5i */
	0x40700E11, /* Smart Array 5300 */
	0x40820E11, /* Smart Array 532 */
	0x40830E11, /* Smart Array 5312 */
	0x409A0E11, /* Smart Array 641 */
	0x409B0E11, /* Smart Array 642 */
	0x40910E11, /* Smart Array 6i */
	/* Exclude 640x boards.  These are two pci devices in one slot
	 * which share a battery backed cache module.  One controls the
	 * cache, the other accesses the cache through the one that controls
	 * it.  If we reset the one controlling the cache, the other will
	 * likely not be happy.  Just forbid resetting this conjoined mess.
	 * The 640x isn't really supported by hpsa anyway.
	 */
	0x409C0E11, /* Smart Array 6400 */
	0x409D0E11, /* Smart Array 6400 EM */
};

static int ctlr_is_hard_resettable(u32 board_id)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(unresettable_controller); i++)
		if (unresettable_controller[i] == board_id)
			return 0;
	return 1;
}

static int ctlr_is_soft_resettable(u32 board_id)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(soft_unresettable_controller); i++)
		if (soft_unresettable_controller[i] == board_id)
			return 0;
	return 1;
}

static int ctlr_is_resettable(u32 board_id)
{
	return ctlr_is_hard_resettable(board_id) ||
		ctlr_is_soft_resettable(board_id);
}

static ssize_t host_show_resettable(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);

	h = shost_to_hba(shost);
	return snprintf(buf, 20, "%d\n", ctlr_is_resettable(h->board_id));
}

static inline int is_logical_dev_addr_mode(unsigned char scsi3addr[])
{
	return (scsi3addr[3] & 0xC0) == 0x40;
}

static const char * const raid_label[] = { "0", "4", "1(+0)", "5", "5+1", "6",
	"1(+0)ADM", "UNKNOWN"
};
#define HPSA_RAID_0	0
#define HPSA_RAID_4	1
#define HPSA_RAID_1	2	/* also used for RAID 10 */
#define HPSA_RAID_5	3	/* also used for RAID 50 */
#define HPSA_RAID_51	4
#define HPSA_RAID_6	5	/* also used for RAID 60 */
#define HPSA_RAID_ADM	6	/* also used for RAID 1+0 ADM */
#define RAID_UNKNOWN	(ARRAY_SIZE(raid_label) - 1)
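
/*
 * Illustrative mapping (added, derived from raid_label[] above): a
 * device raid_level of HPSA_RAID_1 (2) is shown as "RAID 1(+0)" and
 * HPSA_RAID_6 (5) as "RAID 6"; raid_level_show() below clamps any
 * out-of-range value to RAID_UNKNOWN rather than indexing past the
 * array.
 */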

static ssize_t raid_level_show(struct device *dev,
	     struct device_attribute *attr, char *buf)
{
	ssize_t l = 0;
	unsigned char rlevel;
	struct ctlr_info *h;
	struct scsi_device *sdev;
	struct hpsa_scsi_dev_t *hdev;
	unsigned long flags;

	sdev = to_scsi_device(dev);
	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->lock, flags);
	hdev = sdev->hostdata;
	if (!hdev) {
		spin_unlock_irqrestore(&h->lock, flags);
		return -ENODEV;
	}

	/* Is this even a logical drive? */
	if (!is_logical_dev_addr_mode(hdev->scsi3addr)) {
		spin_unlock_irqrestore(&h->lock, flags);
		l = snprintf(buf, PAGE_SIZE, "N/A\n");
		return l;
	}

	rlevel = hdev->raid_level;
	spin_unlock_irqrestore(&h->lock, flags);
	if (rlevel > RAID_UNKNOWN)
		rlevel = RAID_UNKNOWN;
	l = snprintf(buf, PAGE_SIZE, "RAID %s\n", raid_label[rlevel]);
	return l;
}

static ssize_t lunid_show(struct device *dev,
	     struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct scsi_device *sdev;
	struct hpsa_scsi_dev_t *hdev;
	unsigned long flags;
	unsigned char lunid[8];

	sdev = to_scsi_device(dev);
	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->lock, flags);
	hdev = sdev->hostdata;
	if (!hdev) {
		spin_unlock_irqrestore(&h->lock, flags);
		return -ENODEV;
	}
	memcpy(lunid, hdev->scsi3addr, sizeof(lunid));
	spin_unlock_irqrestore(&h->lock, flags);
	return snprintf(buf, 20, "0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
		lunid[0], lunid[1], lunid[2], lunid[3],
		lunid[4], lunid[5], lunid[6], lunid[7]);
}

static ssize_t unique_id_show(struct device *dev,
	     struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct scsi_device *sdev;
	struct hpsa_scsi_dev_t *hdev;
	unsigned long flags;
	unsigned char sn[16];

	sdev = to_scsi_device(dev);
	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->lock, flags);
	hdev = sdev->hostdata;
	if (!hdev) {
		spin_unlock_irqrestore(&h->lock, flags);
		return -ENODEV;
	}
	memcpy(sn, hdev->device_id, sizeof(sn));
	spin_unlock_irqrestore(&h->lock, flags);
	return snprintf(buf, 16 * 2 + 2,
			"%02X%02X%02X%02X%02X%02X%02X%02X"
			"%02X%02X%02X%02X%02X%02X%02X%02X\n",
			sn[0], sn[1], sn[2], sn[3],
			sn[4], sn[5], sn[6], sn[7],
			sn[8], sn[9], sn[10], sn[11],
			sn[12], sn[13], sn[14], sn[15]);
}

static ssize_t host_show_hp_ssd_smart_path_enabled(struct device *dev,
	     struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct scsi_device *sdev;
	struct hpsa_scsi_dev_t *hdev;
	unsigned long flags;
	int offload_enabled;

	sdev = to_scsi_device(dev);
	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->lock, flags);
	hdev = sdev->hostdata;
	if (!hdev) {
		spin_unlock_irqrestore(&h->lock, flags);
		return -ENODEV;
	}
	offload_enabled = hdev->offload_enabled;
	spin_unlock_irqrestore(&h->lock, flags);
	return snprintf(buf, 20, "%d\n", offload_enabled);
}

static DEVICE_ATTR(raid_level, S_IRUGO, raid_level_show, NULL);
static DEVICE_ATTR(lunid, S_IRUGO, lunid_show, NULL);
static DEVICE_ATTR(unique_id, S_IRUGO, unique_id_show, NULL);
static DEVICE_ATTR(rescan, S_IWUSR, NULL, host_store_rescan);
static DEVICE_ATTR(hp_ssd_smart_path_enabled, S_IRUGO,
			host_show_hp_ssd_smart_path_enabled, NULL);
static DEVICE_ATTR(hp_ssd_smart_path_status, S_IWUSR|S_IRUGO|S_IROTH,
		host_show_hp_ssd_smart_path_status,
		host_store_hp_ssd_smart_path_status);
static DEVICE_ATTR(raid_offload_debug, S_IWUSR, NULL,
			host_store_raid_offload_debug);
static DEVICE_ATTR(firmware_revision, S_IRUGO,
	host_show_firmware_revision, NULL);
static DEVICE_ATTR(commands_outstanding, S_IRUGO,
	host_show_commands_outstanding, NULL);
static DEVICE_ATTR(transport_mode, S_IRUGO,
	host_show_transport_mode, NULL);
static DEVICE_ATTR(resettable, S_IRUGO,
	host_show_resettable, NULL);
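
/*
 * Note (added): each DEVICE_ATTR(name, ...) above expands to a struct
 * device_attribute called dev_attr_<name>; those objects are what the
 * hpsa_sdev_attrs[] and hpsa_shost_attrs[] tables below collect for
 * registration via the scsi_host_template.
 */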

static struct device_attribute *hpsa_sdev_attrs[] = {
	&dev_attr_raid_level,
	&dev_attr_lunid,
	&dev_attr_unique_id,
	&dev_attr_hp_ssd_smart_path_enabled,
	NULL,
};

static struct device_attribute *hpsa_shost_attrs[] = {
	&dev_attr_rescan,
	&dev_attr_firmware_revision,
	&dev_attr_commands_outstanding,
	&dev_attr_transport_mode,
	&dev_attr_resettable,
	&dev_attr_hp_ssd_smart_path_status,
	&dev_attr_raid_offload_debug,
	NULL,
};

static struct scsi_host_template hpsa_driver_template = {
	.module			= THIS_MODULE,
	.name			= HPSA,
	.proc_name		= HPSA,
	.queuecommand		= hpsa_scsi_queue_command,
	.scan_start		= hpsa_scan_start,
	.scan_finished		= hpsa_scan_finished,
	.change_queue_depth	= hpsa_change_queue_depth,
	.this_id		= -1,
	.use_clustering		= ENABLE_CLUSTERING,
	.eh_abort_handler	= hpsa_eh_abort_handler,
	.eh_device_reset_handler = hpsa_eh_device_reset_handler,
	.ioctl			= hpsa_ioctl,
	.slave_alloc		= hpsa_slave_alloc,
	.slave_destroy		= hpsa_slave_destroy,
#ifdef CONFIG_COMPAT
	.compat_ioctl		= hpsa_compat_ioctl,
#endif
	.sdev_attrs		= hpsa_sdev_attrs,
	.shost_attrs		= hpsa_shost_attrs,
	.max_sectors		= 8192,
	.no_write_same		= 1,
};

static inline u32 next_command(struct ctlr_info *h, u8 q)
{
	u32 a;
	struct reply_queue_buffer *rq = &h->reply_queue[q];

	if (h->transMethod & CFGTBL_Trans_io_accel1)
		return h->access.command_completed(h, q);

	if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
		return h->access.command_completed(h, q);

	if ((rq->head[rq->current_entry] & 1) == rq->wraparound) {
		a = rq->head[rq->current_entry];
		rq->current_entry++;
		atomic_dec(&h->commands_outstanding);
	} else {
		a = FIFO_EMPTY;
	}
	/* Check for wraparound */
	if (rq->current_entry == h->max_commands) {
		rq->current_entry = 0;
		rq->wraparound ^= 1;
	}
	return a;
}
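
/*
 * Note (added; an interpretation of the logic above, not original text):
 * the reply queue is consumed as a ring whose entries carry a cycle flag
 * in bit 0.  rq->wraparound flips each time current_entry wraps past
 * h->max_commands, so a slot is treated as holding a fresh completion
 * only while its low bit matches the consumer's current cycle; leftover
 * entries from the previous pass fail the comparison and are reported
 * as FIFO_EMPTY instead of being completed twice.
 */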

/*
 * There are some special bits in the bus address of the
 * command that we have to set for the controller to know
 * how to process the command:
 *
 * Normal performant mode:
 * bit 0: 1 means performant mode, 0 means simple mode.
 * bits 1-3 = block fetch table entry
 * bits 4-6 = command type (== 0)
 *
 * ioaccel1 mode:
 * bit 0 = "performant mode" bit.
 * bits 1-3 = block fetch table entry
 * bits 4-6 = command type (== 110)
 * (command type is needed because ioaccel1 mode
 * commands are submitted through the same register as normal
 * mode commands, so this is how the controller knows whether
 * the command is normal mode or ioaccel1 mode.)
 *
 * ioaccel2 mode:
 * bit 0 = "performant mode" bit.
 * bits 1-4 = block fetch table entry (note extra bit)
 * bits 4-6 = not needed, because ioaccel2 mode has
 * a separate special register for submitting commands.
 */

/* set_performant_mode: Modify the tag for cciss performant
 * set bit 0 for pull model, bits 3-1 for block fetch
 * register number
 */
static void set_performant_mode(struct ctlr_info *h, struct CommandList *c)
{
	if (likely(h->transMethod & CFGTBL_Trans_Performant)) {
		c->busaddr |= 1 | (h->blockFetchTable[c->Header.SGList] << 1);
		if (likely(h->msix_vector > 0))
			c->Header.ReplyQueue =
				raw_smp_processor_id() % h->nreply_queues;
	}
}

static void set_ioaccel1_performant_mode(struct ctlr_info *h,
						struct CommandList *c)
{
	struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[c->cmdindex];

	/* Tell the controller to post the reply to the queue for this
	 * processor.  This seems to give the best I/O throughput.
	 */
	cp->ReplyQueue = smp_processor_id() % h->nreply_queues;
	/* Set the bits in the address sent down to include:
	 *  - performant mode bit (bit 0)
	 *  - pull count (bits 1-3)
	 *  - command type (bits 4-6)
	 */
	c->busaddr |= 1 | (h->ioaccel1_blockFetchTable[c->Header.SGList] << 1) |
					IOACCEL1_BUSADDR_CMDTYPE;
}

static void set_ioaccel2_performant_mode(struct ctlr_info *h,
						struct CommandList *c)
{
	struct io_accel2_cmd *cp = &h->ioaccel2_cmd_pool[c->cmdindex];

	/* Tell the controller to post the reply to the queue for this
	 * processor.  This seems to give the best I/O throughput.
	 */
	cp->reply_queue = smp_processor_id() % h->nreply_queues;
	/* Set the bits in the address sent down to include:
	 *  - performant mode bit not used in ioaccel mode 2
	 *  - pull count (bits 0-3)
	 *  - command type isn't needed for ioaccel2
	 */
	c->busaddr |= (h->ioaccel2_blockFetchTable[cp->sg_count]);
}
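
/*
 * Note (added): of the three tag-setup helpers above, only the ioaccel2
 * variant indexes its block fetch table with the ioaccel2 command's own
 * scatter-gather count (cp->sg_count) rather than c->Header.SGList, and
 * it sets no command-type bits, since ioaccel2 commands are submitted
 * through their own register as described in the comment block above.
 */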

static int is_firmware_flash_cmd(u8 *cdb)
{
	return cdb[0] == BMIC_WRITE && cdb[6] == BMIC_FLASH_FIRMWARE;
}

/*
 * During firmware flash, the heartbeat register may not update as frequently
 * as it should.  So we dial down lockup detection during firmware flash, and
 * dial it back up when firmware flash completes.
 */
#define HEARTBEAT_SAMPLE_INTERVAL_DURING_FLASH (240 * HZ)
#define HEARTBEAT_SAMPLE_INTERVAL (30 * HZ)
static void dial_down_lockup_detection_during_fw_flash(struct ctlr_info *h,
		struct CommandList *c)
{
	if (!is_firmware_flash_cmd(c->Request.CDB))
		return;
	atomic_inc(&h->firmware_flash_in_progress);
	h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL_DURING_FLASH;
}

static void dial_up_lockup_detection_on_fw_flash_complete(struct ctlr_info *h,
		struct CommandList *c)
{
	if (is_firmware_flash_cmd(c->Request.CDB) &&
		atomic_dec_and_test(&h->firmware_flash_in_progress))
		h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL;
}

static void enqueue_cmd_and_start_io(struct ctlr_info *h,
	struct CommandList *c)
{
	switch (c->cmd_type) {
	case CMD_IOACCEL1:
		set_ioaccel1_performant_mode(h, c);
		break;
	case CMD_IOACCEL2:
		set_ioaccel2_performant_mode(h, c);
		break;
	default:
		set_performant_mode(h, c);
	}
	dial_down_lockup_detection_during_fw_flash(h, c);
	atomic_inc(&h->commands_outstanding);
	h->access.submit_command(h, c);
}

static inline int is_hba_lunid(unsigned char scsi3addr[])
{
	return memcmp(scsi3addr, RAID_CTLR_LUNID, 8) == 0;
}

static inline int is_scsi_rev_5(struct ctlr_info *h)
{
	if (!h->hba_inquiry_data)
		return 0;
	if ((h->hba_inquiry_data[2] & 0x07) == 5)
		return 1;
	return 0;
}

static int hpsa_find_target_lun(struct ctlr_info *h,
	unsigned char scsi3addr[], int bus, int *target, int *lun)
{
	/* finds an unused bus, target, lun for a new physical device
	 * assumes h->devlock is held
	 */
	int i, found = 0;
	DECLARE_BITMAP(lun_taken, HPSA_MAX_DEVICES);

	bitmap_zero(lun_taken, HPSA_MAX_DEVICES);

	for (i = 0; i < h->ndevices; i++) {
		if (h->dev[i]->bus == bus && h->dev[i]->target != -1)
			__set_bit(h->dev[i]->target, lun_taken);
	}

	i = find_first_zero_bit(lun_taken, HPSA_MAX_DEVICES);
	if (i < HPSA_MAX_DEVICES) {
		/* *bus = 1; */
		*target = i;
		*lun = 0;
		found = 1;
	}
	return !found;
}
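
/*
 * Worked example (added): hpsa_find_target_lun() treats the target
 * numbers already in use on the requested bus as a bitmap.  If targets
 * 0, 1 and 3 are taken, find_first_zero_bit() returns 2, so the new
 * physical device becomes target 2, lun 0; only when all
 * HPSA_MAX_DEVICES target numbers are in use does the function return
 * nonzero to report failure.
 */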

/* Add an entry into h->dev[] array. */
static int hpsa_scsi_add_entry(struct ctlr_info *h, int hostno,
		struct hpsa_scsi_dev_t *device,
		struct hpsa_scsi_dev_t *added[], int *nadded)
{
	/* assumes h->devlock is held */
	int n = h->ndevices;
	int i;
	unsigned char addr1[8], addr2[8];
	struct hpsa_scsi_dev_t *sd;

	if (n >= HPSA_MAX_DEVICES) {
		dev_err(&h->pdev->dev, "too many devices, some will be "
			"inaccessible.\n");
		return -1;
	}

	/* physical devices do not have lun or target assigned until now. */
	if (device->lun != -1)
		/* Logical device, lun is already assigned. */
		goto lun_assigned;

	/* If this device is a non-zero lun of a multi-lun device
	 * byte 4 of the 8-byte LUN addr will contain the logical
	 * unit no, zero otherwise.
	 */
	if (device->scsi3addr[4] == 0) {
		/* This is not a non-zero lun of a multi-lun device */
		if (hpsa_find_target_lun(h, device->scsi3addr,
			device->bus, &device->target, &device->lun) != 0)
			return -1;
		goto lun_assigned;
	}

	/* This is a non-zero lun of a multi-lun device.
	 * Search through our list and find the device which
	 * has the same 8 byte LUN address, excepting byte 4.
	 * Assign the same bus and target for this new LUN.
	 * Use the logical unit number from the firmware.
	 */
	memcpy(addr1, device->scsi3addr, 8);
	addr1[4] = 0;
	for (i = 0; i < n; i++) {
		sd = h->dev[i];
		memcpy(addr2, sd->scsi3addr, 8);
		addr2[4] = 0;
		/* differ only in byte 4? */
		if (memcmp(addr1, addr2, 8) == 0) {
			device->bus = sd->bus;
			device->target = sd->target;
			device->lun = device->scsi3addr[4];
			break;
		}
	}
	if (device->lun == -1) {
		dev_warn(&h->pdev->dev, "physical device with no LUN=0,"
			" suspect firmware bug or unsupported hardware "
			"configuration.\n");
		return -1;
	}

lun_assigned:

	h->dev[n] = device;
	h->ndevices++;
	added[*nadded] = device;
	(*nadded)++;

	/* initially, (before registering with scsi layer) we don't
	 * know our hostno and we don't want to print anything first
	 * time anyway (the scsi layer's inquiries will show that info)
	 */
	/* if (hostno != -1) */
	dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d added.\n",
		scsi_device_type(device->devtype), hostno,
		device->bus, device->target, device->lun);
	return 0;
}

/* Update an entry in h->dev[] array. */
static void hpsa_scsi_update_entry(struct ctlr_info *h, int hostno,
	int entry, struct hpsa_scsi_dev_t *new_entry)
{
	/* assumes h->devlock is held */
	BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);

	/* Raid level changed. */
	h->dev[entry]->raid_level = new_entry->raid_level;

	/* Raid offload parameters changed. */
	h->dev[entry]->offload_config = new_entry->offload_config;
	h->dev[entry]->offload_enabled = new_entry->offload_enabled;
	h->dev[entry]->ioaccel_handle = new_entry->ioaccel_handle;
	h->dev[entry]->offload_to_mirror = new_entry->offload_to_mirror;
	h->dev[entry]->raid_map = new_entry->raid_map;

	dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d updated.\n",
		scsi_device_type(new_entry->devtype), hostno, new_entry->bus,
		new_entry->target, new_entry->lun);
}

/* Replace an entry from h->dev[] array. */
static void hpsa_scsi_replace_entry(struct ctlr_info *h, int hostno,
	int entry, struct hpsa_scsi_dev_t *new_entry,
	struct hpsa_scsi_dev_t *added[], int *nadded,
	struct hpsa_scsi_dev_t *removed[], int *nremoved)
{
	/* assumes h->devlock is held */
	BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);
	removed[*nremoved] = h->dev[entry];
	(*nremoved)++;

	/*
	 * New physical devices won't have target/lun assigned yet
	 * so we need to preserve the values in the slot we are replacing.
	 */
	if (new_entry->target == -1) {
		new_entry->target = h->dev[entry]->target;
		new_entry->lun = h->dev[entry]->lun;
	}

	h->dev[entry] = new_entry;
	added[*nadded] = new_entry;
	(*nadded)++;
	dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d changed.\n",
		scsi_device_type(new_entry->devtype), hostno, new_entry->bus,
		new_entry->target, new_entry->lun);
}
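
/*
 * Worked example (added) for hpsa_scsi_add_entry() above: byte 4 of the
 * 8-byte LUN address carries the logical unit number of a multi-lun
 * physical device.  If a device is already known at an address with
 * byte 4 == 0 as, say, c0b2t5l0, a newly reported address differing
 * only in byte 4 == 1 inherits bus 2 / target 5 and is assigned lun 1
 * straight from that firmware-supplied byte.
 */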

/* Remove an entry from h->dev[] array. */
static void hpsa_scsi_remove_entry(struct ctlr_info *h, int hostno, int entry,
	struct hpsa_scsi_dev_t *removed[], int *nremoved)
{
	/* assumes h->devlock is held */
	int i;
	struct hpsa_scsi_dev_t *sd;

	BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);

	sd = h->dev[entry];
	removed[*nremoved] = h->dev[entry];
	(*nremoved)++;

	for (i = entry; i < h->ndevices-1; i++)
		h->dev[i] = h->dev[i+1];
	h->ndevices--;
	dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d removed.\n",
		scsi_device_type(sd->devtype), hostno, sd->bus, sd->target,
		sd->lun);
}

#define SCSI3ADDR_EQ(a, b) ( \
	(a)[7] == (b)[7] && \
	(a)[6] == (b)[6] && \
	(a)[5] == (b)[5] && \
	(a)[4] == (b)[4] && \
	(a)[3] == (b)[3] && \
	(a)[2] == (b)[2] && \
	(a)[1] == (b)[1] && \
	(a)[0] == (b)[0])
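
/*
 * Note (added): SCSI3ADDR_EQ() is an unrolled byte-wise comparison of
 * two 8-byte LUN addresses, equivalent to memcmp(a, b, 8) == 0.
 */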
1108 */ 1109 static int hpsa_scsi_find_entry(struct hpsa_scsi_dev_t *needle, 1110 struct hpsa_scsi_dev_t *haystack[], int haystack_size, 1111 int *index) 1112 { 1113 int i; 1114 #define DEVICE_NOT_FOUND 0 1115 #define DEVICE_CHANGED 1 1116 #define DEVICE_SAME 2 1117 #define DEVICE_UPDATED 3 1118 for (i = 0; i < haystack_size; i++) { 1119 if (haystack[i] == NULL) /* previously removed. */ 1120 continue; 1121 if (SCSI3ADDR_EQ(needle->scsi3addr, haystack[i]->scsi3addr)) { 1122 *index = i; 1123 if (device_is_the_same(needle, haystack[i])) { 1124 if (device_updated(needle, haystack[i])) 1125 return DEVICE_UPDATED; 1126 return DEVICE_SAME; 1127 } else { 1128 /* Keep offline devices offline */ 1129 if (needle->volume_offline) 1130 return DEVICE_NOT_FOUND; 1131 return DEVICE_CHANGED; 1132 } 1133 } 1134 } 1135 *index = -1; 1136 return DEVICE_NOT_FOUND; 1137 } 1138 1139 static void hpsa_monitor_offline_device(struct ctlr_info *h, 1140 unsigned char scsi3addr[]) 1141 { 1142 struct offline_device_entry *device; 1143 unsigned long flags; 1144 1145 /* Check to see if device is already on the list */ 1146 spin_lock_irqsave(&h->offline_device_lock, flags); 1147 list_for_each_entry(device, &h->offline_device_list, offline_list) { 1148 if (memcmp(device->scsi3addr, scsi3addr, 1149 sizeof(device->scsi3addr)) == 0) { 1150 spin_unlock_irqrestore(&h->offline_device_lock, flags); 1151 return; 1152 } 1153 } 1154 spin_unlock_irqrestore(&h->offline_device_lock, flags); 1155 1156 /* Device is not on the list, add it. */ 1157 device = kmalloc(sizeof(*device), GFP_KERNEL); 1158 if (!device) { 1159 dev_warn(&h->pdev->dev, "out of memory in %s\n", __func__); 1160 return; 1161 } 1162 memcpy(device->scsi3addr, scsi3addr, sizeof(device->scsi3addr)); 1163 spin_lock_irqsave(&h->offline_device_lock, flags); 1164 list_add_tail(&device->offline_list, &h->offline_device_list); 1165 spin_unlock_irqrestore(&h->offline_device_lock, flags); 1166 } 1167 1168 /* Print a message explaining various offline volume states */ 1169 static void hpsa_show_volume_status(struct ctlr_info *h, 1170 struct hpsa_scsi_dev_t *sd) 1171 { 1172 if (sd->volume_offline == HPSA_VPD_LV_STATUS_UNSUPPORTED) 1173 dev_info(&h->pdev->dev, 1174 "C%d:B%d:T%d:L%d Volume status is not available through vital product data pages.\n", 1175 h->scsi_host->host_no, 1176 sd->bus, sd->target, sd->lun); 1177 switch (sd->volume_offline) { 1178 case HPSA_LV_OK: 1179 break; 1180 case HPSA_LV_UNDERGOING_ERASE: 1181 dev_info(&h->pdev->dev, 1182 "C%d:B%d:T%d:L%d Volume is undergoing background erase process.\n", 1183 h->scsi_host->host_no, 1184 sd->bus, sd->target, sd->lun); 1185 break; 1186 case HPSA_LV_UNDERGOING_RPI: 1187 dev_info(&h->pdev->dev, 1188 "C%d:B%d:T%d:L%d Volume is undergoing rapid parity initialization process.\n", 1189 h->scsi_host->host_no, 1190 sd->bus, sd->target, sd->lun); 1191 break; 1192 case HPSA_LV_PENDING_RPI: 1193 dev_info(&h->pdev->dev, 1194 "C%d:B%d:T%d:L%d Volume is queued for rapid parity initialization process.\n", 1195 h->scsi_host->host_no, 1196 sd->bus, sd->target, sd->lun); 1197 break; 1198 case HPSA_LV_ENCRYPTED_NO_KEY: 1199 dev_info(&h->pdev->dev, 1200 "C%d:B%d:T%d:L%d Volume is encrypted and cannot be accessed because key is not present.\n", 1201 h->scsi_host->host_no, 1202 sd->bus, sd->target, sd->lun); 1203 break; 1204 case HPSA_LV_PLAINTEXT_IN_ENCRYPT_ONLY_CONTROLLER: 1205 dev_info(&h->pdev->dev, 1206 "C%d:B%d:T%d:L%d Volume is not encrypted and cannot be accessed because controller is in encryption-only mode.\n", 1207 

/* Print a message explaining various offline volume states */
static void hpsa_show_volume_status(struct ctlr_info *h,
	struct hpsa_scsi_dev_t *sd)
{
	if (sd->volume_offline == HPSA_VPD_LV_STATUS_UNSUPPORTED)
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume status is not available through vital product data pages.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
	switch (sd->volume_offline) {
	case HPSA_LV_OK:
		break;
	case HPSA_LV_UNDERGOING_ERASE:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is undergoing background erase process.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_UNDERGOING_RPI:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is undergoing rapid parity initialization process.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_PENDING_RPI:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is queued for rapid parity initialization process.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_ENCRYPTED_NO_KEY:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is encrypted and cannot be accessed because key is not present.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_PLAINTEXT_IN_ENCRYPT_ONLY_CONTROLLER:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is not encrypted and cannot be accessed because controller is in encryption-only mode.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_UNDERGOING_ENCRYPTION:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is undergoing encryption process.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_UNDERGOING_ENCRYPTION_REKEYING:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is undergoing encryption re-keying process.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is encrypted and cannot be accessed because controller does not have encryption enabled.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_PENDING_ENCRYPTION:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is pending migration to encrypted state, but process has not started.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_PENDING_ENCRYPTION_REKEYING:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is encrypted and is pending encryption rekeying.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	}
}

static void adjust_hpsa_scsi_table(struct ctlr_info *h, int hostno,
	struct hpsa_scsi_dev_t *sd[], int nsds)
{
	/* sd contains scsi3 addresses and devtypes, and inquiry
	 * data.  This function takes what's in sd to be the current
	 * reality and updates h->dev[] to reflect that reality.
	 */
	int i, entry, device_change, changes = 0;
	struct hpsa_scsi_dev_t *csd;
	unsigned long flags;
	struct hpsa_scsi_dev_t **added, **removed;
	int nadded, nremoved;
	struct Scsi_Host *sh = NULL;

	added = kzalloc(sizeof(*added) * HPSA_MAX_DEVICES, GFP_KERNEL);
	removed = kzalloc(sizeof(*removed) * HPSA_MAX_DEVICES, GFP_KERNEL);

	if (!added || !removed) {
		dev_warn(&h->pdev->dev, "out of memory in "
			"adjust_hpsa_scsi_table\n");
		goto free_and_out;
	}

	spin_lock_irqsave(&h->devlock, flags);

	/* find any devices in h->dev[] that are not in
	 * sd[] and remove them from h->dev[], and for any
	 * devices which have changed, remove the old device
	 * info and add the new device info.
	 * If minor device attributes change, just update
	 * the existing device structure.
	 */
	i = 0;
	nremoved = 0;
	nadded = 0;
	while (i < h->ndevices) {
		csd = h->dev[i];
		device_change = hpsa_scsi_find_entry(csd, sd, nsds, &entry);
		if (device_change == DEVICE_NOT_FOUND) {
			changes++;
			hpsa_scsi_remove_entry(h, hostno, i,
				removed, &nremoved);
			continue; /* remove ^^^, hence i not incremented */
		} else if (device_change == DEVICE_CHANGED) {
			changes++;
			hpsa_scsi_replace_entry(h, hostno, i, sd[entry],
				added, &nadded, removed, &nremoved);
			/* Set it to NULL to prevent it from being freed
			 * at the bottom of hpsa_update_scsi_devices()
			 */
			sd[entry] = NULL;
		} else if (device_change == DEVICE_UPDATED) {
			hpsa_scsi_update_entry(h, hostno, i, sd[entry]);
		}
		i++;
	}

	/* Now, make sure every device listed in sd[] is also
	 * listed in h->dev[], adding them if they aren't found
	 */

	for (i = 0; i < nsds; i++) {
		if (!sd[i]) /* if already added above. */
			continue;

		/* Don't add devices which are NOT READY, FORMAT IN PROGRESS
		 * as the SCSI mid-layer does not handle such devices well.
		 * It relentlessly loops sending TUR at 3Hz, then READ(10)
		 * at 160Hz, and prevents the system from coming up.
		 */
		if (sd[i]->volume_offline) {
			hpsa_show_volume_status(h, sd[i]);
			dev_info(&h->pdev->dev, "c%db%dt%dl%d: temporarily offline\n",
				h->scsi_host->host_no,
				sd[i]->bus, sd[i]->target, sd[i]->lun);
			continue;
		}

		device_change = hpsa_scsi_find_entry(sd[i], h->dev,
					h->ndevices, &entry);
		if (device_change == DEVICE_NOT_FOUND) {
			changes++;
			if (hpsa_scsi_add_entry(h, hostno, sd[i],
				added, &nadded) != 0)
				break;
			sd[i] = NULL; /* prevent from being freed later. */
		} else if (device_change == DEVICE_CHANGED) {
			/* should never happen... */
			changes++;
			dev_warn(&h->pdev->dev,
				"device unexpectedly changed.\n");
			/* but if it does happen, we just ignore that device */
		}
	}
	spin_unlock_irqrestore(&h->devlock, flags);

	/* Monitor devices which are in one of several NOT READY states to be
	 * brought online later. This must be done without holding h->devlock,
	 * so don't touch h->dev[]
	 */
	for (i = 0; i < nsds; i++) {
		if (!sd[i]) /* if already added above. */
			continue;
		if (sd[i]->volume_offline)
			hpsa_monitor_offline_device(h, sd[i]->scsi3addr);
	}

	/* Don't notify scsi mid layer of any changes the first time through
	 * (or if there are no changes) scsi_scan_host will do it later the
	 * first time through.
	 */
	if (hostno == -1 || !changes)
		goto free_and_out;

	sh = h->scsi_host;
	/* Notify scsi mid layer of any removed devices */
	for (i = 0; i < nremoved; i++) {
		struct scsi_device *sdev =
			scsi_device_lookup(sh, removed[i]->bus,
				removed[i]->target, removed[i]->lun);
		if (sdev != NULL) {
			scsi_remove_device(sdev);
			scsi_device_put(sdev);
		} else {
			/* We don't expect to get here.
			 * future cmds to this device will get selection
			 * timeout as if the device was gone.
			 */
			dev_warn(&h->pdev->dev, "didn't find c%db%dt%dl%d "
				" for removal.", hostno, removed[i]->bus,
				removed[i]->target, removed[i]->lun);
		}
		kfree(removed[i]);
		removed[i] = NULL;
	}

	/* Notify scsi mid layer of any added devices */
	for (i = 0; i < nadded; i++) {
		if (scsi_add_device(sh, added[i]->bus,
			added[i]->target, added[i]->lun) == 0)
			continue;
		dev_warn(&h->pdev->dev, "scsi_add_device c%db%dt%dl%d failed, "
			"device not added.\n", hostno, added[i]->bus,
			added[i]->target, added[i]->lun);
		/* now we have to remove it from h->dev,
		 * since it didn't get added to scsi mid layer
		 */
		fixup_botched_add(h, added[i]);
	}

free_and_out:
	kfree(added);
	kfree(removed);
}

/*
 * Lookup bus/target/lun and return corresponding struct hpsa_scsi_dev_t *
 * Assumes h->devlock is held.
 */
static struct hpsa_scsi_dev_t *lookup_hpsa_scsi_dev(struct ctlr_info *h,
	int bus, int target, int lun)
{
	int i;
	struct hpsa_scsi_dev_t *sd;

	for (i = 0; i < h->ndevices; i++) {
		sd = h->dev[i];
		if (sd->bus == bus && sd->target == target && sd->lun == lun)
			return sd;
	}
	return NULL;
}

/* link sdev->hostdata to our per-device structure. */
static int hpsa_slave_alloc(struct scsi_device *sdev)
{
	struct hpsa_scsi_dev_t *sd;
	unsigned long flags;
	struct ctlr_info *h;

	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->devlock, flags);
	sd = lookup_hpsa_scsi_dev(h, sdev_channel(sdev),
		sdev_id(sdev), sdev->lun);
	if (sd != NULL)
		sdev->hostdata = sd;
	spin_unlock_irqrestore(&h->devlock, flags);
	return 0;
}

static void hpsa_slave_destroy(struct scsi_device *sdev)
{
	/* nothing to do. */
}

static void hpsa_free_sg_chain_blocks(struct ctlr_info *h)
{
	int i;

	if (!h->cmd_sg_list)
		return;
	for (i = 0; i < h->nr_cmds; i++) {
		kfree(h->cmd_sg_list[i]);
		h->cmd_sg_list[i] = NULL;
	}
	kfree(h->cmd_sg_list);
	h->cmd_sg_list = NULL;
}

static int hpsa_allocate_sg_chain_blocks(struct ctlr_info *h)
{
	int i;

	if (h->chainsize <= 0)
		return 0;

	h->cmd_sg_list = kzalloc(sizeof(*h->cmd_sg_list) * h->nr_cmds,
				GFP_KERNEL);
	if (!h->cmd_sg_list) {
		dev_err(&h->pdev->dev, "Failed to allocate SG list\n");
		return -ENOMEM;
	}
	for (i = 0; i < h->nr_cmds; i++) {
		h->cmd_sg_list[i] = kmalloc(sizeof(*h->cmd_sg_list[i]) *
						h->chainsize, GFP_KERNEL);
		if (!h->cmd_sg_list[i]) {
			dev_err(&h->pdev->dev, "Failed to allocate cmd SG\n");
			goto clean;
		}
	}
	return 0;

clean:
	hpsa_free_sg_chain_blocks(h);
	return -ENOMEM;
}

static int hpsa_map_sg_chain_block(struct ctlr_info *h,
	struct CommandList *c)
{
	struct SGDescriptor *chain_sg, *chain_block;
	u64 temp64;
	u32 chain_len;

	chain_sg = &c->SG[h->max_cmd_sg_entries - 1];
	chain_block = h->cmd_sg_list[c->cmdindex];
	chain_sg->Ext = cpu_to_le32(HPSA_SG_CHAIN);
	chain_len = sizeof(*chain_sg) *
		(le16_to_cpu(c->Header.SGTotal) - h->max_cmd_sg_entries);
	chain_sg->Len = cpu_to_le32(chain_len);
	temp64 = pci_map_single(h->pdev, chain_block, chain_len,
				PCI_DMA_TODEVICE);
	if (dma_mapping_error(&h->pdev->dev, temp64)) {
		/* prevent subsequent unmapping */
		chain_sg->Addr = cpu_to_le64(0);
		return -1;
	}
	chain_sg->Addr = cpu_to_le64(temp64);
	return 0;
}

static void hpsa_unmap_sg_chain_block(struct ctlr_info *h,
	struct CommandList *c)
{
	struct SGDescriptor *chain_sg;

	if (le16_to_cpu(c->Header.SGTotal) <= h->max_cmd_sg_entries)
		return;

	chain_sg = &c->SG[h->max_cmd_sg_entries - 1];
	pci_unmap_single(h->pdev, le64_to_cpu(chain_sg->Addr),
			le32_to_cpu(chain_sg->Len), PCI_DMA_TODEVICE);
}
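
/*
 * Worked example (added): with h->max_cmd_sg_entries == 32 and
 * Header.SGTotal == 41, hpsa_map_sg_chain_block() marks c->SG[31] as
 * HPSA_SG_CHAIN and points it at this command's chain block
 * (h->cmd_sg_list[c->cmdindex]); chain_len works out to
 * (41 - 32) * sizeof(struct SGDescriptor), i.e. room for the nine
 * descriptors that did not fit in c->SG[].
 */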
1521 */ 1522 static int handle_ioaccel_mode2_error(struct ctlr_info *h, 1523 struct CommandList *c, 1524 struct scsi_cmnd *cmd, 1525 struct io_accel2_cmd *c2) 1526 { 1527 int data_len; 1528 int retry = 0; 1529 1530 switch (c2->error_data.serv_response) { 1531 case IOACCEL2_SERV_RESPONSE_COMPLETE: 1532 switch (c2->error_data.status) { 1533 case IOACCEL2_STATUS_SR_TASK_COMP_GOOD: 1534 break; 1535 case IOACCEL2_STATUS_SR_TASK_COMP_CHK_COND: 1536 dev_warn(&h->pdev->dev, 1537 "%s: task complete with check condition.\n", 1538 "HP SSD Smart Path"); 1539 cmd->result |= SAM_STAT_CHECK_CONDITION; 1540 if (c2->error_data.data_present != 1541 IOACCEL2_SENSE_DATA_PRESENT) { 1542 memset(cmd->sense_buffer, 0, 1543 SCSI_SENSE_BUFFERSIZE); 1544 break; 1545 } 1546 /* copy the sense data */ 1547 data_len = c2->error_data.sense_data_len; 1548 if (data_len > SCSI_SENSE_BUFFERSIZE) 1549 data_len = SCSI_SENSE_BUFFERSIZE; 1550 if (data_len > sizeof(c2->error_data.sense_data_buff)) 1551 data_len = 1552 sizeof(c2->error_data.sense_data_buff); 1553 memcpy(cmd->sense_buffer, 1554 c2->error_data.sense_data_buff, data_len); 1555 retry = 1; 1556 break; 1557 case IOACCEL2_STATUS_SR_TASK_COMP_BUSY: 1558 dev_warn(&h->pdev->dev, 1559 "%s: task complete with BUSY status.\n", 1560 "HP SSD Smart Path"); 1561 retry = 1; 1562 break; 1563 case IOACCEL2_STATUS_SR_TASK_COMP_RES_CON: 1564 dev_warn(&h->pdev->dev, 1565 "%s: task complete with reservation conflict.\n", 1566 "HP SSD Smart Path"); 1567 retry = 1; 1568 break; 1569 case IOACCEL2_STATUS_SR_TASK_COMP_SET_FULL: 1570 /* Make scsi midlayer do unlimited retries */ 1571 cmd->result = DID_IMM_RETRY << 16; 1572 break; 1573 case IOACCEL2_STATUS_SR_TASK_COMP_ABORTED: 1574 dev_warn(&h->pdev->dev, 1575 "%s: task complete with aborted status.\n", 1576 "HP SSD Smart Path"); 1577 retry = 1; 1578 break; 1579 default: 1580 dev_warn(&h->pdev->dev, 1581 "%s: task complete with unrecognized status: 0x%02x\n", 1582 "HP SSD Smart Path", c2->error_data.status); 1583 retry = 1; 1584 break; 1585 } 1586 break; 1587 case IOACCEL2_SERV_RESPONSE_FAILURE: 1588 /* don't expect to get here. */ 1589 dev_warn(&h->pdev->dev, 1590 "unexpected delivery or target failure, status = 0x%02x\n", 1591 c2->error_data.status); 1592 retry = 1; 1593 break; 1594 case IOACCEL2_SERV_RESPONSE_TMF_COMPLETE: 1595 break; 1596 case IOACCEL2_SERV_RESPONSE_TMF_SUCCESS: 1597 break; 1598 case IOACCEL2_SERV_RESPONSE_TMF_REJECTED: 1599 dev_warn(&h->pdev->dev, "task management function rejected.\n"); 1600 retry = 1; 1601 break; 1602 case IOACCEL2_SERV_RESPONSE_TMF_WRONG_LUN: 1603 dev_warn(&h->pdev->dev, "task management function invalid LUN\n"); 1604 break; 1605 default: 1606 dev_warn(&h->pdev->dev, 1607 "%s: Unrecognized server response: 0x%02x\n", 1608 "HP SSD Smart Path", 1609 c2->error_data.serv_response); 1610 retry = 1; 1611 break; 1612 } 1613 1614 return retry; /* retry on raid path? */ 1615 } 1616 1617 static void process_ioaccel2_completion(struct ctlr_info *h, 1618 struct CommandList *c, struct scsi_cmnd *cmd, 1619 struct hpsa_scsi_dev_t *dev) 1620 { 1621 struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex]; 1622 int raid_retry = 0; 1623 1624 /* check for good status */ 1625 if (likely(c2->error_data.serv_response == 0 && 1626 c2->error_data.status == 0)) { 1627 cmd_free(h, c); 1628 cmd->scsi_done(cmd); 1629 return; 1630 } 1631 1632 /* Any RAID offload error results in retry which will use 1633 * the normal I/O path so the controller can handle whatever's 1634 * wrong. 
1635 */ 1636 if (is_logical_dev_addr_mode(dev->scsi3addr) && 1637 c2->error_data.serv_response == 1638 IOACCEL2_SERV_RESPONSE_FAILURE) { 1639 dev->offload_enabled = 0; 1640 cmd->result = DID_SOFT_ERROR << 16; 1641 cmd_free(h, c); 1642 cmd->scsi_done(cmd); 1643 return; 1644 } 1645 raid_retry = handle_ioaccel_mode2_error(h, c, cmd, c2); 1646 /* If error found, disable Smart Path, 1647 * force a retry on the standard path. 1648 */ 1649 if (raid_retry) { 1650 dev_warn(&h->pdev->dev, "%s: Retrying on standard path.\n", 1651 "HP SSD Smart Path"); 1652 dev->offload_enabled = 0; /* Disable Smart Path */ 1653 cmd->result = DID_SOFT_ERROR << 16; 1654 } 1655 cmd_free(h, c); 1656 cmd->scsi_done(cmd); 1657 } 1658 1659 static void complete_scsi_command(struct CommandList *cp) 1660 { 1661 struct scsi_cmnd *cmd; 1662 struct ctlr_info *h; 1663 struct ErrorInfo *ei; 1664 struct hpsa_scsi_dev_t *dev; 1665 1666 unsigned char sense_key; 1667 unsigned char asc; /* additional sense code */ 1668 unsigned char ascq; /* additional sense code qualifier */ 1669 unsigned long sense_data_size; 1670 1671 ei = cp->err_info; 1672 cmd = (struct scsi_cmnd *) cp->scsi_cmd; 1673 h = cp->h; 1674 dev = cmd->device->hostdata; 1675 1676 scsi_dma_unmap(cmd); /* undo the DMA mappings */ 1677 if ((cp->cmd_type == CMD_SCSI) && 1678 (le16_to_cpu(cp->Header.SGTotal) > h->max_cmd_sg_entries)) 1679 hpsa_unmap_sg_chain_block(h, cp); 1680 1681 cmd->result = (DID_OK << 16); /* host byte */ 1682 cmd->result |= (COMMAND_COMPLETE << 8); /* msg byte */ 1683 1684 if (cp->cmd_type == CMD_IOACCEL2) 1685 return process_ioaccel2_completion(h, cp, cmd, dev); 1686 1687 cmd->result |= ei->ScsiStatus; 1688 1689 scsi_set_resid(cmd, ei->ResidualCnt); 1690 if (ei->CommandStatus == 0) { 1691 cmd_free(h, cp); 1692 cmd->scsi_done(cmd); 1693 return; 1694 } 1695 1696 /* copy the sense data */ 1697 if (SCSI_SENSE_BUFFERSIZE < sizeof(ei->SenseInfo)) 1698 sense_data_size = SCSI_SENSE_BUFFERSIZE; 1699 else 1700 sense_data_size = sizeof(ei->SenseInfo); 1701 if (ei->SenseLen < sense_data_size) 1702 sense_data_size = ei->SenseLen; 1703 1704 memcpy(cmd->sense_buffer, ei->SenseInfo, sense_data_size); 1705 1706 /* For I/O accelerator commands, copy over some fields to the normal 1707 * CISS header used below for error handling. 1708 */ 1709 if (cp->cmd_type == CMD_IOACCEL1) { 1710 struct io_accel1_cmd *c = &h->ioaccel_cmd_pool[cp->cmdindex]; 1711 cp->Header.SGList = scsi_sg_count(cmd); 1712 cp->Header.SGTotal = cpu_to_le16(cp->Header.SGList); 1713 cp->Request.CDBLen = le16_to_cpu(c->io_flags) & 1714 IOACCEL1_IOFLAGS_CDBLEN_MASK; 1715 cp->Header.tag = c->tag; 1716 memcpy(cp->Header.LUN.LunAddrBytes, c->CISS_LUN, 8); 1717 memcpy(cp->Request.CDB, c->CDB, cp->Request.CDBLen); 1718 1719 /* Any RAID offload error results in retry which will use 1720 * the normal I/O path so the controller can handle whatever's 1721 * wrong. 
static void complete_scsi_command(struct CommandList *cp)
{
	struct scsi_cmnd *cmd;
	struct ctlr_info *h;
	struct ErrorInfo *ei;
	struct hpsa_scsi_dev_t *dev;

	unsigned char sense_key;
	unsigned char asc;	/* additional sense code */
	unsigned char ascq;	/* additional sense code qualifier */
	unsigned long sense_data_size;

	ei = cp->err_info;
	cmd = (struct scsi_cmnd *) cp->scsi_cmd;
	h = cp->h;
	dev = cmd->device->hostdata;

	scsi_dma_unmap(cmd); /* undo the DMA mappings */
	if ((cp->cmd_type == CMD_SCSI) &&
		(le16_to_cpu(cp->Header.SGTotal) > h->max_cmd_sg_entries))
		hpsa_unmap_sg_chain_block(h, cp);

	cmd->result = (DID_OK << 16);		/* host byte */
	cmd->result |= (COMMAND_COMPLETE << 8);	/* msg byte */

	if (cp->cmd_type == CMD_IOACCEL2)
		return process_ioaccel2_completion(h, cp, cmd, dev);

	cmd->result |= ei->ScsiStatus;

	scsi_set_resid(cmd, ei->ResidualCnt);
	if (ei->CommandStatus == 0) {
		cmd_free(h, cp);
		cmd->scsi_done(cmd);
		return;
	}

	/* copy the sense data */
	if (SCSI_SENSE_BUFFERSIZE < sizeof(ei->SenseInfo))
		sense_data_size = SCSI_SENSE_BUFFERSIZE;
	else
		sense_data_size = sizeof(ei->SenseInfo);
	if (ei->SenseLen < sense_data_size)
		sense_data_size = ei->SenseLen;

	memcpy(cmd->sense_buffer, ei->SenseInfo, sense_data_size);

	/* For I/O accelerator commands, copy over some fields to the normal
	 * CISS header used below for error handling.
	 */
	if (cp->cmd_type == CMD_IOACCEL1) {
		struct io_accel1_cmd *c = &h->ioaccel_cmd_pool[cp->cmdindex];
		cp->Header.SGList = scsi_sg_count(cmd);
		cp->Header.SGTotal = cpu_to_le16(cp->Header.SGList);
		cp->Request.CDBLen = le16_to_cpu(c->io_flags) &
			IOACCEL1_IOFLAGS_CDBLEN_MASK;
		cp->Header.tag = c->tag;
		memcpy(cp->Header.LUN.LunAddrBytes, c->CISS_LUN, 8);
		memcpy(cp->Request.CDB, c->CDB, cp->Request.CDBLen);

		/* Any RAID offload error results in retry which will use
		 * the normal I/O path so the controller can handle whatever's
		 * wrong.
		 */
		if (is_logical_dev_addr_mode(dev->scsi3addr)) {
			if (ei->CommandStatus == CMD_IOACCEL_DISABLED)
				dev->offload_enabled = 0;
			cmd->result = DID_SOFT_ERROR << 16;
			cmd_free(h, cp);
			cmd->scsi_done(cmd);
			return;
		}
	}

	/* an error has occurred */
	switch (ei->CommandStatus) {

	case CMD_TARGET_STATUS:
		if (ei->ScsiStatus) {
			/* Get sense key */
			sense_key = 0xf & ei->SenseInfo[2];
			/* Get additional sense code */
			asc = ei->SenseInfo[12];
			/* Get additional sense code qualifier */
			ascq = ei->SenseInfo[13];
		}
		if (ei->ScsiStatus == SAM_STAT_CHECK_CONDITION) {
			if (sense_key == ABORTED_COMMAND) {
				cmd->result |= DID_SOFT_ERROR << 16;
				break;
			}
			break;
		}
		/* Problem was not a check condition
		 * Pass it up to the upper layers...
		 */
		if (ei->ScsiStatus) {
			dev_warn(&h->pdev->dev, "cp %p has status 0x%x "
				"Sense: 0x%x, ASC: 0x%x, ASCQ: 0x%x, "
				"Returning result: 0x%x\n",
				cp, ei->ScsiStatus,
				sense_key, asc, ascq,
				cmd->result);
		} else {  /* scsi status is zero??? How??? */
			dev_warn(&h->pdev->dev, "cp %p SCSI status was 0. "
				"Returning no connection.\n", cp);

			/* Ordinarily, this case should never happen,
			 * but there is a bug in some released firmware
			 * revisions that allows it to happen if, for
			 * example, a 4100 backplane loses power and
			 * the tape drive is in it.  We assume that
			 * it's a fatal error of some kind because we
			 * can't show that it wasn't.  We will make it
			 * look like selection timeout since that is
			 * the most common reason for this to occur,
			 * and it's severe enough.
			 */

			cmd->result = DID_NO_CONNECT << 16;
		}
		break;

	case CMD_DATA_UNDERRUN: /* let mid layer handle it. */
		break;
	case CMD_DATA_OVERRUN:
		dev_warn(&h->pdev->dev,
			"cp %p has completed with data overrun reported\n", cp);
		break;
	case CMD_INVALID: {
		/* print_bytes(cp, sizeof(*cp), 1, 0);
		print_cmd(cp); */
		/* We get CMD_INVALID if you address a non-existent device
		 * instead of a selection timeout (no response).  You will
		 * see this if you yank out a drive, then try to access it.
		 * This is kind of a shame because it means that any other
		 * CMD_INVALID (e.g. driver bug) will get interpreted as a
		 * missing target. */
		cmd->result = DID_NO_CONNECT << 16;
	}
		break;
	case CMD_PROTOCOL_ERR:
		cmd->result = DID_ERROR << 16;
		dev_warn(&h->pdev->dev, "cp %p has protocol error\n", cp);
		break;
	case CMD_HARDWARE_ERR:
		cmd->result = DID_ERROR << 16;
		dev_warn(&h->pdev->dev, "cp %p had hardware error\n", cp);
		break;
	case CMD_CONNECTION_LOST:
		cmd->result = DID_ERROR << 16;
		dev_warn(&h->pdev->dev, "cp %p had connection lost\n", cp);
		break;
	case CMD_ABORTED:
		cmd->result = DID_ABORT << 16;
		dev_warn(&h->pdev->dev, "cp %p was aborted with status 0x%x\n",
			cp, ei->ScsiStatus);
		break;
	case CMD_ABORT_FAILED:
		cmd->result = DID_ERROR << 16;
		dev_warn(&h->pdev->dev, "cp %p reports abort failed\n", cp);
		break;
	case CMD_UNSOLICITED_ABORT:
		cmd->result = DID_SOFT_ERROR << 16; /* retry the command */
		dev_warn(&h->pdev->dev,
			"cp %p aborted due to an unsolicited abort\n", cp);
		break;
	case CMD_TIMEOUT:
		cmd->result = DID_TIME_OUT << 16;
		dev_warn(&h->pdev->dev, "cp %p timed out\n", cp);
		break;
	case CMD_UNABORTABLE:
		cmd->result = DID_ERROR << 16;
		dev_warn(&h->pdev->dev, "Command unabortable\n");
		break;
	case CMD_IOACCEL_DISABLED:
		/* This only handles the direct pass-through case since RAID
		 * offload is handled above.  Just attempt a retry.
		 */
		cmd->result = DID_SOFT_ERROR << 16;
		dev_warn(&h->pdev->dev,
			"cp %p had HP SSD Smart Path error\n", cp);
		break;
	default:
		cmd->result = DID_ERROR << 16;
		dev_warn(&h->pdev->dev, "cp %p returned unknown status %x\n",
			cp, ei->CommandStatus);
	}
	cmd_free(h, cp);
	cmd->scsi_done(cmd);
}

static void hpsa_pci_unmap(struct pci_dev *pdev,
	struct CommandList *c, int sg_used, int data_direction)
{
	int i;

	for (i = 0; i < sg_used; i++)
		pci_unmap_single(pdev, (dma_addr_t) le64_to_cpu(c->SG[i].Addr),
				le32_to_cpu(c->SG[i].Len),
				data_direction);
}

static int hpsa_map_one(struct pci_dev *pdev,
		struct CommandList *cp,
		unsigned char *buf,
		size_t buflen,
		int data_direction)
{
	u64 addr64;

	if (buflen == 0 || data_direction == PCI_DMA_NONE) {
		cp->Header.SGList = 0;
		cp->Header.SGTotal = cpu_to_le16(0);
		return 0;
	}

	addr64 = pci_map_single(pdev, buf, buflen, data_direction);
	if (dma_mapping_error(&pdev->dev, addr64)) {
		/* Prevent subsequent unmap of something never mapped */
		cp->Header.SGList = 0;
		cp->Header.SGTotal = cpu_to_le16(0);
		return -1;
	}
	cp->SG[0].Addr = cpu_to_le64(addr64);
	cp->SG[0].Len = cpu_to_le32(buflen);
	cp->SG[0].Ext = cpu_to_le32(HPSA_SG_LAST); /* we are not chaining */
	cp->Header.SGList = 1;	/* no. SGs contig in this cmd */
	cp->Header.SGTotal = cpu_to_le16(1); /* total sgs in cmd list */
	return 0;
}

static inline void hpsa_scsi_do_simple_cmd_core(struct ctlr_info *h,
	struct CommandList *c)
{
	DECLARE_COMPLETION_ONSTACK(wait);

	c->waiting = &wait;
	enqueue_cmd_and_start_io(h, c);
	wait_for_completion(&wait);
}

static u32 lockup_detected(struct ctlr_info *h)
{
	int cpu;
	u32 rc, *lockup_detected;

	cpu = get_cpu();
	lockup_detected = per_cpu_ptr(h->lockup_detected, cpu);
	rc = *lockup_detected;
	put_cpu();
	return rc;
}

static void hpsa_scsi_do_simple_cmd_core_if_no_lockup(struct ctlr_info *h,
	struct CommandList *c)
{
	/* If controller lockup detected, fake a hardware error. */
	if (unlikely(lockup_detected(h)))
		c->err_info->CommandStatus = CMD_HARDWARE_ERR;
	else
		hpsa_scsi_do_simple_cmd_core(h, c);
}

#define MAX_DRIVER_CMD_RETRIES 25
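/*
 * Retry policy for driver-internal commands that come back with unit
 * attention or busy status: the first three attempts are issued back
 * to back; from the fourth attempt on, a delay is inserted before the
 * next try, doubling each time (10, 20, 40, ... ms) until it plateaus
 * at 1280 ms, for at most MAX_DRIVER_CMD_RETRIES attempts in total.
 */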
static void hpsa_scsi_do_simple_cmd_with_retry(struct ctlr_info *h,
	struct CommandList *c, int data_direction)
{
	int backoff_time = 10, retry_count = 0;

	do {
		memset(c->err_info, 0, sizeof(*c->err_info));
		hpsa_scsi_do_simple_cmd_core(h, c);
		retry_count++;
		if (retry_count > 3) {
			msleep(backoff_time);
			if (backoff_time < 1000)
				backoff_time *= 2;
		}
	} while ((check_for_unit_attention(h, c) ||
			check_for_busy(h, c)) &&
			retry_count <= MAX_DRIVER_CMD_RETRIES);
	hpsa_pci_unmap(h->pdev, c, 1, data_direction);
}

static void hpsa_print_cmd(struct ctlr_info *h, char *txt,
			struct CommandList *c)
{
	const u8 *cdb = c->Request.CDB;
	const u8 *lun = c->Header.LUN.LunAddrBytes;

	dev_warn(&h->pdev->dev, "%s: LUN:%02x%02x%02x%02x%02x%02x%02x%02x"
	" CDB:%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x\n",
		txt, lun[0], lun[1], lun[2], lun[3],
		lun[4], lun[5], lun[6], lun[7],
		cdb[0], cdb[1], cdb[2], cdb[3],
		cdb[4], cdb[5], cdb[6], cdb[7],
		cdb[8], cdb[9], cdb[10], cdb[11],
		cdb[12], cdb[13], cdb[14], cdb[15]);
}

static void hpsa_scsi_interpret_error(struct ctlr_info *h,
		struct CommandList *cp)
{
	const struct ErrorInfo *ei = cp->err_info;
	struct device *d = &cp->h->pdev->dev;
	const u8 *sd = ei->SenseInfo;

	switch (ei->CommandStatus) {
	case CMD_TARGET_STATUS:
		hpsa_print_cmd(h, "SCSI status", cp);
		if (ei->ScsiStatus == SAM_STAT_CHECK_CONDITION)
			dev_warn(d, "SCSI Status = 02, Sense key = %02x, ASC = %02x, ASCQ = %02x\n",
				sd[2] & 0x0f, sd[12], sd[13]);
		else
			dev_warn(d, "SCSI Status = %02x\n", ei->ScsiStatus);
		if (ei->ScsiStatus == 0)
			dev_warn(d, "SCSI status is abnormally zero.  "
				"(probably indicates selection timeout "
				"reported incorrectly due to a known "
				"firmware bug, circa July, 2001.)\n");
		break;
	case CMD_DATA_UNDERRUN: /* let mid layer handle it. */
		break;
	case CMD_DATA_OVERRUN:
		hpsa_print_cmd(h, "overrun condition", cp);
		break;
	case CMD_INVALID: {
		/* controller unfortunately reports SCSI passthru's
		 * to non-existent targets as invalid commands.
		 */
		hpsa_print_cmd(h, "invalid command", cp);
		dev_warn(d, "probably means device no longer present\n");
		}
		break;
	case CMD_PROTOCOL_ERR:
		hpsa_print_cmd(h, "protocol error", cp);
		break;
	case CMD_HARDWARE_ERR:
		hpsa_print_cmd(h, "hardware error", cp);
		break;
	case CMD_CONNECTION_LOST:
		hpsa_print_cmd(h, "connection lost", cp);
		break;
	case CMD_ABORTED:
		hpsa_print_cmd(h, "aborted", cp);
		break;
	case CMD_ABORT_FAILED:
		hpsa_print_cmd(h, "abort failed", cp);
		break;
	case CMD_UNSOLICITED_ABORT:
		hpsa_print_cmd(h, "unsolicited abort", cp);
		break;
	case CMD_TIMEOUT:
		hpsa_print_cmd(h, "timed out", cp);
		break;
	case CMD_UNABORTABLE:
		hpsa_print_cmd(h, "unabortable", cp);
		break;
	default:
		hpsa_print_cmd(h, "unknown status", cp);
		dev_warn(d, "Unknown command status %x\n",
				ei->CommandStatus);
	}
}

static int hpsa_scsi_do_inquiry(struct ctlr_info *h, unsigned char *scsi3addr,
			u16 page, unsigned char *buf,
			unsigned char bufsize)
{
	int rc = IO_OK;
	struct CommandList *c;
	struct ErrorInfo *ei;

	c = cmd_alloc(h);

	if (c == NULL) {
		dev_warn(&h->pdev->dev, "cmd_alloc returned NULL!\n");
		return -ENOMEM;
	}

	if (fill_cmd(c, HPSA_INQUIRY, h, buf, bufsize,
			page, scsi3addr, TYPE_CMD)) {
		rc = -1;
		goto out;
	}
	hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE);
	ei = c->err_info;
	if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
		hpsa_scsi_interpret_error(h, c);
		rc = -1;
	}
out:
	cmd_free(h, c);
	return rc;
}

static int hpsa_bmic_ctrl_mode_sense(struct ctlr_info *h,
		unsigned char *scsi3addr, unsigned char page,
		struct bmic_controller_parameters *buf, size_t bufsize)
{
	int rc = IO_OK;
	struct CommandList *c;
	struct ErrorInfo *ei;

	c = cmd_alloc(h);
	if (c == NULL) {			/* trouble... */
		dev_warn(&h->pdev->dev, "cmd_alloc returned NULL!\n");
		return -ENOMEM;
	}

	if (fill_cmd(c, BMIC_SENSE_CONTROLLER_PARAMETERS, h, buf, bufsize,
			page, scsi3addr, TYPE_CMD)) {
		rc = -1;
		goto out;
	}
	hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE);
	ei = c->err_info;
	if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
		hpsa_scsi_interpret_error(h, c);
		rc = -1;
	}
out:
	cmd_free(h, c);
	return rc;
}

static int hpsa_send_reset(struct ctlr_info *h, unsigned char *scsi3addr,
	u8 reset_type)
{
	int rc = IO_OK;
	struct CommandList *c;
	struct ErrorInfo *ei;

	c = cmd_alloc(h);

	if (c == NULL) {			/* trouble... */
		dev_warn(&h->pdev->dev, "cmd_alloc returned NULL!\n");
		return -ENOMEM;
	}

	/* fill_cmd can't fail here, no data buffer to map. */
	(void) fill_cmd(c, HPSA_DEVICE_RESET_MSG, h, NULL, 0, 0,
			scsi3addr, TYPE_MSG);
	c->Request.CDB[1] = reset_type; /* fill_cmd defaults to LUN reset */
	hpsa_scsi_do_simple_cmd_core(h, c);
	/* no unmap needed here because no data xfer. */

	ei = c->err_info;
	if (ei->CommandStatus != 0) {
		hpsa_scsi_interpret_error(h, c);
		rc = -1;
	}
	cmd_free(h, c);
	return rc;
}

static void hpsa_get_raid_level(struct ctlr_info *h,
	unsigned char *scsi3addr, unsigned char *raid_level)
{
	int rc;
	unsigned char *buf;

	*raid_level = RAID_UNKNOWN;
	buf = kzalloc(64, GFP_KERNEL);
	if (!buf)
		return;
	rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | 0xC1, buf, 64);
	if (rc == 0)
		*raid_level = buf[8];
	if (*raid_level > RAID_UNKNOWN)
		*raid_level = RAID_UNKNOWN;
	kfree(buf);
	return;
}

#define HPSA_MAP_DEBUG
#ifdef HPSA_MAP_DEBUG
static void hpsa_debug_map_buff(struct ctlr_info *h, int rc,
				struct raid_map_data *map_buff)
{
	struct raid_map_disk_data *dd = &map_buff->data[0];
	int map, row, col;
	u16 map_cnt, row_cnt, disks_per_row;

	if (rc != 0)
		return;

	/* Show details only if debugging has been activated. */
	if (h->raid_offload_debug < 2)
		return;

	dev_info(&h->pdev->dev, "structure_size = %u\n",
				le32_to_cpu(map_buff->structure_size));
	dev_info(&h->pdev->dev, "volume_blk_size = %u\n",
			le32_to_cpu(map_buff->volume_blk_size));
	dev_info(&h->pdev->dev, "volume_blk_cnt = 0x%llx\n",
			le64_to_cpu(map_buff->volume_blk_cnt));
	dev_info(&h->pdev->dev, "physicalBlockShift = %u\n",
			map_buff->phys_blk_shift);
	dev_info(&h->pdev->dev, "parity_rotation_shift = %u\n",
			map_buff->parity_rotation_shift);
	dev_info(&h->pdev->dev, "strip_size = %u\n",
			le16_to_cpu(map_buff->strip_size));
	dev_info(&h->pdev->dev, "disk_starting_blk = 0x%llx\n",
			le64_to_cpu(map_buff->disk_starting_blk));
	dev_info(&h->pdev->dev, "disk_blk_cnt = 0x%llx\n",
			le64_to_cpu(map_buff->disk_blk_cnt));
	dev_info(&h->pdev->dev, "data_disks_per_row = %u\n",
			le16_to_cpu(map_buff->data_disks_per_row));
	dev_info(&h->pdev->dev, "metadata_disks_per_row = %u\n",
			le16_to_cpu(map_buff->metadata_disks_per_row));
	dev_info(&h->pdev->dev, "row_cnt = %u\n",
			le16_to_cpu(map_buff->row_cnt));
	dev_info(&h->pdev->dev, "layout_map_count = %u\n",
			le16_to_cpu(map_buff->layout_map_count));
	dev_info(&h->pdev->dev, "flags = 0x%x\n",
			le16_to_cpu(map_buff->flags));
	dev_info(&h->pdev->dev, "encryption = %s\n",
			le16_to_cpu(map_buff->flags) &
			RAID_MAP_FLAG_ENCRYPT_ON ?  "ON" : "OFF");
	dev_info(&h->pdev->dev, "dekindex = %u\n",
			le16_to_cpu(map_buff->dekindex));
	map_cnt = le16_to_cpu(map_buff->layout_map_count);
	for (map = 0; map < map_cnt; map++) {
		dev_info(&h->pdev->dev, "Map%u:\n", map);
		row_cnt = le16_to_cpu(map_buff->row_cnt);
		for (row = 0; row < row_cnt; row++) {
			dev_info(&h->pdev->dev, "  Row%u:\n", row);
			disks_per_row =
				le16_to_cpu(map_buff->data_disks_per_row);
			for (col = 0; col < disks_per_row; col++, dd++)
				dev_info(&h->pdev->dev,
					"    D%02u: h=0x%04x xor=%u,%u\n",
					col, dd->ioaccel_handle,
					dd->xor_mult[0], dd->xor_mult[1]);
			disks_per_row =
				le16_to_cpu(map_buff->metadata_disks_per_row);
			for (col = 0; col < disks_per_row; col++, dd++)
				dev_info(&h->pdev->dev,
					"    M%02u: h=0x%04x xor=%u,%u\n",
					col, dd->ioaccel_handle,
					dd->xor_mult[0], dd->xor_mult[1]);
		}
	}
}
#else
static void hpsa_debug_map_buff(__attribute__((unused)) struct ctlr_info *h,
			__attribute__((unused)) int rc,
			__attribute__((unused)) struct raid_map_data *map_buff)
{
}
#endif

static int hpsa_get_raid_map(struct ctlr_info *h,
	unsigned char *scsi3addr, struct hpsa_scsi_dev_t *this_device)
{
	int rc = 0;
	struct CommandList *c;
	struct ErrorInfo *ei;

	c = cmd_alloc(h);
	if (c == NULL) {
		dev_warn(&h->pdev->dev, "cmd_alloc returned NULL!\n");
		return -ENOMEM;
	}
	if (fill_cmd(c, HPSA_GET_RAID_MAP, h, &this_device->raid_map,
			sizeof(this_device->raid_map), 0,
			scsi3addr, TYPE_CMD)) {
		dev_warn(&h->pdev->dev, "Out of memory in hpsa_get_raid_map()\n");
		cmd_free(h, c);
		return -ENOMEM;
	}
	hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE);
	ei = c->err_info;
	if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
		hpsa_scsi_interpret_error(h, c);
		cmd_free(h, c);
		return -1;
	}
	cmd_free(h, c);

	/* @todo in the future, dynamically allocate RAID map memory */
	if (le32_to_cpu(this_device->raid_map.structure_size) >
			sizeof(this_device->raid_map)) {
		dev_warn(&h->pdev->dev, "RAID map size is too large!\n");
		rc = -1;
	}
	hpsa_debug_map_buff(h, rc, &this_device->raid_map);
	return rc;
}

static int hpsa_vpd_page_supported(struct ctlr_info *h,
	unsigned char scsi3addr[], u8 page)
{
	int rc;
	int i;
	int pages;
	unsigned char *buf, bufsize;

	buf = kzalloc(256, GFP_KERNEL);
	if (!buf)
		return 0;

	/* Get the size of the page list first */
	rc = hpsa_scsi_do_inquiry(h, scsi3addr,
		VPD_PAGE | HPSA_VPD_SUPPORTED_PAGES,
		buf, HPSA_VPD_HEADER_SZ);
	if (rc != 0)
		goto exit_unsupported;
	pages = buf[3];
	if ((pages + HPSA_VPD_HEADER_SZ) <= 255)
		bufsize = pages + HPSA_VPD_HEADER_SZ;
	else
		bufsize = 255;

	/* Get the whole VPD page list */
	rc = hpsa_scsi_do_inquiry(h, scsi3addr,
		VPD_PAGE | HPSA_VPD_SUPPORTED_PAGES,
		buf, bufsize);
	if (rc != 0)
		goto exit_unsupported;

	pages = buf[3];
	for (i = 1; i <= pages; i++)
		if (buf[3 + i] == page)
			goto exit_supported;
exit_unsupported:
	kfree(buf);
	return 0;
exit_supported:
	kfree(buf);
	return 1;
}

static void hpsa_get_ioaccel_status(struct ctlr_info *h,
	unsigned char *scsi3addr, struct hpsa_scsi_dev_t *this_device)
{
	int rc;
	unsigned char *buf;
	u8 ioaccel_status;

	this_device->offload_config = 0;
	this_device->offload_enabled = 0;

	buf = kzalloc(64, GFP_KERNEL);
	if (!buf)
		return;
	if (!hpsa_vpd_page_supported(h, scsi3addr, HPSA_VPD_LV_IOACCEL_STATUS))
		goto out;
	rc = hpsa_scsi_do_inquiry(h, scsi3addr,
		VPD_PAGE | HPSA_VPD_LV_IOACCEL_STATUS, buf, 64);
	if (rc != 0)
		goto out;

#define IOACCEL_STATUS_BYTE 4
#define OFFLOAD_CONFIGURED_BIT 0x01
#define OFFLOAD_ENABLED_BIT 0x02
	ioaccel_status = buf[IOACCEL_STATUS_BYTE];
	this_device->offload_config =
		!!(ioaccel_status & OFFLOAD_CONFIGURED_BIT);
	if (this_device->offload_config) {
		this_device->offload_enabled =
			!!(ioaccel_status & OFFLOAD_ENABLED_BIT);
		if (hpsa_get_raid_map(h, scsi3addr, this_device))
			this_device->offload_enabled = 0;
	}
out:
	kfree(buf);
	return;
}

/* Get the device id from inquiry page 0x83 */
static int hpsa_get_device_id(struct ctlr_info *h, unsigned char *scsi3addr,
	unsigned char *device_id, int buflen)
{
	int rc;
	unsigned char *buf;

	if (buflen > 16)
		buflen = 16;
	buf = kzalloc(64, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;
	rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | 0x83, buf, 64);
	if (rc == 0)
		memcpy(device_id, &buf[8], buflen);
	kfree(buf);
	return rc != 0;
}

static int hpsa_scsi_do_report_luns(struct ctlr_info *h, int logical,
		struct ReportLUNdata *buf, int bufsize,
		int extended_response)
{
	int rc = IO_OK;
	struct CommandList *c;
	unsigned char scsi3addr[8];
	struct ErrorInfo *ei;

	c = cmd_alloc(h);
	if (c == NULL) {			/* trouble... */
		dev_err(&h->pdev->dev, "cmd_alloc returned NULL!\n");
		return -1;
	}
	/* address the controller */
	memset(scsi3addr, 0, sizeof(scsi3addr));
	if (fill_cmd(c, logical ? HPSA_REPORT_LOG : HPSA_REPORT_PHYS, h,
		buf, bufsize, 0, scsi3addr, TYPE_CMD)) {
		rc = -1;
		goto out;
	}
	if (extended_response)
		c->Request.CDB[1] = extended_response;
	hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE);
	ei = c->err_info;
	if (ei->CommandStatus != 0 &&
	    ei->CommandStatus != CMD_DATA_UNDERRUN) {
		hpsa_scsi_interpret_error(h, c);
		rc = -1;
	} else {
		if (buf->extended_response_flag != extended_response) {
			dev_err(&h->pdev->dev,
				"report luns requested format %u, got %u\n",
				extended_response,
				buf->extended_response_flag);
			rc = -1;
		}
	}
out:
	cmd_free(h, c);
	return rc;
}

static inline int hpsa_scsi_do_report_phys_luns(struct ctlr_info *h,
		struct ReportLUNdata *buf,
		int bufsize, int extended_response)
{
	return hpsa_scsi_do_report_luns(h, 0, buf, bufsize, extended_response);
}

static inline int hpsa_scsi_do_report_log_luns(struct ctlr_info *h,
		struct ReportLUNdata *buf, int bufsize)
{
	return hpsa_scsi_do_report_luns(h, 1, buf, bufsize, 0);
}

static inline void hpsa_set_bus_target_lun(struct hpsa_scsi_dev_t *device,
	int bus, int target, int lun)
{
	device->bus = bus;
	device->target = target;
	device->lun = lun;
}

/* Use VPD inquiry to get details of volume status */
static int hpsa_get_volume_status(struct ctlr_info *h,
	unsigned char scsi3addr[])
{
	int rc;
	int status;
	int size;
	unsigned char *buf;

	buf = kzalloc(64, GFP_KERNEL);
	if (!buf)
		return HPSA_VPD_LV_STATUS_UNSUPPORTED;

	/* Does controller have VPD for logical volume status? */
	if (!hpsa_vpd_page_supported(h, scsi3addr, HPSA_VPD_LV_STATUS))
		goto exit_failed;

	/* Get the size of the VPD return buffer */
	rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | HPSA_VPD_LV_STATUS,
					buf, HPSA_VPD_HEADER_SZ);
	if (rc != 0)
		goto exit_failed;
	size = buf[3];

	/* Now get the whole VPD buffer */
	rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | HPSA_VPD_LV_STATUS,
					buf, size + HPSA_VPD_HEADER_SZ);
	if (rc != 0)
		goto exit_failed;
	status = buf[4]; /* status byte */

	kfree(buf);
	return status;
exit_failed:
	kfree(buf);
	return HPSA_VPD_LV_STATUS_UNSUPPORTED;
}

/* Determine offline status of a volume.
 * Return either:
 *  0 (not offline)
 *  0xff (offline for unknown reasons)
 *  # (integer code indicating one of several NOT READY states
 *     describing why a volume is to be kept offline)
 */
static int hpsa_volume_offline(struct ctlr_info *h,
	unsigned char scsi3addr[])
{
	struct CommandList *c;
	unsigned char *sense, sense_key, asc, ascq;
	int ldstat = 0;
	u16 cmd_status;
	u8 scsi_status;
#define ASC_LUN_NOT_READY 0x04
#define ASCQ_LUN_NOT_READY_FORMAT_IN_PROGRESS 0x04
#define ASCQ_LUN_NOT_READY_INITIALIZING_CMD_REQ 0x02

	c = cmd_alloc(h);
	if (!c)
		return 0;
	(void) fill_cmd(c, TEST_UNIT_READY, h, NULL, 0, 0, scsi3addr, TYPE_CMD);
	hpsa_scsi_do_simple_cmd_core(h, c);
	sense = c->err_info->SenseInfo;
	sense_key = sense[2];
	asc = sense[12];
	ascq = sense[13];
	cmd_status = c->err_info->CommandStatus;
	scsi_status = c->err_info->ScsiStatus;
	cmd_free(h, c);
	/* Is the volume 'not ready'? */
	if (cmd_status != CMD_TARGET_STATUS ||
		scsi_status != SAM_STAT_CHECK_CONDITION ||
		sense_key != NOT_READY ||
		asc != ASC_LUN_NOT_READY)  {
		return 0;
	}

	/* Determine the reason for not ready state */
	ldstat = hpsa_get_volume_status(h, scsi3addr);

	/* Keep volume offline in certain cases: */
	switch (ldstat) {
	case HPSA_LV_UNDERGOING_ERASE:
	case HPSA_LV_UNDERGOING_RPI:
	case HPSA_LV_PENDING_RPI:
	case HPSA_LV_ENCRYPTED_NO_KEY:
	case HPSA_LV_PLAINTEXT_IN_ENCRYPT_ONLY_CONTROLLER:
	case HPSA_LV_UNDERGOING_ENCRYPTION:
	case HPSA_LV_UNDERGOING_ENCRYPTION_REKEYING:
	case HPSA_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER:
		return ldstat;
	case HPSA_VPD_LV_STATUS_UNSUPPORTED:
		/* If VPD status page isn't available,
		 * use ASC/ASCQ to determine state
		 */
		if ((ascq == ASCQ_LUN_NOT_READY_FORMAT_IN_PROGRESS) ||
			(ascq == ASCQ_LUN_NOT_READY_INITIALIZING_CMD_REQ))
			return ldstat;
		break;
	default:
		break;
	}
	return 0;
}

static int hpsa_update_device_info(struct ctlr_info *h,
	unsigned char scsi3addr[], struct hpsa_scsi_dev_t *this_device,
	unsigned char *is_OBDR_device)
{

#define OBDR_SIG_OFFSET 43
#define OBDR_TAPE_SIG "$DR-10"
#define OBDR_SIG_LEN (sizeof(OBDR_TAPE_SIG) - 1)
#define OBDR_TAPE_INQ_SIZE (OBDR_SIG_OFFSET + OBDR_SIG_LEN)

	unsigned char *inq_buff;
	unsigned char *obdr_sig;

	inq_buff = kzalloc(OBDR_TAPE_INQ_SIZE, GFP_KERNEL);
	if (!inq_buff)
		goto bail_out;

	/* Do an inquiry to the device to see what it is. */
	if (hpsa_scsi_do_inquiry(h, scsi3addr, 0, inq_buff,
		(unsigned char) OBDR_TAPE_INQ_SIZE) != 0) {
		/* Inquiry failed (msg printed already) */
		dev_err(&h->pdev->dev,
			"hpsa_update_device_info: inquiry failed\n");
		goto bail_out;
	}

	this_device->devtype = (inq_buff[0] & 0x1f);
	memcpy(this_device->scsi3addr, scsi3addr, 8);
	memcpy(this_device->vendor, &inq_buff[8],
		sizeof(this_device->vendor));
	memcpy(this_device->model, &inq_buff[16],
		sizeof(this_device->model));
	memset(this_device->device_id, 0,
		sizeof(this_device->device_id));
	hpsa_get_device_id(h, scsi3addr, this_device->device_id,
		sizeof(this_device->device_id));

	if (this_device->devtype == TYPE_DISK &&
		is_logical_dev_addr_mode(scsi3addr)) {
		int volume_offline;

		hpsa_get_raid_level(h, scsi3addr, &this_device->raid_level);
		if (h->fw_support & MISC_FW_RAID_OFFLOAD_BASIC)
			hpsa_get_ioaccel_status(h, scsi3addr, this_device);
		volume_offline = hpsa_volume_offline(h, scsi3addr);
		if (volume_offline < 0 || volume_offline > 0xff)
			volume_offline = HPSA_VPD_LV_STATUS_UNSUPPORTED;
		this_device->volume_offline = volume_offline & 0xff;
	} else {
		this_device->raid_level = RAID_UNKNOWN;
		this_device->offload_config = 0;
		this_device->offload_enabled = 0;
		this_device->volume_offline = 0;
	}

	if (is_OBDR_device) {
		/* See if this is a One-Button-Disaster-Recovery device
		 * by looking for "$DR-10" at offset 43 in inquiry data.
		 */
		obdr_sig = &inq_buff[OBDR_SIG_OFFSET];
		*is_OBDR_device = (this_device->devtype == TYPE_ROM &&
					strncmp(obdr_sig, OBDR_TAPE_SIG,
						OBDR_SIG_LEN) == 0);
	}

	kfree(inq_buff);
	return 0;

bail_out:
	kfree(inq_buff);
	return 1;
}

static unsigned char *ext_target_model[] = {
	"MSA2012",
	"MSA2024",
	"MSA2312",
	"MSA2324",
	"P2000 G3 SAS",
	"MSA 2040 SAS",
	NULL,
};

static int is_ext_target(struct ctlr_info *h, struct hpsa_scsi_dev_t *device)
{
	int i;

	for (i = 0; ext_target_model[i]; i++)
		if (strncmp(device->model, ext_target_model[i],
			strlen(ext_target_model[i])) == 0)
			return 1;
	return 0;
}

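/*
 * Example of the mapping applied below (illustrative values only): a
 * logical volume on an external MSA-class box reporting lunid
 * 0x00020005 lands on bus 1, target 2 ((lunid >> 16) & 0x3fff),
 * lun 5 (lunid & 0x00ff); a volume local to the controller with
 * lunid 0x00000003 lands on bus 0, target 0, lun 3.
 */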
/* Helper function to assign bus, target, lun mapping of devices.
 * Puts non-external target logical volumes on bus 0, external target logical
 * volumes on bus 1, physical devices on bus 2, and the hba on bus 3.
 * Logical drive target and lun are assigned at this time, but
 * physical device lun and target assignment are deferred (assigned
 * in hpsa_find_target_lun, called by hpsa_scsi_add_entry.)
 */
static void figure_bus_target_lun(struct ctlr_info *h,
	u8 *lunaddrbytes, struct hpsa_scsi_dev_t *device)
{
	u32 lunid = le32_to_cpu(*((__le32 *) lunaddrbytes));

	if (!is_logical_dev_addr_mode(lunaddrbytes)) {
		/* physical device, target and lun filled in later */
		if (is_hba_lunid(lunaddrbytes))
			hpsa_set_bus_target_lun(device, 3, 0, lunid & 0x3fff);
		else
			/* defer target, lun assignment for physical devices */
			hpsa_set_bus_target_lun(device, 2, -1, -1);
		return;
	}
	/* It's a logical device */
	if (is_ext_target(h, device)) {
		/* external target way, put logicals on bus 1
		 * and match target/lun numbers box
		 * reports, other smart array, bus 0, target 0, match lunid
		 */
		hpsa_set_bus_target_lun(device,
			1, (lunid >> 16) & 0x3fff, lunid & 0x00ff);
		return;
	}
	hpsa_set_bus_target_lun(device, 0, 0, lunid & 0x3fff);
}

/*
 * If there is no lun 0 on a target, linux won't find any devices.
 * For the external targets (arrays), we have to manually detect the enclosure
 * which is at lun zero, as CCISS_REPORT_PHYSICAL_LUNS doesn't report
 * it for some reason.  *tmpdevice is the target we're adding,
 * this_device is a pointer into the current element of currentsd[]
 * that we're building up in update_scsi_devices(), below.
 * lunzerobits is a bitmap that tracks which targets already have a
 * lun 0 assigned.
 * Returns 1 if an enclosure was added, 0 if not.
 */
static int add_ext_target_dev(struct ctlr_info *h,
	struct hpsa_scsi_dev_t *tmpdevice,
	struct hpsa_scsi_dev_t *this_device, u8 *lunaddrbytes,
	unsigned long lunzerobits[], int *n_ext_target_devs)
{
	unsigned char scsi3addr[8];

	if (test_bit(tmpdevice->target, lunzerobits))
		return 0; /* There is already a lun 0 on this target. */

	if (!is_logical_dev_addr_mode(lunaddrbytes))
		return 0; /* It's the logical targets that may lack lun 0. */

	if (!is_ext_target(h, tmpdevice))
		return 0; /* Only external target devices have this problem. */

	if (tmpdevice->lun == 0) /* if lun is 0, then we have a lun 0. */
		return 0;

	memset(scsi3addr, 0, 8);
	scsi3addr[3] = tmpdevice->target;
	if (is_hba_lunid(scsi3addr))
		return 0; /* Don't add the RAID controller here. */

	if (is_scsi_rev_5(h))
		return 0; /* p1210m doesn't need to do this. */

	if (*n_ext_target_devs >= MAX_EXT_TARGETS) {
		dev_warn(&h->pdev->dev, "Maximum number of external "
			"target devices exceeded.  Check your hardware "
			"configuration.\n");
		return 0;
	}

	if (hpsa_update_device_info(h, scsi3addr, this_device, NULL))
		return 0;
	(*n_ext_target_devs)++;
	hpsa_set_bus_target_lun(this_device,
				tmpdevice->bus, tmpdevice->target, 0);
	set_bit(tmpdevice->target, lunzerobits);
	return 1;
}

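/*
 * The REPORT LUNS-style responses used here and in
 * hpsa_gather_lun_info() below lead with a big-endian byte count, so
 * the number of entries is that count divided by the per-entry size:
 * e.g. a LUNListLength of 0x60 (96 bytes) describes twelve 8-byte
 * standard entries, or four 24-byte extended entries.
 */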
/*
 * Get address of physical disk used for an ioaccel2 mode command:
 *	1. Extract ioaccel2 handle from the command.
 *	2. Find a matching ioaccel2 handle from list of physical disks.
 *	3. Return:
 *		1 and set scsi3addr to address of matching physical
 *		0 if no matching physical disk was found.
 */
static int hpsa_get_pdisk_of_ioaccel2(struct ctlr_info *h,
	struct CommandList *ioaccel2_cmd_to_abort, unsigned char *scsi3addr)
{
	struct ReportExtendedLUNdata *physicals = NULL;
	int responsesize = 24;	/* size of physical extended response */
	int extended = 2;	/* flag forces reporting 'other dev info'. */
	int reportsize = sizeof(*physicals) + HPSA_MAX_PHYS_LUN * responsesize;
	u32 nphysicals = 0;	/* number of reported physical devs */
	int found = 0;		/* found match (1) or not (0) */
	u32 find;		/* handle we need to match */
	int i;
	struct scsi_cmnd *scmd;	/* scsi command within request being aborted */
	struct hpsa_scsi_dev_t *d; /* device of request being aborted */
	struct io_accel2_cmd *c2a; /* ioaccel2 command to abort */
	__le32 it_nexus;	/* 4 byte device handle for the ioaccel2 cmd */
	__le32 scsi_nexus;	/* 4 byte device handle for the ioaccel2 cmd */

	if (ioaccel2_cmd_to_abort->cmd_type != CMD_IOACCEL2)
		return 0; /* no match */

	/* point to the ioaccel2 device handle */
	c2a = &h->ioaccel2_cmd_pool[ioaccel2_cmd_to_abort->cmdindex];
	if (c2a == NULL)
		return 0; /* no match */

	scmd = (struct scsi_cmnd *) ioaccel2_cmd_to_abort->scsi_cmd;
	if (scmd == NULL)
		return 0; /* no match */

	d = scmd->device->hostdata;
	if (d == NULL)
		return 0; /* no match */

	it_nexus = cpu_to_le32(d->ioaccel_handle);
	scsi_nexus = c2a->scsi_nexus;
	find = le32_to_cpu(c2a->scsi_nexus);

	if (h->raid_offload_debug > 0)
		dev_info(&h->pdev->dev,
			"%s: scsi_nexus:0x%08x device id: 0x%02x%02x%02x%02x %02x%02x%02x%02x %02x%02x%02x%02x %02x%02x%02x%02x\n",
			__func__, scsi_nexus,
			d->device_id[0], d->device_id[1], d->device_id[2],
			d->device_id[3], d->device_id[4], d->device_id[5],
			d->device_id[6], d->device_id[7], d->device_id[8],
			d->device_id[9], d->device_id[10], d->device_id[11],
			d->device_id[12], d->device_id[13], d->device_id[14],
			d->device_id[15]);

	/* Get the list of physical devices */
	physicals = kzalloc(reportsize, GFP_KERNEL);
	if (physicals == NULL)
		return 0;
	if (hpsa_scsi_do_report_phys_luns(h, (struct ReportLUNdata *) physicals,
		reportsize, extended)) {
		dev_err(&h->pdev->dev,
			"Can't lookup %s device handle: report physical LUNs failed.\n",
			"HP SSD Smart Path");
		kfree(physicals);
		return 0;
	}
	nphysicals = be32_to_cpu(*((__be32 *)physicals->LUNListLength)) /
							responsesize;

	/* find ioaccel2 handle in list of physicals: */
	for (i = 0; i < nphysicals; i++) {
		struct ext_report_lun_entry *entry = &physicals->LUN[i];

		/* handle is in bytes 28-31 of each lun */
		if (entry->ioaccel_handle != find)
			continue; /* didn't match */
		found = 1;
		memcpy(scsi3addr, entry->lunid, 8);
		if (h->raid_offload_debug > 0)
			dev_info(&h->pdev->dev,
				"%s: Searched h=0x%08x, Found h=0x%08x, scsiaddr 0x%8phN\n",
				__func__, find,
				entry->ioaccel_handle, scsi3addr);
		break; /* found it */
	}

	kfree(physicals);
	if (found)
		return 1;
	else
		return 0;

}

/*
 * Do CISS_REPORT_PHYS and CISS_REPORT_LOG.  Data is returned in physdev,
 * logdev.  The number of luns in physdev and logdev are returned in
 * *nphysicals and *nlogicals, respectively.
 * Returns 0 on success, -1 otherwise.
 */
static int hpsa_gather_lun_info(struct ctlr_info *h,
	int reportphyslunsize, int reportloglunsize,
	struct ReportLUNdata *physdev, u32 *nphysicals, int *physical_mode,
	struct ReportLUNdata *logdev, u32 *nlogicals)
{
	int physical_entry_size = 8;

	*physical_mode = 0;

	/* For I/O accelerator mode we need to read physical device handles */
	if (h->transMethod & CFGTBL_Trans_io_accel1 ||
		h->transMethod & CFGTBL_Trans_io_accel2) {
		*physical_mode = HPSA_REPORT_PHYS_EXTENDED;
		physical_entry_size = 24;
	}
	if (hpsa_scsi_do_report_phys_luns(h, physdev, reportphyslunsize,
							*physical_mode)) {
		dev_err(&h->pdev->dev, "report physical LUNs failed.\n");
		return -1;
	}
	*nphysicals = be32_to_cpu(*((__be32 *)physdev->LUNListLength)) /
							physical_entry_size;
	if (*nphysicals > HPSA_MAX_PHYS_LUN) {
		dev_warn(&h->pdev->dev, "maximum physical LUNs (%d) exceeded."
			"  %d LUNs ignored.\n", HPSA_MAX_PHYS_LUN,
			*nphysicals - HPSA_MAX_PHYS_LUN);
		*nphysicals = HPSA_MAX_PHYS_LUN;
	}
	if (hpsa_scsi_do_report_log_luns(h, logdev, reportloglunsize)) {
		dev_err(&h->pdev->dev, "report logical LUNs failed.\n");
		return -1;
	}
	*nlogicals = be32_to_cpu(*((__be32 *) logdev->LUNListLength)) / 8;
	/* Reject Logicals in excess of our max capability. */
	if (*nlogicals > HPSA_MAX_LUN) {
		dev_warn(&h->pdev->dev,
			"maximum logical LUNs (%d) exceeded.  "
			"%d LUNs ignored.\n", HPSA_MAX_LUN,
			*nlogicals - HPSA_MAX_LUN);
			*nlogicals = HPSA_MAX_LUN;
	}
	if (*nlogicals + *nphysicals > HPSA_MAX_PHYS_LUN) {
		dev_warn(&h->pdev->dev,
			"maximum logical + physical LUNs (%d) exceeded. "
			"%d LUNs ignored.\n", HPSA_MAX_PHYS_LUN,
			*nphysicals + *nlogicals - HPSA_MAX_PHYS_LUN);
		*nlogicals = HPSA_MAX_PHYS_LUN - *nphysicals;
	}
	return 0;
}

static u8 *figure_lunaddrbytes(struct ctlr_info *h, int raid_ctlr_position,
	int i, int nphysicals, int nlogicals,
	struct ReportExtendedLUNdata *physdev_list,
	struct ReportLUNdata *logdev_list)
{
	/* Helper function, figure out where the LUN ID info is coming from
	 * given index i, lists of physical and logical devices, where in
	 * the list the raid controller is supposed to appear (first or last)
	 */

	int logicals_start = nphysicals + (raid_ctlr_position == 0);
	int last_device = nphysicals + nlogicals + (raid_ctlr_position == 0);

	if (i == raid_ctlr_position)
		return RAID_CTLR_LUNID;

	if (i < logicals_start)
		return &physdev_list->LUN[i -
				(raid_ctlr_position == 0)].lunid[0];

	if (i < last_device)
		return &logdev_list->LUN[i - nphysicals -
			(raid_ctlr_position == 0)][0];
	BUG();
	return NULL;
}

static int hpsa_hba_mode_enabled(struct ctlr_info *h)
{
	int rc;
	int hba_mode_enabled;
	struct bmic_controller_parameters *ctlr_params;

	ctlr_params = kzalloc(sizeof(struct bmic_controller_parameters),
		GFP_KERNEL);

	if (!ctlr_params)
		return -ENOMEM;
	rc = hpsa_bmic_ctrl_mode_sense(h, RAID_CTLR_LUNID, 0, ctlr_params,
		sizeof(struct bmic_controller_parameters));
	if (rc) {
		kfree(ctlr_params);
		return rc;
	}

	hba_mode_enabled =
		((ctlr_params->nvram_flags & HBA_MODE_ENABLED_FLAG) != 0);
	kfree(ctlr_params);
	return hba_mode_enabled;
}

static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)
{
	/* the idea here is we could get notified
	 * that some devices have changed, so we do a report
	 * physical luns and report logical luns cmd, and adjust
	 * our list of devices accordingly.
	 *
	 * The scsi3addr's of devices won't change so long as the
	 * adapter is not reset.  That means we can rescan and
	 * tell which devices we already know about, vs. new
	 * devices, vs. disappearing devices.
	 */
	struct ReportExtendedLUNdata *physdev_list = NULL;
	struct ReportLUNdata *logdev_list = NULL;
	u32 nphysicals = 0;
	u32 nlogicals = 0;
	int physical_mode = 0;
	u32 ndev_allocated = 0;
	struct hpsa_scsi_dev_t **currentsd, *this_device, *tmpdevice;
	int ncurrent = 0;
	int i, n_ext_target_devs, ndevs_to_allocate;
	int raid_ctlr_position;
	int rescan_hba_mode;
	DECLARE_BITMAP(lunzerobits, MAX_EXT_TARGETS);

	currentsd = kzalloc(sizeof(*currentsd) * HPSA_MAX_DEVICES, GFP_KERNEL);
	physdev_list = kzalloc(sizeof(*physdev_list), GFP_KERNEL);
	logdev_list = kzalloc(sizeof(*logdev_list), GFP_KERNEL);
	tmpdevice = kzalloc(sizeof(*tmpdevice), GFP_KERNEL);

	if (!currentsd || !physdev_list || !logdev_list || !tmpdevice) {
		dev_err(&h->pdev->dev, "out of memory\n");
		goto out;
	}
	memset(lunzerobits, 0, sizeof(lunzerobits));

	rescan_hba_mode = hpsa_hba_mode_enabled(h);
	if (rescan_hba_mode < 0)
		goto out;

	if (!h->hba_mode_enabled && rescan_hba_mode)
		dev_warn(&h->pdev->dev, "HBA mode enabled\n");
	else if (h->hba_mode_enabled && !rescan_hba_mode)
		dev_warn(&h->pdev->dev, "HBA mode disabled\n");

	h->hba_mode_enabled = rescan_hba_mode;

	if (hpsa_gather_lun_info(h,
			sizeof(*physdev_list), sizeof(*logdev_list),
			(struct ReportLUNdata *) physdev_list, &nphysicals,
			&physical_mode, logdev_list, &nlogicals))
		goto out;

	/* We might see up to the maximum number of logical and physical disks
	 * plus external target devices, and a device for the local RAID
	 * controller.
	 */
	ndevs_to_allocate = nphysicals + nlogicals + MAX_EXT_TARGETS + 1;

	/* Allocate the per device structures */
	for (i = 0; i < ndevs_to_allocate; i++) {
		if (i >= HPSA_MAX_DEVICES) {
			dev_warn(&h->pdev->dev, "maximum devices (%d) exceeded."
				"  %d devices ignored.\n", HPSA_MAX_DEVICES,
				ndevs_to_allocate - HPSA_MAX_DEVICES);
			break;
		}

		currentsd[i] = kzalloc(sizeof(*currentsd[i]), GFP_KERNEL);
		if (!currentsd[i]) {
			dev_warn(&h->pdev->dev, "out of memory at %s:%d\n",
				__FILE__, __LINE__);
			goto out;
		}
		ndev_allocated++;
	}

	if (is_scsi_rev_5(h))
		raid_ctlr_position = 0;
	else
		raid_ctlr_position = nphysicals + nlogicals;

	/* adjust our table of devices */
	n_ext_target_devs = 0;
	for (i = 0; i < nphysicals + nlogicals + 1; i++) {
		u8 *lunaddrbytes, is_OBDR = 0;

		/* Figure out where the LUN ID info is coming from */
		lunaddrbytes = figure_lunaddrbytes(h, raid_ctlr_position,
			i, nphysicals, nlogicals, physdev_list, logdev_list);
		/* skip masked physical devices. */
		if (lunaddrbytes[3] & 0xC0 &&
			i < nphysicals + (raid_ctlr_position == 0))
			continue;

		/* Get device type, vendor, model, device id */
		if (hpsa_update_device_info(h, lunaddrbytes, tmpdevice,
							&is_OBDR))
			continue; /* skip it if we can't talk to it. */
		figure_bus_target_lun(h, lunaddrbytes, tmpdevice);
		this_device = currentsd[ncurrent];

		/*
		 * For external target devices, we have to insert a LUN 0 which
		 * doesn't show up in CCISS_REPORT_PHYSICAL data, but there
		 * is nonetheless an enclosure device there.  We have to
		 * present that otherwise linux won't find anything if
		 * there is no lun 0.
		 */
		if (add_ext_target_dev(h, tmpdevice, this_device,
				lunaddrbytes, lunzerobits,
				&n_ext_target_devs)) {
			ncurrent++;
			this_device = currentsd[ncurrent];
		}

		*this_device = *tmpdevice;

		switch (this_device->devtype) {
		case TYPE_ROM:
			/* We don't *really* support actual CD-ROM devices,
			 * just "One Button Disaster Recovery" tape drive
			 * which temporarily pretends to be a CD-ROM drive.
			 * So we check that the device is really an OBDR tape
			 * device by checking for "$DR-10" in bytes 43-48 of
			 * the inquiry data.
			 */
			if (is_OBDR)
				ncurrent++;
			break;
		case TYPE_DISK:
			if (h->hba_mode_enabled) {
				/* never use raid mapper in HBA mode */
				this_device->offload_enabled = 0;
				ncurrent++;
				break;
			} else if (h->acciopath_status) {
				if (i >= nphysicals) {
					ncurrent++;
					break;
				}
			} else {
				if (i < nphysicals)
					break;
				ncurrent++;
				break;
			}
			if (physical_mode == HPSA_REPORT_PHYS_EXTENDED) {
				memcpy(&this_device->ioaccel_handle,
					&lunaddrbytes[20],
					sizeof(this_device->ioaccel_handle));
				ncurrent++;
			}
			break;
		case TYPE_TAPE:
		case TYPE_MEDIUM_CHANGER:
			ncurrent++;
			break;
		case TYPE_RAID:
			/* Only present the Smartarray HBA as a RAID controller.
			 * If it's a RAID controller other than the HBA itself
			 * (an external RAID controller, MSA500 or similar)
			 * don't present it.
			 */
			if (!is_hba_lunid(lunaddrbytes))
				break;
			ncurrent++;
			break;
		default:
			break;
		}
		if (ncurrent >= HPSA_MAX_DEVICES)
			break;
	}
	adjust_hpsa_scsi_table(h, hostno, currentsd, ncurrent);
out:
	kfree(tmpdevice);
	for (i = 0; i < ndev_allocated; i++)
		kfree(currentsd[i]);
	kfree(currentsd);
	kfree(physdev_list);
	kfree(logdev_list);
}

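/*
 * Scatter-gather chaining, in brief: the controller command holds up
 * to h->max_cmd_sg_entries embedded SG descriptors.  When a request
 * needs more than that, the writing of descriptors switches to a
 * separate per-command SG block, and hpsa_map_sg_chain_block() turns
 * the last embedded slot into a chain pointer to that block.
 */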
/*
 * hpsa_scatter_gather takes a struct scsi_cmnd, (cmd), and does the pci
 * dma mapping and fills in the scatter gather entries of the
 * hpsa command, cp.
 */
static int hpsa_scatter_gather(struct ctlr_info *h,
		struct CommandList *cp,
		struct scsi_cmnd *cmd)
{
	unsigned int len;
	struct scatterlist *sg;
	u64 addr64;
	int use_sg, i, sg_index, chained;
	struct SGDescriptor *curr_sg;

	BUG_ON(scsi_sg_count(cmd) > h->maxsgentries);

	use_sg = scsi_dma_map(cmd);
	if (use_sg < 0)
		return use_sg;

	if (!use_sg)
		goto sglist_finished;

	curr_sg = cp->SG;
	chained = 0;
	sg_index = 0;
	scsi_for_each_sg(cmd, sg, use_sg, i) {
		if (i == h->max_cmd_sg_entries - 1 &&
			use_sg > h->max_cmd_sg_entries) {
			chained = 1;
			curr_sg = h->cmd_sg_list[cp->cmdindex];
			sg_index = 0;
		}
		addr64 = (u64) sg_dma_address(sg);
		len = sg_dma_len(sg);
		curr_sg->Addr = cpu_to_le64(addr64);
		curr_sg->Len = cpu_to_le32(len);
		curr_sg->Ext = cpu_to_le32(0);
		curr_sg++;
	}
	(--curr_sg)->Ext = cpu_to_le32(HPSA_SG_LAST);

	if (use_sg + chained > h->maxSG)
		h->maxSG = use_sg + chained;

	if (chained) {
		cp->Header.SGList = h->max_cmd_sg_entries;
		cp->Header.SGTotal = cpu_to_le16(use_sg + 1);
		if (hpsa_map_sg_chain_block(h, cp)) {
			scsi_dma_unmap(cmd);
			return -1;
		}
		return 0;
	}

sglist_finished:

	cp->Header.SGList = (u8) use_sg;   /* no. SGs contig in this cmd */
	cp->Header.SGTotal = cpu_to_le16(use_sg); /* total sgs in cmd list */
	return 0;
}

#define IO_ACCEL_INELIGIBLE (1)
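/*
 * Worked example for the CDB rewrite below (illustrative values): a
 * READ_6 of 8 blocks at LBA 0x1234 (bytes 2-3 = 0x12 0x34, byte 4 =
 * 0x08) becomes a READ_10 with the LBA in bytes 2-5 (0x00 0x00 0x12
 * 0x34) and the transfer length in bytes 7-8 (0x00 0x08); requests
 * whose block count won't fit in READ_10/WRITE_10's 16-bit length
 * field are declared ineligible for the accelerated path.
 */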
static int fixup_ioaccel_cdb(u8 *cdb, int *cdb_len)
{
	int is_write = 0;
	u32 block;
	u32 block_cnt;

	/* Perform some CDB fixups if needed using 10 byte reads/writes only */
	switch (cdb[0]) {
	case WRITE_6:
	case WRITE_12:
		is_write = 1;
		/* fall through */
	case READ_6:
	case READ_12:
		if (*cdb_len == 6) {
			block = (((u32) cdb[2]) << 8) | cdb[3];
			block_cnt = cdb[4];
		} else {
			BUG_ON(*cdb_len != 12);
			block = (((u32) cdb[2]) << 24) |
				(((u32) cdb[3]) << 16) |
				(((u32) cdb[4]) << 8) |
				cdb[5];
			block_cnt =
				(((u32) cdb[6]) << 24) |
				(((u32) cdb[7]) << 16) |
				(((u32) cdb[8]) << 8) |
				cdb[9];
		}
		if (block_cnt > 0xffff)
			return IO_ACCEL_INELIGIBLE;

		cdb[0] = is_write ? WRITE_10 : READ_10;
		cdb[1] = 0;
		cdb[2] = (u8) (block >> 24);
		cdb[3] = (u8) (block >> 16);
		cdb[4] = (u8) (block >> 8);
		cdb[5] = (u8) (block);
		cdb[6] = 0;
		cdb[7] = (u8) (block_cnt >> 8);
		cdb[8] = (u8) (block_cnt);
		cdb[9] = 0;
		*cdb_len = 10;
		break;
	}
	return 0;
}

static int hpsa_scsi_ioaccel1_queue_command(struct ctlr_info *h,
	struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
	u8 *scsi3addr)
{
	struct scsi_cmnd *cmd = c->scsi_cmd;
	struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[c->cmdindex];
	unsigned int len;
	unsigned int total_len = 0;
	struct scatterlist *sg;
	u64 addr64;
	int use_sg, i;
	struct SGDescriptor *curr_sg;
	u32 control = IOACCEL1_CONTROL_SIMPLEQUEUE;

	/* TODO: implement chaining support */
	if (scsi_sg_count(cmd) > h->ioaccel_maxsg)
		return IO_ACCEL_INELIGIBLE;

	BUG_ON(cmd->cmd_len > IOACCEL1_IOFLAGS_CDBLEN_MAX);

	if (fixup_ioaccel_cdb(cdb, &cdb_len))
		return IO_ACCEL_INELIGIBLE;

	c->cmd_type = CMD_IOACCEL1;

	/* Adjust the DMA address to point to the accelerated command buffer */
	c->busaddr = (u32) h->ioaccel_cmd_pool_dhandle +
				(c->cmdindex * sizeof(*cp));
	BUG_ON(c->busaddr & 0x0000007F);

	use_sg = scsi_dma_map(cmd);
	if (use_sg < 0)
		return use_sg;

	if (use_sg) {
		curr_sg = cp->SG;
		scsi_for_each_sg(cmd, sg, use_sg, i) {
			addr64 = (u64) sg_dma_address(sg);
			len = sg_dma_len(sg);
			total_len += len;
			curr_sg->Addr = cpu_to_le64(addr64);
			curr_sg->Len = cpu_to_le32(len);
			curr_sg->Ext = cpu_to_le32(0);
			curr_sg++;
		}
		(--curr_sg)->Ext = cpu_to_le32(HPSA_SG_LAST);

		switch (cmd->sc_data_direction) {
		case DMA_TO_DEVICE:
			control |= IOACCEL1_CONTROL_DATA_OUT;
			break;
		case DMA_FROM_DEVICE:
			control |= IOACCEL1_CONTROL_DATA_IN;
			break;
		case DMA_NONE:
			control |= IOACCEL1_CONTROL_NODATAXFER;
			break;
		default:
			dev_err(&h->pdev->dev, "unknown data direction: %d\n",
			cmd->sc_data_direction);
			BUG();
			break;
		}
	} else {
		control |= IOACCEL1_CONTROL_NODATAXFER;
	}

	c->Header.SGList = use_sg;
	/* Fill out the command structure to submit */
	cp->dev_handle = cpu_to_le16(ioaccel_handle & 0xFFFF);
	cp->transfer_len = cpu_to_le32(total_len);
	cp->io_flags = cpu_to_le16(IOACCEL1_IOFLAGS_IO_REQ |
			(cdb_len & IOACCEL1_IOFLAGS_CDBLEN_MASK));
	cp->control = cpu_to_le32(control);
	memcpy(cp->CDB, cdb, cdb_len);
	memcpy(cp->CISS_LUN, scsi3addr, 8);
	/* Tag was already set at init time. */
	enqueue_cmd_and_start_io(h, c);
	return 0;
}

/*
 * Queue a command directly to a device behind the controller using the
 * I/O accelerator path.
 */
static int hpsa_scsi_ioaccel_direct_map(struct ctlr_info *h,
	struct CommandList *c)
{
	struct scsi_cmnd *cmd = c->scsi_cmd;
	struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;

	return hpsa_scsi_ioaccel_queue_command(h, c, dev->ioaccel_handle,
		cmd->cmnd, cmd->cmd_len, dev->scsi3addr);
}

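/*
 * Example of the tweak computation below (illustrative values): on a
 * volume with a 4096-byte block size, an I/O starting at LBA 100 gets
 * tweak 100 * 4096 / 512 = 800, while on a 512-byte-block volume the
 * tweak is simply the starting LBA.
 */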
/*
 * Set encryption parameters for the ioaccel2 request
 */
static void set_encrypt_ioaccel2(struct ctlr_info *h,
	struct CommandList *c, struct io_accel2_cmd *cp)
{
	struct scsi_cmnd *cmd = c->scsi_cmd;
	struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;
	struct raid_map_data *map = &dev->raid_map;
	u64 first_block;

	BUG_ON(!(dev->offload_config && dev->offload_enabled));

	/* Are we doing encryption on this device */
	if (!(le16_to_cpu(map->flags) & RAID_MAP_FLAG_ENCRYPT_ON))
		return;
	/* Set the data encryption key index. */
	cp->dekindex = map->dekindex;

	/* Set the encryption enable flag, encoded into direction field. */
	cp->direction |= IOACCEL2_DIRECTION_ENCRYPT_MASK;

	/* Set encryption tweak values based on logical block address.
	 * If the block size is 512, the tweak value is the LBA.
	 * For other block sizes, tweak is (LBA * block size) / 512.
	 */
	switch (cmd->cmnd[0]) {
	/* Required? 6-byte cdbs eliminated by fixup_ioaccel_cdb */
	case WRITE_6:
	case READ_6:
		first_block = get_unaligned_be16(&cmd->cmnd[2]);
		break;
	case WRITE_10:
	case READ_10:
	/* Required? 12-byte cdbs eliminated by fixup_ioaccel_cdb */
	case WRITE_12:
	case READ_12:
		first_block = get_unaligned_be32(&cmd->cmnd[2]);
		break;
	case WRITE_16:
	case READ_16:
		first_block = get_unaligned_be64(&cmd->cmnd[2]);
		break;
	default:
		dev_err(&h->pdev->dev,
			"ERROR: %s: size (0x%x) not supported for encryption\n",
			__func__, cmd->cmnd[0]);
		BUG();
		break;
	}

	if (le32_to_cpu(map->volume_blk_size) != 512)
		first_block = first_block *
				le32_to_cpu(map->volume_blk_size)/512;

	cp->tweak_lower = cpu_to_le32(first_block);
	cp->tweak_upper = cpu_to_le32(first_block >> 32);
}

static int hpsa_scsi_ioaccel2_queue_command(struct ctlr_info *h,
	struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
	u8 *scsi3addr)
{
	struct scsi_cmnd *cmd = c->scsi_cmd;
	struct io_accel2_cmd *cp = &h->ioaccel2_cmd_pool[c->cmdindex];
	struct ioaccel2_sg_element *curr_sg;
	int use_sg, i;
	struct scatterlist *sg;
	u64 addr64;
	u32 len;
	u32 total_len = 0;

	if (scsi_sg_count(cmd) > h->ioaccel_maxsg)
		return IO_ACCEL_INELIGIBLE;

	if (fixup_ioaccel_cdb(cdb, &cdb_len))
		return IO_ACCEL_INELIGIBLE;

	c->cmd_type = CMD_IOACCEL2;
	/* Adjust the DMA address to point to the accelerated command buffer */
	c->busaddr = (u32) h->ioaccel2_cmd_pool_dhandle +
				(c->cmdindex * sizeof(*cp));
	BUG_ON(c->busaddr & 0x0000007F);

	memset(cp, 0, sizeof(*cp));
	cp->IU_type = IOACCEL2_IU_TYPE;

	use_sg = scsi_dma_map(cmd);
	if (use_sg < 0)
		return use_sg;

	if (use_sg) {
		BUG_ON(use_sg > IOACCEL2_MAXSGENTRIES);
		curr_sg = cp->sg;
		scsi_for_each_sg(cmd, sg, use_sg, i) {
			addr64 = (u64) sg_dma_address(sg);
			len = sg_dma_len(sg);
			total_len += len;
			curr_sg->address = cpu_to_le64(addr64);
			curr_sg->length = cpu_to_le32(len);
			curr_sg->reserved[0] = 0;
			curr_sg->reserved[1] = 0;
			curr_sg->reserved[2] = 0;
			curr_sg->chain_indicator = 0;
			curr_sg++;
		}

		switch (cmd->sc_data_direction) {
		case DMA_TO_DEVICE:
			cp->direction &= ~IOACCEL2_DIRECTION_MASK;
			cp->direction |= IOACCEL2_DIR_DATA_OUT;
			break;
		case DMA_FROM_DEVICE:
			cp->direction &= ~IOACCEL2_DIRECTION_MASK;
			cp->direction |= IOACCEL2_DIR_DATA_IN;
			break;
		case DMA_NONE:
			cp->direction &= ~IOACCEL2_DIRECTION_MASK;
			cp->direction |= IOACCEL2_DIR_NO_DATA;
			break;
		default:
			dev_err(&h->pdev->dev, "unknown data direction: %d\n",
				cmd->sc_data_direction);
			BUG();
			break;
		}
	} else {
		cp->direction &= ~IOACCEL2_DIRECTION_MASK;
		cp->direction |= IOACCEL2_DIR_NO_DATA;
	}

	/* Set encryption parameters, if necessary */
	set_encrypt_ioaccel2(h, c, cp);

	cp->scsi_nexus = cpu_to_le32(ioaccel_handle);
	cp->Tag = cpu_to_le32(c->cmdindex << DIRECT_LOOKUP_SHIFT);
	memcpy(cp->cdb, cdb, sizeof(cp->cdb));

	/* fill in sg elements */
	cp->sg_count = (u8) use_sg;

	cp->data_len = cpu_to_le32(total_len);
	cp->err_ptr = cpu_to_le64(c->busaddr +
			offsetof(struct io_accel2_cmd, error_data));
	cp->err_len = cpu_to_le32(sizeof(cp->error_data));

	enqueue_cmd_and_start_io(h, c);
	return 0;
}

/*
 * Queue a command to the correct I/O accelerator path.
 */
static int hpsa_scsi_ioaccel_queue_command(struct ctlr_info *h,
	struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
	u8 *scsi3addr)
{
	if (h->transMethod & CFGTBL_Trans_io_accel1)
		return hpsa_scsi_ioaccel1_queue_command(h, c, ioaccel_handle,
						cdb, cdb_len, scsi3addr);
	else
		return hpsa_scsi_ioaccel2_queue_command(h, c, ioaccel_handle,
						cdb, cdb_len, scsi3addr);
}

static void raid_map_helper(struct raid_map_data *map,
		int offload_to_mirror, u32 *map_index, u32 *current_group)
{
	if (offload_to_mirror == 0)  {
		/* use physical disk in the first mirrored group. */
		*map_index %= le16_to_cpu(map->data_disks_per_row);
		return;
	}
	do {
		/* determine mirror group that *map_index indicates */
		*current_group = *map_index /
			le16_to_cpu(map->data_disks_per_row);
		if (offload_to_mirror == *current_group)
			continue;
		if (*current_group < le16_to_cpu(map->layout_map_count) - 1) {
			/* select map index from next group */
			*map_index += le16_to_cpu(map->data_disks_per_row);
			(*current_group)++;
		} else {
			/* select map index from first group */
			*map_index %= le16_to_cpu(map->data_disks_per_row);
			*current_group = 0;
		}
	} while (offload_to_mirror != *current_group);
}

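/*
 * Worked example for the stripe math below (illustrative values):
 * with strip_size = 128 blocks and data_disks_per_row = 3, a row
 * holds 384 blocks, so an 8-block read at LBA 500 falls in row 1
 * (500 / 384), row offset 116, column 0 (116 / 128); because the
 * whole request sits in one row and one column it stays eligible,
 * and map_index is derived from the (parity-rotated) row and that
 * column.
 */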
WRITE_16: 3564 is_write = 1; 3565 case READ_16: 3566 first_block = 3567 (((u64) cmd->cmnd[2]) << 56) | 3568 (((u64) cmd->cmnd[3]) << 48) | 3569 (((u64) cmd->cmnd[4]) << 40) | 3570 (((u64) cmd->cmnd[5]) << 32) | 3571 (((u64) cmd->cmnd[6]) << 24) | 3572 (((u64) cmd->cmnd[7]) << 16) | 3573 (((u64) cmd->cmnd[8]) << 8) | 3574 cmd->cmnd[9]; 3575 block_cnt = 3576 (((u32) cmd->cmnd[10]) << 24) | 3577 (((u32) cmd->cmnd[11]) << 16) | 3578 (((u32) cmd->cmnd[12]) << 8) | 3579 cmd->cmnd[13]; 3580 break; 3581 default: 3582 return IO_ACCEL_INELIGIBLE; /* process via normal I/O path */ 3583 } 3584 last_block = first_block + block_cnt - 1; 3585 3586 /* check for write to non-RAID-0 */ 3587 if (is_write && dev->raid_level != 0) 3588 return IO_ACCEL_INELIGIBLE; 3589 3590 /* check for invalid block or wraparound */ 3591 if (last_block >= le64_to_cpu(map->volume_blk_cnt) || 3592 last_block < first_block) 3593 return IO_ACCEL_INELIGIBLE; 3594 3595 /* calculate stripe information for the request */ 3596 blocks_per_row = le16_to_cpu(map->data_disks_per_row) * 3597 le16_to_cpu(map->strip_size); 3598 strip_size = le16_to_cpu(map->strip_size); 3599 #if BITS_PER_LONG == 32 3600 tmpdiv = first_block; 3601 (void) do_div(tmpdiv, blocks_per_row); 3602 first_row = tmpdiv; 3603 tmpdiv = last_block; 3604 (void) do_div(tmpdiv, blocks_per_row); 3605 last_row = tmpdiv; 3606 first_row_offset = (u32) (first_block - (first_row * blocks_per_row)); 3607 last_row_offset = (u32) (last_block - (last_row * blocks_per_row)); 3608 tmpdiv = first_row_offset; 3609 (void) do_div(tmpdiv, strip_size); 3610 first_column = tmpdiv; 3611 tmpdiv = last_row_offset; 3612 (void) do_div(tmpdiv, strip_size); 3613 last_column = tmpdiv; 3614 #else 3615 first_row = first_block / blocks_per_row; 3616 last_row = last_block / blocks_per_row; 3617 first_row_offset = (u32) (first_block - (first_row * blocks_per_row)); 3618 last_row_offset = (u32) (last_block - (last_row * blocks_per_row)); 3619 first_column = first_row_offset / strip_size; 3620 last_column = last_row_offset / strip_size; 3621 #endif 3622 3623 /* if this isn't a single row/column then give to the controller */ 3624 if ((first_row != last_row) || (first_column != last_column)) 3625 return IO_ACCEL_INELIGIBLE; 3626 3627 /* proceeding with driver mapping */ 3628 total_disks_per_row = le16_to_cpu(map->data_disks_per_row) + 3629 le16_to_cpu(map->metadata_disks_per_row); 3630 map_row = ((u32)(first_row >> map->parity_rotation_shift)) % 3631 le16_to_cpu(map->row_cnt); 3632 map_index = (map_row * total_disks_per_row) + first_column; 3633 3634 switch (dev->raid_level) { 3635 case HPSA_RAID_0: 3636 break; /* nothing special to do */ 3637 case HPSA_RAID_1: 3638 /* Handles load balance across RAID 1 members. 3639 * (2-drive R1 and R10 with even # of drives.) 3640 * Appropriate for SSDs, not optimal for HDDs 3641 */ 3642 BUG_ON(le16_to_cpu(map->layout_map_count) != 2); 3643 if (dev->offload_to_mirror) 3644 map_index += le16_to_cpu(map->data_disks_per_row); 3645 dev->offload_to_mirror = !dev->offload_to_mirror; 3646 break; 3647 case HPSA_RAID_ADM: 3648 /* Handles N-way mirrors (R1-ADM) 3649 * and R10 with # of drives divisible by 3. 3650 */ 3651 BUG_ON(le16_to_cpu(map->layout_map_count) != 3); 3652 3653 offload_to_mirror = dev->offload_to_mirror; 3654 raid_map_helper(map, offload_to_mirror, 3655 &map_index, &current_group); 3656 /* set mirror group to use next time */ 3657 offload_to_mirror = 3658 (offload_to_mirror >= 3659 le16_to_cpu(map->layout_map_count) - 1) 3660 ?
0 : offload_to_mirror + 1; 3661 dev->offload_to_mirror = offload_to_mirror; 3662 /* Avoid direct use of dev->offload_to_mirror within this 3663 * function since multiple threads might simultaneously 3664 * increment it beyond the range of dev->layout_map_count -1. 3665 */ 3666 break; 3667 case HPSA_RAID_5: 3668 case HPSA_RAID_6: 3669 if (le16_to_cpu(map->layout_map_count) <= 1) 3670 break; 3671 3672 /* Verify first and last block are in same RAID group */ 3673 r5or6_blocks_per_row = 3674 le16_to_cpu(map->strip_size) * 3675 le16_to_cpu(map->data_disks_per_row); 3676 BUG_ON(r5or6_blocks_per_row == 0); 3677 stripesize = r5or6_blocks_per_row * 3678 le16_to_cpu(map->layout_map_count); 3679 #if BITS_PER_LONG == 32 3680 tmpdiv = first_block; 3681 first_group = do_div(tmpdiv, stripesize); 3682 tmpdiv = first_group; 3683 (void) do_div(tmpdiv, r5or6_blocks_per_row); 3684 first_group = tmpdiv; 3685 tmpdiv = last_block; 3686 last_group = do_div(tmpdiv, stripesize); 3687 tmpdiv = last_group; 3688 (void) do_div(tmpdiv, r5or6_blocks_per_row); 3689 last_group = tmpdiv; 3690 #else 3691 first_group = (first_block % stripesize) / r5or6_blocks_per_row; 3692 last_group = (last_block % stripesize) / r5or6_blocks_per_row; 3693 #endif 3694 if (first_group != last_group) 3695 return IO_ACCEL_INELIGIBLE; 3696 3697 /* Verify request is in a single row of RAID 5/6 */ 3698 #if BITS_PER_LONG == 32 3699 tmpdiv = first_block; 3700 (void) do_div(tmpdiv, stripesize); 3701 first_row = r5or6_first_row = r0_first_row = tmpdiv; 3702 tmpdiv = last_block; 3703 (void) do_div(tmpdiv, stripesize); 3704 r5or6_last_row = r0_last_row = tmpdiv; 3705 #else 3706 first_row = r5or6_first_row = r0_first_row = 3707 first_block / stripesize; 3708 r5or6_last_row = r0_last_row = last_block / stripesize; 3709 #endif 3710 if (r5or6_first_row != r5or6_last_row) 3711 return IO_ACCEL_INELIGIBLE; 3712 3713 3714 /* Verify request is in a single column */ 3715 #if BITS_PER_LONG == 32 3716 tmpdiv = first_block; 3717 first_row_offset = do_div(tmpdiv, stripesize); 3718 tmpdiv = first_row_offset; 3719 first_row_offset = (u32) do_div(tmpdiv, r5or6_blocks_per_row); 3720 r5or6_first_row_offset = first_row_offset; 3721 tmpdiv = last_block; 3722 r5or6_last_row_offset = do_div(tmpdiv, stripesize); 3723 tmpdiv = r5or6_last_row_offset; 3724 r5or6_last_row_offset = do_div(tmpdiv, r5or6_blocks_per_row); 3725 tmpdiv = r5or6_first_row_offset; 3726 (void) do_div(tmpdiv, map->strip_size); 3727 first_column = r5or6_first_column = tmpdiv; 3728 tmpdiv = r5or6_last_row_offset; 3729 (void) do_div(tmpdiv, map->strip_size); 3730 r5or6_last_column = tmpdiv; 3731 #else 3732 first_row_offset = r5or6_first_row_offset = 3733 (u32)((first_block % stripesize) % 3734 r5or6_blocks_per_row); 3735 3736 r5or6_last_row_offset = 3737 (u32)((last_block % stripesize) % 3738 r5or6_blocks_per_row); 3739 3740 first_column = r5or6_first_column = 3741 r5or6_first_row_offset / le16_to_cpu(map->strip_size); 3742 r5or6_last_column = 3743 r5or6_last_row_offset / le16_to_cpu(map->strip_size); 3744 #endif 3745 if (r5or6_first_column != r5or6_last_column) 3746 return IO_ACCEL_INELIGIBLE; 3747 3748 /* Request is eligible */ 3749 map_row = ((u32)(first_row >> map->parity_rotation_shift)) % 3750 le16_to_cpu(map->row_cnt); 3751 3752 map_index = (first_group * 3753 (le16_to_cpu(map->row_cnt) * total_disks_per_row)) + 3754 (map_row * total_disks_per_row) + first_column; 3755 break; 3756 default: 3757 return IO_ACCEL_INELIGIBLE; 3758 } 3759 3760 disk_handle = dd[map_index].ioaccel_handle; 3761 disk_block = 
le64_to_cpu(map->disk_starting_blk) + 3762 first_row * le16_to_cpu(map->strip_size) + 3763 (first_row_offset - first_column * 3764 le16_to_cpu(map->strip_size)); 3765 disk_block_cnt = block_cnt; 3766 3767 /* handle differing logical/physical block sizes */ 3768 if (map->phys_blk_shift) { 3769 disk_block <<= map->phys_blk_shift; 3770 disk_block_cnt <<= map->phys_blk_shift; 3771 } 3772 BUG_ON(disk_block_cnt > 0xffff); 3773 3774 /* build the new CDB for the physical disk I/O */ 3775 if (disk_block > 0xffffffff) { 3776 cdb[0] = is_write ? WRITE_16 : READ_16; 3777 cdb[1] = 0; 3778 cdb[2] = (u8) (disk_block >> 56); 3779 cdb[3] = (u8) (disk_block >> 48); 3780 cdb[4] = (u8) (disk_block >> 40); 3781 cdb[5] = (u8) (disk_block >> 32); 3782 cdb[6] = (u8) (disk_block >> 24); 3783 cdb[7] = (u8) (disk_block >> 16); 3784 cdb[8] = (u8) (disk_block >> 8); 3785 cdb[9] = (u8) (disk_block); 3786 cdb[10] = (u8) (disk_block_cnt >> 24); 3787 cdb[11] = (u8) (disk_block_cnt >> 16); 3788 cdb[12] = (u8) (disk_block_cnt >> 8); 3789 cdb[13] = (u8) (disk_block_cnt); 3790 cdb[14] = 0; 3791 cdb[15] = 0; 3792 cdb_len = 16; 3793 } else { 3794 cdb[0] = is_write ? WRITE_10 : READ_10; 3795 cdb[1] = 0; 3796 cdb[2] = (u8) (disk_block >> 24); 3797 cdb[3] = (u8) (disk_block >> 16); 3798 cdb[4] = (u8) (disk_block >> 8); 3799 cdb[5] = (u8) (disk_block); 3800 cdb[6] = 0; 3801 cdb[7] = (u8) (disk_block_cnt >> 8); 3802 cdb[8] = (u8) (disk_block_cnt); 3803 cdb[9] = 0; 3804 cdb_len = 10; 3805 } 3806 return hpsa_scsi_ioaccel_queue_command(h, c, disk_handle, cdb, cdb_len, 3807 dev->scsi3addr); 3808 } 3809 3810 /* Submit commands down the "normal" RAID stack path */ 3811 static int hpsa_ciss_submit(struct ctlr_info *h, 3812 struct CommandList *c, struct scsi_cmnd *cmd, 3813 unsigned char scsi3addr[]) 3814 { 3815 cmd->host_scribble = (unsigned char *) c; 3816 c->cmd_type = CMD_SCSI; 3817 c->scsi_cmd = cmd; 3818 c->Header.ReplyQueue = 0; /* unused in simple mode */ 3819 memcpy(&c->Header.LUN.LunAddrBytes[0], &scsi3addr[0], 8); 3820 c->Header.tag = cpu_to_le64((c->cmdindex << DIRECT_LOOKUP_SHIFT)); 3821 3822 /* Fill in the request block... */ 3823 3824 c->Request.Timeout = 0; 3825 memset(c->Request.CDB, 0, sizeof(c->Request.CDB)); 3826 BUG_ON(cmd->cmd_len > sizeof(c->Request.CDB)); 3827 c->Request.CDBLen = cmd->cmd_len; 3828 memcpy(c->Request.CDB, cmd->cmnd, cmd->cmd_len); 3829 switch (cmd->sc_data_direction) { 3830 case DMA_TO_DEVICE: 3831 c->Request.type_attr_dir = 3832 TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_WRITE); 3833 break; 3834 case DMA_FROM_DEVICE: 3835 c->Request.type_attr_dir = 3836 TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_READ); 3837 break; 3838 case DMA_NONE: 3839 c->Request.type_attr_dir = 3840 TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_NONE); 3841 break; 3842 case DMA_BIDIRECTIONAL: 3843 /* This can happen if a buggy application does a scsi passthru 3844 * and sets both inlen and outlen to non-zero. ( see 3845 * ../scsi/scsi_ioctl.c:scsi_ioctl_send_command() ) 3846 */ 3847 3848 c->Request.type_attr_dir = 3849 TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_RSVD); 3850 /* This is technically wrong, and hpsa controllers should 3851 * reject it with CMD_INVALID, which is the most correct 3852 * response, but non-fibre backends appear to let it 3853 * slide by, and give the same results as if this field 3854 * were set correctly. Either way is acceptable for 3855 * our purposes here. 
3856 */ 3857 3858 break; 3859 3860 default: 3861 dev_err(&h->pdev->dev, "unknown data direction: %d\n", 3862 cmd->sc_data_direction); 3863 BUG(); 3864 break; 3865 } 3866 3867 if (hpsa_scatter_gather(h, c, cmd) < 0) { /* Fill SG list */ 3868 cmd_free(h, c); 3869 return SCSI_MLQUEUE_HOST_BUSY; 3870 } 3871 enqueue_cmd_and_start_io(h, c); 3872 /* the cmd'll come back via intr handler in complete_scsi_command() */ 3873 return 0; 3874 } 3875 3876 /* Running in struct Scsi_Host->host_lock less mode */ 3877 static int hpsa_scsi_queue_command(struct Scsi_Host *sh, struct scsi_cmnd *cmd) 3878 { 3879 struct ctlr_info *h; 3880 struct hpsa_scsi_dev_t *dev; 3881 unsigned char scsi3addr[8]; 3882 struct CommandList *c; 3883 int rc = 0; 3884 3885 /* Get the ptr to our adapter structure out of cmd->host. */ 3886 h = sdev_to_hba(cmd->device); 3887 dev = cmd->device->hostdata; 3888 if (!dev) { 3889 cmd->result = DID_NO_CONNECT << 16; 3890 cmd->scsi_done(cmd); 3891 return 0; 3892 } 3893 memcpy(scsi3addr, dev->scsi3addr, sizeof(scsi3addr)); 3894 3895 if (unlikely(lockup_detected(h))) { 3896 cmd->result = DID_ERROR << 16; 3897 cmd->scsi_done(cmd); 3898 return 0; 3899 } 3900 c = cmd_alloc(h); 3901 if (c == NULL) { /* trouble... */ 3902 dev_err(&h->pdev->dev, "cmd_alloc returned NULL!\n"); 3903 return SCSI_MLQUEUE_HOST_BUSY; 3904 } 3905 3906 /* Call alternate submit routine for I/O accelerated commands. 3907 * Retries always go down the normal I/O path. 3908 */ 3909 if (likely(cmd->retries == 0 && 3910 cmd->request->cmd_type == REQ_TYPE_FS && 3911 h->acciopath_status)) { 3912 3913 cmd->host_scribble = (unsigned char *) c; 3914 c->cmd_type = CMD_SCSI; 3915 c->scsi_cmd = cmd; 3916 3917 if (dev->offload_enabled) { 3918 rc = hpsa_scsi_ioaccel_raid_map(h, c); 3919 if (rc == 0) 3920 return 0; /* Sent on ioaccel path */ 3921 if (rc < 0) { /* scsi_dma_map failed. */ 3922 cmd_free(h, c); 3923 return SCSI_MLQUEUE_HOST_BUSY; 3924 } 3925 } else if (dev->ioaccel_handle) { 3926 rc = hpsa_scsi_ioaccel_direct_map(h, c); 3927 if (rc == 0) 3928 return 0; /* Sent on direct map path */ 3929 if (rc < 0) { /* scsi_dma_map failed. */ 3930 cmd_free(h, c); 3931 return SCSI_MLQUEUE_HOST_BUSY; 3932 } 3933 } 3934 } 3935 return hpsa_ciss_submit(h, c, cmd, scsi3addr); 3936 } 3937 3938 static int do_not_scan_if_controller_locked_up(struct ctlr_info *h) 3939 { 3940 unsigned long flags; 3941 3942 /* 3943 * Don't let rescans be initiated on a controller known 3944 * to be locked up. If the controller locks up *during* 3945 * a rescan, that thread is probably hosed, but at least 3946 * we can prevent new rescan threads from piling up on a 3947 * locked up controller. 3948 */ 3949 if (unlikely(lockup_detected(h))) { 3950 spin_lock_irqsave(&h->scan_lock, flags); 3951 h->scan_finished = 1; 3952 wake_up_all(&h->scan_wait_queue); 3953 spin_unlock_irqrestore(&h->scan_lock, flags); 3954 return 1; 3955 } 3956 return 0; 3957 } 3958 3959 static void hpsa_scan_start(struct Scsi_Host *sh) 3960 { 3961 struct ctlr_info *h = shost_to_hba(sh); 3962 unsigned long flags; 3963 3964 if (do_not_scan_if_controller_locked_up(h)) 3965 return; 3966 3967 /* wait until any scan already in progress is finished. 
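 *
 * Sketch of the hand-off protocol used below: h->scan_finished is only
 * read or written while holding h->scan_lock; a starter waits on
 * h->scan_wait_queue until the flag is set, then clears it under the
 * lock, so at most one scan runs at a time.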
*/ 3968 while (1) { 3969 spin_lock_irqsave(&h->scan_lock, flags); 3970 if (h->scan_finished) 3971 break; 3972 spin_unlock_irqrestore(&h->scan_lock, flags); 3973 wait_event(h->scan_wait_queue, h->scan_finished); 3974 /* Note: We don't need to worry about a race between this 3975 * thread and driver unload because the midlayer will 3976 * have incremented the reference count, so unload won't 3977 * happen if we're in here. 3978 */ 3979 } 3980 h->scan_finished = 0; /* mark scan as in progress */ 3981 spin_unlock_irqrestore(&h->scan_lock, flags); 3982 3983 if (do_not_scan_if_controller_locked_up(h)) 3984 return; 3985 3986 hpsa_update_scsi_devices(h, h->scsi_host->host_no); 3987 3988 spin_lock_irqsave(&h->scan_lock, flags); 3989 h->scan_finished = 1; /* mark scan as finished. */ 3990 wake_up_all(&h->scan_wait_queue); 3991 spin_unlock_irqrestore(&h->scan_lock, flags); 3992 } 3993 3994 static int hpsa_change_queue_depth(struct scsi_device *sdev, int qdepth) 3995 { 3996 struct ctlr_info *h = sdev_to_hba(sdev); 3997 3998 if (qdepth < 1) 3999 qdepth = 1; 4000 else 4001 if (qdepth > h->nr_cmds) 4002 qdepth = h->nr_cmds; 4003 scsi_change_queue_depth(sdev, qdepth); 4004 return sdev->queue_depth; 4005 } 4006 4007 static int hpsa_scan_finished(struct Scsi_Host *sh, 4008 unsigned long elapsed_time) 4009 { 4010 struct ctlr_info *h = shost_to_hba(sh); 4011 unsigned long flags; 4012 int finished; 4013 4014 spin_lock_irqsave(&h->scan_lock, flags); 4015 finished = h->scan_finished; 4016 spin_unlock_irqrestore(&h->scan_lock, flags); 4017 return finished; 4018 } 4019 4020 static void hpsa_unregister_scsi(struct ctlr_info *h) 4021 { 4022 /* we are being forcibly unloaded, and may not refuse. */ 4023 scsi_remove_host(h->scsi_host); 4024 scsi_host_put(h->scsi_host); 4025 h->scsi_host = NULL; 4026 } 4027 4028 static int hpsa_register_scsi(struct ctlr_info *h) 4029 { 4030 struct Scsi_Host *sh; 4031 int error; 4032 4033 sh = scsi_host_alloc(&hpsa_driver_template, sizeof(h)); 4034 if (sh == NULL) 4035 goto fail; 4036 4037 sh->io_port = 0; 4038 sh->n_io_port = 0; 4039 sh->this_id = -1; 4040 sh->max_channel = 3; 4041 sh->max_cmd_len = MAX_COMMAND_SIZE; 4042 sh->max_lun = HPSA_MAX_LUN; 4043 sh->max_id = HPSA_MAX_LUN; 4044 sh->can_queue = h->nr_cmds - 4045 HPSA_CMDS_RESERVED_FOR_ABORTS - 4046 HPSA_CMDS_RESERVED_FOR_DRIVER - 4047 HPSA_MAX_CONCURRENT_PASSTHRUS; 4048 if (h->hba_mode_enabled) 4049 sh->cmd_per_lun = 7; 4050 else 4051 sh->cmd_per_lun = sh->can_queue; 4052 sh->sg_tablesize = h->maxsgentries; 4053 h->scsi_host = sh; 4054 sh->hostdata[0] = (unsigned long) h; 4055 sh->irq = h->intr[h->intr_mode]; 4056 sh->unique_id = sh->irq; 4057 error = scsi_add_host(sh, &h->pdev->dev); 4058 if (error) 4059 goto fail_host_put; 4060 scsi_scan_host(sh); 4061 return 0; 4062 4063 fail_host_put: 4064 dev_err(&h->pdev->dev, "%s: scsi_add_host" 4065 " failed for controller %d\n", __func__, h->ctlr); 4066 scsi_host_put(sh); 4067 return error; 4068 fail: 4069 dev_err(&h->pdev->dev, "%s: scsi_host_alloc" 4070 " failed for controller %d\n", __func__, h->ctlr); 4071 return -ENOMEM; 4072 } 4073 4074 static int wait_for_device_to_become_ready(struct ctlr_info *h, 4075 unsigned char lunaddr[]) 4076 { 4077 int rc; 4078 int count = 0; 4079 int waittime = 1; /* seconds */ 4080 struct CommandList *c; 4081 4082 c = cmd_alloc(h); 4083 if (!c) { 4084 dev_warn(&h->pdev->dev, "out of memory in " 4085 "wait_for_device_to_become_ready.\n"); 4086 return IO_ERROR; 4087 } 4088 4089 /* Send test unit ready until device ready, or give up. 
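 *
 * The loop below backs off exponentially: the sleep starts at one
 * second and doubles on each attempt, capped at
 * HPSA_MAX_WAIT_INTERVAL_SECS, for at most HPSA_TUR_RETRY_LIMIT tries.
 * A CHECK CONDITION with NO SENSE or UNIT ATTENTION sense is treated
 * as ready, since it typically just reflects the reset we sent.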
*/ 4090 while (count < HPSA_TUR_RETRY_LIMIT) { 4091 4092 /* Wait for a bit. do this first, because if we send 4093 * the TUR right away, the reset will just abort it. 4094 */ 4095 msleep(1000 * waittime); 4096 count++; 4097 rc = 0; /* Device ready. */ 4098 4099 /* Increase wait time with each try, up to a point. */ 4100 if (waittime < HPSA_MAX_WAIT_INTERVAL_SECS) 4101 waittime = waittime * 2; 4102 4103 /* Send the Test Unit Ready, fill_cmd can't fail, no mapping */ 4104 (void) fill_cmd(c, TEST_UNIT_READY, h, 4105 NULL, 0, 0, lunaddr, TYPE_CMD); 4106 hpsa_scsi_do_simple_cmd_core(h, c); 4107 /* no unmap needed here because no data xfer. */ 4108 4109 if (c->err_info->CommandStatus == CMD_SUCCESS) 4110 break; 4111 4112 if (c->err_info->CommandStatus == CMD_TARGET_STATUS && 4113 c->err_info->ScsiStatus == SAM_STAT_CHECK_CONDITION && 4114 (c->err_info->SenseInfo[2] == NO_SENSE || 4115 c->err_info->SenseInfo[2] == UNIT_ATTENTION)) 4116 break; 4117 4118 dev_warn(&h->pdev->dev, "waiting %d secs " 4119 "for device to become ready.\n", waittime); 4120 rc = 1; /* device not ready. */ 4121 } 4122 4123 if (rc) 4124 dev_warn(&h->pdev->dev, "giving up on device.\n"); 4125 else 4126 dev_warn(&h->pdev->dev, "device is ready.\n"); 4127 4128 cmd_free(h, c); 4129 return rc; 4130 } 4131 4132 /* Need at least one of these error handlers to keep ../scsi/hosts.c from 4133 * complaining. Doing a host- or bus-reset can't do anything good here. 4134 */ 4135 static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd) 4136 { 4137 int rc; 4138 struct ctlr_info *h; 4139 struct hpsa_scsi_dev_t *dev; 4140 4141 /* find the controller to which the command to be aborted was sent */ 4142 h = sdev_to_hba(scsicmd->device); 4143 if (h == NULL) /* paranoia */ 4144 return FAILED; 4145 dev = scsicmd->device->hostdata; 4146 if (!dev) { 4147 dev_err(&h->pdev->dev, "hpsa_eh_device_reset_handler: " 4148 "device lookup failed.\n"); 4149 return FAILED; 4150 } 4151 dev_warn(&h->pdev->dev, "resetting device %d:%d:%d:%d\n", 4152 h->scsi_host->host_no, dev->bus, dev->target, dev->lun); 4153 /* send a reset to the SCSI LUN which the command was sent to */ 4154 rc = hpsa_send_reset(h, dev->scsi3addr, HPSA_RESET_TYPE_LUN); 4155 if (rc == 0 && wait_for_device_to_become_ready(h, dev->scsi3addr) == 0) 4156 return SUCCESS; 4157 4158 dev_warn(&h->pdev->dev, "resetting device failed.\n"); 4159 return FAILED; 4160 } 4161 4162 static void swizzle_abort_tag(u8 *tag) 4163 { 4164 u8 original_tag[8]; 4165 4166 memcpy(original_tag, tag, 8); 4167 tag[0] = original_tag[3]; 4168 tag[1] = original_tag[2]; 4169 tag[2] = original_tag[1]; 4170 tag[3] = original_tag[0]; 4171 tag[4] = original_tag[7]; 4172 tag[5] = original_tag[6]; 4173 tag[6] = original_tag[5]; 4174 tag[7] = original_tag[4]; 4175 } 4176 4177 static void hpsa_get_tag(struct ctlr_info *h, 4178 struct CommandList *c, __le32 *taglower, __le32 *tagupper) 4179 { 4180 u64 tag; 4181 if (c->cmd_type == CMD_IOACCEL1) { 4182 struct io_accel1_cmd *cm1 = (struct io_accel1_cmd *) 4183 &h->ioaccel_cmd_pool[c->cmdindex]; 4184 tag = le64_to_cpu(cm1->tag); 4185 *tagupper = cpu_to_le32(tag >> 32); 4186 *taglower = cpu_to_le32(tag); 4187 return; 4188 } 4189 if (c->cmd_type == CMD_IOACCEL2) { 4190 struct io_accel2_cmd *cm2 = (struct io_accel2_cmd *) 4191 &h->ioaccel2_cmd_pool[c->cmdindex]; 4192 /* upper tag not used in ioaccel2 mode */ 4193 memset(tagupper, 0, sizeof(*tagupper)); 4194 *taglower = cm2->Tag; 4195 return; 4196 } 4197 tag = le64_to_cpu(c->Header.tag); 4198 *tagupper = cpu_to_le32(tag >> 32); 4199 
*taglower = cpu_to_le32(tag); 4200 } 4201 4202 static int hpsa_send_abort(struct ctlr_info *h, unsigned char *scsi3addr, 4203 struct CommandList *abort, int swizzle) 4204 { 4205 int rc = IO_OK; 4206 struct CommandList *c; 4207 struct ErrorInfo *ei; 4208 __le32 tagupper, taglower; 4209 4210 c = cmd_alloc(h); 4211 if (c == NULL) { /* trouble... */ 4212 dev_warn(&h->pdev->dev, "cmd_alloc returned NULL!\n"); 4213 return -ENOMEM; 4214 } 4215 4216 /* fill_cmd can't fail here, no buffer to map */ 4217 (void) fill_cmd(c, HPSA_ABORT_MSG, h, abort, 4218 0, 0, scsi3addr, TYPE_MSG); 4219 if (swizzle) 4220 swizzle_abort_tag(&c->Request.CDB[4]); 4221 hpsa_scsi_do_simple_cmd_core(h, c); 4222 hpsa_get_tag(h, abort, &taglower, &tagupper); 4223 dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: do_simple_cmd_core completed.\n", 4224 __func__, tagupper, taglower); 4225 /* no unmap needed here because no data xfer. */ 4226 4227 ei = c->err_info; 4228 switch (ei->CommandStatus) { 4229 case CMD_SUCCESS: 4230 break; 4231 case CMD_UNABORTABLE: /* Very common, don't make noise. */ 4232 rc = -1; 4233 break; 4234 default: 4235 dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: interpreting error.\n", 4236 __func__, tagupper, taglower); 4237 hpsa_scsi_interpret_error(h, c); 4238 rc = -1; 4239 break; 4240 } 4241 cmd_free(h, c); 4242 dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: Finished.\n", 4243 __func__, tagupper, taglower); 4244 return rc; 4245 } 4246 4247 /* ioaccel2 path firmware cannot handle abort task requests. 4248 * Change abort requests to physical target reset, and send to the 4249 * address of the physical disk used for the ioaccel 2 command. 4250 * Return 0 on success (IO_OK) 4251 * -1 on failure 4252 */ 4253 4254 static int hpsa_send_reset_as_abort_ioaccel2(struct ctlr_info *h, 4255 unsigned char *scsi3addr, struct CommandList *abort) 4256 { 4257 int rc = IO_OK; 4258 struct scsi_cmnd *scmd; /* scsi command within request being aborted */ 4259 struct hpsa_scsi_dev_t *dev; /* device to which scsi cmd was sent */ 4260 unsigned char phys_scsi3addr[8]; /* addr of phys disk with volume */ 4261 unsigned char *psa = &phys_scsi3addr[0]; 4262 4263 /* Get a pointer to the hpsa logical device. */ 4264 scmd = (struct scsi_cmnd *) abort->scsi_cmd; 4265 dev = (struct hpsa_scsi_dev_t *)(scmd->device->hostdata); 4266 if (dev == NULL) { 4267 dev_warn(&h->pdev->dev, 4268 "Cannot abort: no device pointer for command.\n"); 4269 return -1; /* not abortable */ 4270 } 4271 4272 if (h->raid_offload_debug > 0) 4273 dev_info(&h->pdev->dev, 4274 "Reset as abort: Abort requested on C%d:B%d:T%d:L%d scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n", 4275 h->scsi_host->host_no, dev->bus, dev->target, dev->lun, 4276 scsi3addr[0], scsi3addr[1], scsi3addr[2], scsi3addr[3], 4277 scsi3addr[4], scsi3addr[5], scsi3addr[6], scsi3addr[7]); 4278 4279 if (!dev->offload_enabled) { 4280 dev_warn(&h->pdev->dev, 4281 "Can't abort: device is not operating in HP SSD Smart Path mode.\n"); 4282 return -1; /* not abortable */ 4283 } 4284 4285 /* Incoming scsi3addr is logical addr. We need physical disk addr. 
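 *
 * hpsa_get_pdisk_of_ioaccel2() below recovers it from the outstanding
 * ioaccel2 command, so the reset is sent to the same physical disk
 * that serviced the original I/O.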
*/ 4286 if (!hpsa_get_pdisk_of_ioaccel2(h, abort, psa)) { 4287 dev_warn(&h->pdev->dev, "Can't abort: Failed lookup of physical address.\n"); 4288 return -1; /* not abortable */ 4289 } 4290 4291 /* send the reset */ 4292 if (h->raid_offload_debug > 0) 4293 dev_info(&h->pdev->dev, 4294 "Reset as abort: Resetting physical device at scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n", 4295 psa[0], psa[1], psa[2], psa[3], 4296 psa[4], psa[5], psa[6], psa[7]); 4297 rc = hpsa_send_reset(h, psa, HPSA_RESET_TYPE_TARGET); 4298 if (rc != 0) { 4299 dev_warn(&h->pdev->dev, 4300 "Reset as abort: Failed on physical device at scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n", 4301 psa[0], psa[1], psa[2], psa[3], 4302 psa[4], psa[5], psa[6], psa[7]); 4303 return rc; /* failed to reset */ 4304 } 4305 4306 /* wait for device to recover */ 4307 if (wait_for_device_to_become_ready(h, psa) != 0) { 4308 dev_warn(&h->pdev->dev, 4309 "Reset as abort: Failed: Device never recovered from reset: 0x%02x%02x%02x%02x%02x%02x%02x%02x\n", 4310 psa[0], psa[1], psa[2], psa[3], 4311 psa[4], psa[5], psa[6], psa[7]); 4312 return -1; /* failed to recover */ 4313 } 4314 4315 /* device recovered */ 4316 dev_info(&h->pdev->dev, 4317 "Reset as abort: Device recovered from reset: scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n", 4318 psa[0], psa[1], psa[2], psa[3], 4319 psa[4], psa[5], psa[6], psa[7]); 4320 4321 return rc; /* success */ 4322 } 4323 4324 /* Some Smart Arrays need the abort tag swizzled, and some don't. It's hard to 4325 * tell which kind we're dealing with, so we send the abort both ways. There 4326 * shouldn't be any collisions between swizzled and unswizzled tags due to the 4327 * way we construct our tags but we check anyway in case the assumptions which 4328 * make this true someday become false. 4329 */ 4330 static int hpsa_send_abort_both_ways(struct ctlr_info *h, 4331 unsigned char *scsi3addr, struct CommandList *abort) 4332 { 4333 /* ioaccelerator mode 2 commands should be aborted via the 4334 * accelerated path, since RAID path is unaware of these commands, 4335 * but underlying firmware can't handle abort TMF. 4336 * Change abort to physical device reset. 4337 */ 4338 if (abort->cmd_type == CMD_IOACCEL2) 4339 return hpsa_send_reset_as_abort_ioaccel2(h, scsi3addr, abort); 4340 4341 return hpsa_send_abort(h, scsi3addr, abort, 0) && 4342 hpsa_send_abort(h, scsi3addr, abort, 1); 4343 } 4344 4345 /* Send an abort for the specified command. 4346 * If the device and controller support it, 4347 * send a task abort request. 4348 */ 4349 static int hpsa_eh_abort_handler(struct scsi_cmnd *sc) 4350 { 4351 4352 int i, rc; 4353 struct ctlr_info *h; 4354 struct hpsa_scsi_dev_t *dev; 4355 struct CommandList *abort; /* pointer to command to be aborted */ 4356 struct scsi_cmnd *as; /* ptr to scsi cmd inside aborted command. */ 4357 char msg[256]; /* For debug messaging.
*/ 4358 int ml = 0; 4359 __le32 tagupper, taglower; 4360 4361 /* Find the controller of the command to be aborted */ 4362 h = sdev_to_hba(sc->device); 4363 if (WARN(h == NULL, 4364 "ABORT REQUEST FAILED, Controller lookup failed.\n")) 4365 return FAILED; 4366 4367 /* Check that controller supports some kind of task abort */ 4368 if (!(HPSATMF_PHYS_TASK_ABORT & h->TMFSupportFlags) && 4369 !(HPSATMF_LOG_TASK_ABORT & h->TMFSupportFlags)) 4370 return FAILED; 4371 4372 memset(msg, 0, sizeof(msg)); 4373 ml += sprintf(msg+ml, "ABORT REQUEST on C%d:B%d:T%d:L%llu ", 4374 h->scsi_host->host_no, sc->device->channel, 4375 sc->device->id, sc->device->lun); 4376 4377 /* Find the device of the command to be aborted */ 4378 dev = sc->device->hostdata; 4379 if (!dev) { 4380 dev_err(&h->pdev->dev, "%s FAILED, Device lookup failed.\n", 4381 msg); 4382 return FAILED; 4383 } 4384 4385 /* Get SCSI command to be aborted */ 4386 abort = (struct CommandList *) sc->host_scribble; 4387 if (abort == NULL) { 4388 dev_err(&h->pdev->dev, "%s FAILED, Command to abort is NULL.\n", 4389 msg); 4390 return FAILED; 4391 } 4392 hpsa_get_tag(h, abort, &taglower, &tagupper); 4393 ml += sprintf(msg+ml, "Tag:0x%08x:%08x ", tagupper, taglower); 4394 as = (struct scsi_cmnd *) abort->scsi_cmd; 4395 if (as != NULL) 4396 ml += sprintf(msg+ml, "Command:0x%x SN:0x%lx ", 4397 as->cmnd[0], as->serial_number); 4398 dev_dbg(&h->pdev->dev, "%s\n", msg); 4399 dev_warn(&h->pdev->dev, "Abort request on C%d:B%d:T%d:L%d\n", 4400 h->scsi_host->host_no, dev->bus, dev->target, dev->lun); 4401 /* 4402 * Command is in flight, or possibly already completed 4403 * by the firmware (but not to the scsi mid layer) but we can't 4404 * distinguish which. Send the abort down. 4405 */ 4406 rc = hpsa_send_abort_both_ways(h, dev->scsi3addr, abort); 4407 if (rc != 0) { 4408 dev_dbg(&h->pdev->dev, "%s Request FAILED.\n", msg); 4409 dev_warn(&h->pdev->dev, "FAILED abort on device C%d:B%d:T%d:L%d\n", 4410 h->scsi_host->host_no, 4411 dev->bus, dev->target, dev->lun); 4412 return FAILED; 4413 } 4414 dev_info(&h->pdev->dev, "%s REQUEST SUCCEEDED.\n", msg); 4415 4416 /* If the abort(s) above completed and actually aborted the 4417 * command, then the command to be aborted should already be 4418 * completed. If not, wait around a bit more to see if they 4419 * manage to complete normally. 4420 */ 4421 #define ABORT_COMPLETE_WAIT_SECS 30 4422 for (i = 0; i < ABORT_COMPLETE_WAIT_SECS * 10; i++) { 4423 if (test_bit(abort->cmdindex & (BITS_PER_LONG - 1), 4424 h->cmd_pool_bits + 4425 (abort->cmdindex / BITS_PER_LONG))) 4426 msleep(100); 4427 else 4428 return SUCCESS; 4429 } 4430 dev_warn(&h->pdev->dev, "%s FAILED. Aborted command has not completed after %d seconds.\n", 4431 msg, ABORT_COMPLETE_WAIT_SECS); 4432 return FAILED; 4433 } 4434 4435 4436 /* 4437 * For operations that cannot sleep, a command block is allocated at init, 4438 * and managed by cmd_alloc() and cmd_free() using a simple bitmap to track 4439 * which ones are free or in use. Lock must be held when calling this. 4440 * cmd_free() is the complement. 
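 *
 * A minimal sketch of the allocation idea (illustration only; the real
 * code below also wraps around and retries when another thread wins
 * the race for the bit it found):
 *
 *	i = find_first_zero_bit(h->cmd_pool_bits, h->nr_cmds);
 *	if (!test_and_set_bit(i & (BITS_PER_LONG - 1),
 *			h->cmd_pool_bits + (i / BITS_PER_LONG)))
 *		return h->cmd_pool + i;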
4441 */ 4442 static struct CommandList *cmd_alloc(struct ctlr_info *h) 4443 { 4444 struct CommandList *c; 4445 int i; 4446 union u64bit temp64; 4447 dma_addr_t cmd_dma_handle, err_dma_handle; 4448 int loopcount; 4449 4450 /* There is some *extremely* small but non-zero chance that 4451 * multiple threads could get in here, and one thread could 4452 * be scanning through the list of bits looking for a free 4453 * one, but the free ones are always behind him, and other 4454 * threads sneak in behind him and eat them before he can 4455 * get to them, so that while there is always a free one, a 4456 * very unlucky thread might be starved anyway, never able to 4457 * beat the other threads. In reality, this happens so 4458 * infrequently as to be indistinguishable from never. 4459 */ 4460 4461 loopcount = 0; 4462 do { 4463 i = find_first_zero_bit(h->cmd_pool_bits, h->nr_cmds); 4464 if (i == h->nr_cmds) 4465 i = 0; 4466 loopcount++; 4467 } while (test_and_set_bit(i & (BITS_PER_LONG - 1), 4468 h->cmd_pool_bits + (i / BITS_PER_LONG)) != 0 && 4469 loopcount < 10); 4470 4471 /* Thread got starved? We do not expect this to ever happen. */ 4472 if (loopcount >= 10) 4473 return NULL; 4474 4475 c = h->cmd_pool + i; 4476 memset(c, 0, sizeof(*c)); 4477 c->Header.tag = cpu_to_le64((u64) i << DIRECT_LOOKUP_SHIFT); 4478 cmd_dma_handle = h->cmd_pool_dhandle + i * sizeof(*c); 4479 c->err_info = h->errinfo_pool + i; 4480 memset(c->err_info, 0, sizeof(*c->err_info)); 4481 err_dma_handle = h->errinfo_pool_dhandle 4482 + i * sizeof(*c->err_info); 4483 4484 c->cmdindex = i; 4485 4486 c->busaddr = (u32) cmd_dma_handle; 4487 temp64.val = (u64) err_dma_handle; 4488 c->ErrDesc.Addr = cpu_to_le64(err_dma_handle); 4489 c->ErrDesc.Len = cpu_to_le32(sizeof(*c->err_info)); 4490 4491 c->h = h; 4492 return c; 4493 } 4494 4495 static void cmd_free(struct ctlr_info *h, struct CommandList *c) 4496 { 4497 int i; 4498 4499 i = c - h->cmd_pool; 4500 clear_bit(i & (BITS_PER_LONG - 1), 4501 h->cmd_pool_bits + (i / BITS_PER_LONG)); 4502 } 4503 4504 #ifdef CONFIG_COMPAT 4505 4506 static int hpsa_ioctl32_passthru(struct scsi_device *dev, int cmd, 4507 void __user *arg) 4508 { 4509 IOCTL32_Command_struct __user *arg32 = 4510 (IOCTL32_Command_struct __user *) arg; 4511 IOCTL_Command_struct arg64; 4512 IOCTL_Command_struct __user *p = compat_alloc_user_space(sizeof(arg64)); 4513 int err; 4514 u32 cp; 4515 4516 memset(&arg64, 0, sizeof(arg64)); 4517 err = 0; 4518 err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info, 4519 sizeof(arg64.LUN_info)); 4520 err |= copy_from_user(&arg64.Request, &arg32->Request, 4521 sizeof(arg64.Request)); 4522 err |= copy_from_user(&arg64.error_info, &arg32->error_info, 4523 sizeof(arg64.error_info)); 4524 err |= get_user(arg64.buf_size, &arg32->buf_size); 4525 err |= get_user(cp, &arg32->buf); 4526 arg64.buf = compat_ptr(cp); 4527 err |= copy_to_user(p, &arg64, sizeof(arg64)); 4528 4529 if (err) 4530 return -EFAULT; 4531 4532 err = hpsa_ioctl(dev, CCISS_PASSTHRU, p); 4533 if (err) 4534 return err; 4535 err |= copy_in_user(&arg32->error_info, &p->error_info, 4536 sizeof(arg32->error_info)); 4537 if (err) 4538 return -EFAULT; 4539 return err; 4540 } 4541 4542 static int hpsa_ioctl32_big_passthru(struct scsi_device *dev, 4543 int cmd, void __user *arg) 4544 { 4545 BIG_IOCTL32_Command_struct __user *arg32 = 4546 (BIG_IOCTL32_Command_struct __user *) arg; 4547 BIG_IOCTL_Command_struct arg64; 4548 BIG_IOCTL_Command_struct __user *p = 4549 compat_alloc_user_space(sizeof(arg64)); 4550 int err; 4551 u32 cp; 4552
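	/*
	 * Same compat strategy as hpsa_ioctl32_passthru() above: copy the
	 * 32-bit user layout field by field into a native 64-bit struct
	 * staged via compat_alloc_user_space(), forward it to hpsa_ioctl(),
	 * then copy the error_info back out to the 32-bit caller.
	 */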
4553 memset(&arg64, 0, sizeof(arg64)); 4554 err = 0; 4555 err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info, 4556 sizeof(arg64.LUN_info)); 4557 err |= copy_from_user(&arg64.Request, &arg32->Request, 4558 sizeof(arg64.Request)); 4559 err |= copy_from_user(&arg64.error_info, &arg32->error_info, 4560 sizeof(arg64.error_info)); 4561 err |= get_user(arg64.buf_size, &arg32->buf_size); 4562 err |= get_user(arg64.malloc_size, &arg32->malloc_size); 4563 err |= get_user(cp, &arg32->buf); 4564 arg64.buf = compat_ptr(cp); 4565 err |= copy_to_user(p, &arg64, sizeof(arg64)); 4566 4567 if (err) 4568 return -EFAULT; 4569 4570 err = hpsa_ioctl(dev, CCISS_BIG_PASSTHRU, p); 4571 if (err) 4572 return err; 4573 err |= copy_in_user(&arg32->error_info, &p->error_info, 4574 sizeof(arg32->error_info)); 4575 if (err) 4576 return -EFAULT; 4577 return err; 4578 } 4579 4580 static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd, void __user *arg) 4581 { 4582 switch (cmd) { 4583 case CCISS_GETPCIINFO: 4584 case CCISS_GETINTINFO: 4585 case CCISS_SETINTINFO: 4586 case CCISS_GETNODENAME: 4587 case CCISS_SETNODENAME: 4588 case CCISS_GETHEARTBEAT: 4589 case CCISS_GETBUSTYPES: 4590 case CCISS_GETFIRMVER: 4591 case CCISS_GETDRIVVER: 4592 case CCISS_REVALIDVOLS: 4593 case CCISS_DEREGDISK: 4594 case CCISS_REGNEWDISK: 4595 case CCISS_REGNEWD: 4596 case CCISS_RESCANDISK: 4597 case CCISS_GETLUNINFO: 4598 return hpsa_ioctl(dev, cmd, arg); 4599 4600 case CCISS_PASSTHRU32: 4601 return hpsa_ioctl32_passthru(dev, cmd, arg); 4602 case CCISS_BIG_PASSTHRU32: 4603 return hpsa_ioctl32_big_passthru(dev, cmd, arg); 4604 4605 default: 4606 return -ENOIOCTLCMD; 4607 } 4608 } 4609 #endif 4610 4611 static int hpsa_getpciinfo_ioctl(struct ctlr_info *h, void __user *argp) 4612 { 4613 struct hpsa_pci_info pciinfo; 4614 4615 if (!argp) 4616 return -EINVAL; 4617 pciinfo.domain = pci_domain_nr(h->pdev->bus); 4618 pciinfo.bus = h->pdev->bus->number; 4619 pciinfo.dev_fn = h->pdev->devfn; 4620 pciinfo.board_id = h->board_id; 4621 if (copy_to_user(argp, &pciinfo, sizeof(pciinfo))) 4622 return -EFAULT; 4623 return 0; 4624 } 4625 4626 static int hpsa_getdrivver_ioctl(struct ctlr_info *h, void __user *argp) 4627 { 4628 DriverVer_type DriverVer; 4629 unsigned char vmaj, vmin, vsubmin; 4630 int rc; 4631 4632 rc = sscanf(HPSA_DRIVER_VERSION, "%hhu.%hhu.%hhu", 4633 &vmaj, &vmin, &vsubmin); 4634 if (rc != 3) { 4635 dev_info(&h->pdev->dev, "driver version string '%s' " 4636 "unrecognized.", HPSA_DRIVER_VERSION); 4637 vmaj = 0; 4638 vmin = 0; 4639 vsubmin = 0; 4640 } 4641 DriverVer = (vmaj << 16) | (vmin << 8) | vsubmin; 4642 if (!argp) 4643 return -EINVAL; 4644 if (copy_to_user(argp, &DriverVer, sizeof(DriverVer_type))) 4645 return -EFAULT; 4646 return 0; 4647 } 4648 4649 static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp) 4650 { 4651 IOCTL_Command_struct iocommand; 4652 struct CommandList *c; 4653 char *buff = NULL; 4654 u64 temp64; 4655 int rc = 0; 4656 4657 if (!argp) 4658 return -EINVAL; 4659 if (!capable(CAP_SYS_RAWIO)) 4660 return -EPERM; 4661 if (copy_from_user(&iocommand, argp, sizeof(iocommand))) 4662 return -EFAULT; 4663 if ((iocommand.buf_size < 1) && 4664 (iocommand.Request.Type.Direction != XFER_NONE)) { 4665 return -EINVAL; 4666 } 4667 if (iocommand.buf_size > 0) { 4668 buff = kmalloc(iocommand.buf_size, GFP_KERNEL); 4669 if (buff == NULL) 4670 return -EFAULT; 4671 if (iocommand.Request.Type.Direction & XFER_WRITE) { 4672 /* Copy the data into the buffer we created */ 4673 if (copy_from_user(buff, iocommand.buf, 4674 
iocommand.buf_size)) { 4675 rc = -EFAULT; 4676 goto out_kfree; 4677 } 4678 } else { 4679 memset(buff, 0, iocommand.buf_size); 4680 } 4681 } 4682 c = cmd_alloc(h); 4683 if (c == NULL) { 4684 rc = -ENOMEM; 4685 goto out_kfree; 4686 } 4687 /* Fill in the command type */ 4688 c->cmd_type = CMD_IOCTL_PEND; 4689 /* Fill in Command Header */ 4690 c->Header.ReplyQueue = 0; /* unused in simple mode */ 4691 if (iocommand.buf_size > 0) { /* buffer to fill */ 4692 c->Header.SGList = 1; 4693 c->Header.SGTotal = cpu_to_le16(1); 4694 } else { /* no buffers to fill */ 4695 c->Header.SGList = 0; 4696 c->Header.SGTotal = cpu_to_le16(0); 4697 } 4698 memcpy(&c->Header.LUN, &iocommand.LUN_info, sizeof(c->Header.LUN)); 4699 4700 /* Fill in Request block */ 4701 memcpy(&c->Request, &iocommand.Request, 4702 sizeof(c->Request)); 4703 4704 /* Fill in the scatter gather information */ 4705 if (iocommand.buf_size > 0) { 4706 temp64 = pci_map_single(h->pdev, buff, 4707 iocommand.buf_size, PCI_DMA_BIDIRECTIONAL); 4708 if (dma_mapping_error(&h->pdev->dev, (dma_addr_t) temp64)) { 4709 c->SG[0].Addr = cpu_to_le64(0); 4710 c->SG[0].Len = cpu_to_le32(0); 4711 rc = -ENOMEM; 4712 goto out; 4713 } 4714 c->SG[0].Addr = cpu_to_le64(temp64); 4715 c->SG[0].Len = cpu_to_le32(iocommand.buf_size); 4716 c->SG[0].Ext = cpu_to_le32(HPSA_SG_LAST); /* not chaining */ 4717 } 4718 hpsa_scsi_do_simple_cmd_core_if_no_lockup(h, c); 4719 if (iocommand.buf_size > 0) 4720 hpsa_pci_unmap(h->pdev, c, 1, PCI_DMA_BIDIRECTIONAL); 4721 check_ioctl_unit_attention(h, c); 4722 4723 /* Copy the error information out */ 4724 memcpy(&iocommand.error_info, c->err_info, 4725 sizeof(iocommand.error_info)); 4726 if (copy_to_user(argp, &iocommand, sizeof(iocommand))) { 4727 rc = -EFAULT; 4728 goto out; 4729 } 4730 if ((iocommand.Request.Type.Direction & XFER_READ) && 4731 iocommand.buf_size > 0) { 4732 /* Copy the data out of the buffer we created */ 4733 if (copy_to_user(iocommand.buf, buff, iocommand.buf_size)) { 4734 rc = -EFAULT; 4735 goto out; 4736 } 4737 } 4738 out: 4739 cmd_free(h, c); 4740 out_kfree: 4741 kfree(buff); 4742 return rc; 4743 } 4744 4745 static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp) 4746 { 4747 BIG_IOCTL_Command_struct *ioc; 4748 struct CommandList *c; 4749 unsigned char **buff = NULL; 4750 int *buff_size = NULL; 4751 u64 temp64; 4752 BYTE sg_used = 0; 4753 int status = 0; 4754 u32 left; 4755 u32 sz; 4756 BYTE __user *data_ptr; 4757 4758 if (!argp) 4759 return -EINVAL; 4760 if (!capable(CAP_SYS_RAWIO)) 4761 return -EPERM; 4762 ioc = (BIG_IOCTL_Command_struct *) 4763 kmalloc(sizeof(*ioc), GFP_KERNEL); 4764 if (!ioc) { 4765 status = -ENOMEM; 4766 goto cleanup1; 4767 } 4768 if (copy_from_user(ioc, argp, sizeof(*ioc))) { 4769 status = -EFAULT; 4770 goto cleanup1; 4771 } 4772 if ((ioc->buf_size < 1) && 4773 (ioc->Request.Type.Direction != XFER_NONE)) { 4774 status = -EINVAL; 4775 goto cleanup1; 4776 } 4777 /* Check kmalloc limits using all SGs */ 4778 if (ioc->malloc_size > MAX_KMALLOC_SIZE) { 4779 status = -EINVAL; 4780 goto cleanup1; 4781 } 4782 if (ioc->buf_size > ioc->malloc_size * SG_ENTRIES_IN_CMD) { 4783 status = -EINVAL; 4784 goto cleanup1; 4785 } 4786 buff = kzalloc(SG_ENTRIES_IN_CMD * sizeof(char *), GFP_KERNEL); 4787 if (!buff) { 4788 status = -ENOMEM; 4789 goto cleanup1; 4790 } 4791 buff_size = kmalloc(SG_ENTRIES_IN_CMD * sizeof(int), GFP_KERNEL); 4792 if (!buff_size) { 4793 status = -ENOMEM; 4794 goto cleanup1; 4795 } 4796 left = ioc->buf_size; 4797 data_ptr = ioc->buf; 4798 while (left) { 4799 sz = (left 
> ioc->malloc_size) ? ioc->malloc_size : left; 4800 buff_size[sg_used] = sz; 4801 buff[sg_used] = kmalloc(sz, GFP_KERNEL); 4802 if (buff[sg_used] == NULL) { 4803 status = -ENOMEM; 4804 goto cleanup1; 4805 } 4806 if (ioc->Request.Type.Direction & XFER_WRITE) { 4807 if (copy_from_user(buff[sg_used], data_ptr, sz)) { 4808 status = -EFAULT; 4809 goto cleanup1; 4810 } 4811 } else 4812 memset(buff[sg_used], 0, sz); 4813 left -= sz; 4814 data_ptr += sz; 4815 sg_used++; 4816 } 4817 c = cmd_alloc(h); 4818 if (c == NULL) { 4819 status = -ENOMEM; 4820 goto cleanup1; 4821 } 4822 c->cmd_type = CMD_IOCTL_PEND; 4823 c->Header.ReplyQueue = 0; 4824 c->Header.SGList = (u8) sg_used; 4825 c->Header.SGTotal = cpu_to_le16(sg_used); 4826 memcpy(&c->Header.LUN, &ioc->LUN_info, sizeof(c->Header.LUN)); 4827 memcpy(&c->Request, &ioc->Request, sizeof(c->Request)); 4828 if (ioc->buf_size > 0) { 4829 int i; 4830 for (i = 0; i < sg_used; i++) { 4831 temp64 = pci_map_single(h->pdev, buff[i], 4832 buff_size[i], PCI_DMA_BIDIRECTIONAL); 4833 if (dma_mapping_error(&h->pdev->dev, 4834 (dma_addr_t) temp64)) { 4835 c->SG[i].Addr = cpu_to_le64(0); 4836 c->SG[i].Len = cpu_to_le32(0); 4837 hpsa_pci_unmap(h->pdev, c, i, 4838 PCI_DMA_BIDIRECTIONAL); 4839 status = -ENOMEM; 4840 goto cleanup0; 4841 } 4842 c->SG[i].Addr = cpu_to_le64(temp64); 4843 c->SG[i].Len = cpu_to_le32(buff_size[i]); 4844 c->SG[i].Ext = cpu_to_le32(0); 4845 } 4846 c->SG[--i].Ext = cpu_to_le32(HPSA_SG_LAST); 4847 } 4848 hpsa_scsi_do_simple_cmd_core_if_no_lockup(h, c); 4849 if (sg_used) 4850 hpsa_pci_unmap(h->pdev, c, sg_used, PCI_DMA_BIDIRECTIONAL); 4851 check_ioctl_unit_attention(h, c); 4852 /* Copy the error information out */ 4853 memcpy(&ioc->error_info, c->err_info, sizeof(ioc->error_info)); 4854 if (copy_to_user(argp, ioc, sizeof(*ioc))) { 4855 status = -EFAULT; 4856 goto cleanup0; 4857 } 4858 if ((ioc->Request.Type.Direction & XFER_READ) && ioc->buf_size > 0) { 4859 int i; 4860 4861 /* Copy the data out of the buffer we created */ 4862 BYTE __user *ptr = ioc->buf; 4863 for (i = 0; i < sg_used; i++) { 4864 if (copy_to_user(ptr, buff[i], buff_size[i])) { 4865 status = -EFAULT; 4866 goto cleanup0; 4867 } 4868 ptr += buff_size[i]; 4869 } 4870 } 4871 status = 0; 4872 cleanup0: 4873 cmd_free(h, c); 4874 cleanup1: 4875 if (buff) { 4876 int i; 4877 4878 for (i = 0; i < sg_used; i++) 4879 kfree(buff[i]); 4880 kfree(buff); 4881 } 4882 kfree(buff_size); 4883 kfree(ioc); 4884 return status; 4885 } 4886 4887 static void check_ioctl_unit_attention(struct ctlr_info *h, 4888 struct CommandList *c) 4889 { 4890 if (c->err_info->CommandStatus == CMD_TARGET_STATUS && 4891 c->err_info->ScsiStatus != SAM_STAT_CHECK_CONDITION) 4892 (void) check_for_unit_attention(h, c); 4893 } 4894 4895 static int increment_passthru_count(struct ctlr_info *h) 4896 { 4897 unsigned long flags; 4898 4899 spin_lock_irqsave(&h->passthru_count_lock, flags); 4900 if (h->passthru_count >= HPSA_MAX_CONCURRENT_PASSTHRUS) { 4901 spin_unlock_irqrestore(&h->passthru_count_lock, flags); 4902 return -1; 4903 } 4904 h->passthru_count++; 4905 spin_unlock_irqrestore(&h->passthru_count_lock, flags); 4906 return 0; 4907 } 4908 4909 static void decrement_passthru_count(struct ctlr_info *h) 4910 { 4911 unsigned long flags; 4912 4913 spin_lock_irqsave(&h->passthru_count_lock, flags); 4914 if (h->passthru_count <= 0) { 4915 spin_unlock_irqrestore(&h->passthru_count_lock, flags); 4916 /* not expecting to get here. 
*/ 4917 dev_warn(&h->pdev->dev, "Bug detected, passthru_count seems to be incorrect.\n"); 4918 return; 4919 } 4920 h->passthru_count--; 4921 spin_unlock_irqrestore(&h->passthru_count_lock, flags); 4922 } 4923 4924 /* 4925 * ioctl 4926 */ 4927 static int hpsa_ioctl(struct scsi_device *dev, int cmd, void __user *arg) 4928 { 4929 struct ctlr_info *h; 4930 void __user *argp = (void __user *)arg; 4931 int rc; 4932 4933 h = sdev_to_hba(dev); 4934 4935 switch (cmd) { 4936 case CCISS_DEREGDISK: 4937 case CCISS_REGNEWDISK: 4938 case CCISS_REGNEWD: 4939 hpsa_scan_start(h->scsi_host); 4940 return 0; 4941 case CCISS_GETPCIINFO: 4942 return hpsa_getpciinfo_ioctl(h, argp); 4943 case CCISS_GETDRIVVER: 4944 return hpsa_getdrivver_ioctl(h, argp); 4945 case CCISS_PASSTHRU: 4946 if (increment_passthru_count(h)) 4947 return -EAGAIN; 4948 rc = hpsa_passthru_ioctl(h, argp); 4949 decrement_passthru_count(h); 4950 return rc; 4951 case CCISS_BIG_PASSTHRU: 4952 if (increment_passthru_count(h)) 4953 return -EAGAIN; 4954 rc = hpsa_big_passthru_ioctl(h, argp); 4955 decrement_passthru_count(h); 4956 return rc; 4957 default: 4958 return -ENOTTY; 4959 } 4960 } 4961 4962 static int hpsa_send_host_reset(struct ctlr_info *h, unsigned char *scsi3addr, 4963 u8 reset_type) 4964 { 4965 struct CommandList *c; 4966 4967 c = cmd_alloc(h); 4968 if (!c) 4969 return -ENOMEM; 4970 /* fill_cmd can't fail here, no data buffer to map */ 4971 (void) fill_cmd(c, HPSA_DEVICE_RESET_MSG, h, NULL, 0, 0, 4972 RAID_CTLR_LUNID, TYPE_MSG); 4973 c->Request.CDB[1] = reset_type; /* fill_cmd defaults to target reset */ 4974 c->waiting = NULL; 4975 enqueue_cmd_and_start_io(h, c); 4976 /* Don't wait for completion, the reset won't complete. Don't free 4977 * the command either. This is the last command we will send before 4978 * re-initializing everything, so it doesn't matter and won't leak. 4979 */ 4980 return 0; 4981 } 4982 4983 static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h, 4984 void *buff, size_t size, u16 page_code, unsigned char *scsi3addr, 4985 int cmd_type) 4986 { 4987 int pci_dir = XFER_NONE; 4988 struct CommandList *a; /* for commands to be aborted */ 4989 4990 c->cmd_type = CMD_IOCTL_PEND; 4991 c->Header.ReplyQueue = 0; 4992 if (buff != NULL && size > 0) { 4993 c->Header.SGList = 1; 4994 c->Header.SGTotal = cpu_to_le16(1); 4995 } else { 4996 c->Header.SGList = 0; 4997 c->Header.SGTotal = cpu_to_le16(0); 4998 } 4999 memcpy(c->Header.LUN.LunAddrBytes, scsi3addr, 8); 5000 5001 if (cmd_type == TYPE_CMD) { 5002 switch (cmd) { 5003 case HPSA_INQUIRY: 5004 /* are we trying to read a vital product page */ 5005 if (page_code & VPD_PAGE) { 5006 c->Request.CDB[1] = 0x01; 5007 c->Request.CDB[2] = (page_code & 0xff); 5008 } 5009 c->Request.CDBLen = 6; 5010 c->Request.type_attr_dir = 5011 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ); 5012 c->Request.Timeout = 0; 5013 c->Request.CDB[0] = HPSA_INQUIRY; 5014 c->Request.CDB[4] = size & 0xFF; 5015 break; 5016 case HPSA_REPORT_LOG: 5017 case HPSA_REPORT_PHYS: 5018 /* Talking to the controller, so it's a physical command. 5019 Mode = 00, target = 0. Nothing to write.
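 The allocation length is encoded big-endian in CDB[6..9] below
 (MSB first), matching the CISS report-LUNs CDB layout.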
5020 */ 5021 c->Request.CDBLen = 12; 5022 c->Request.type_attr_dir = 5023 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ); 5024 c->Request.Timeout = 0; 5025 c->Request.CDB[0] = cmd; 5026 c->Request.CDB[6] = (size >> 24) & 0xFF; /* MSB */ 5027 c->Request.CDB[7] = (size >> 16) & 0xFF; 5028 c->Request.CDB[8] = (size >> 8) & 0xFF; 5029 c->Request.CDB[9] = size & 0xFF; 5030 break; 5031 case HPSA_CACHE_FLUSH: 5032 c->Request.CDBLen = 12; 5033 c->Request.type_attr_dir = 5034 TYPE_ATTR_DIR(cmd_type, 5035 ATTR_SIMPLE, XFER_WRITE); 5036 c->Request.Timeout = 0; 5037 c->Request.CDB[0] = BMIC_WRITE; 5038 c->Request.CDB[6] = BMIC_CACHE_FLUSH; 5039 c->Request.CDB[7] = (size >> 8) & 0xFF; 5040 c->Request.CDB[8] = size & 0xFF; 5041 break; 5042 case TEST_UNIT_READY: 5043 c->Request.CDBLen = 6; 5044 c->Request.type_attr_dir = 5045 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_NONE); 5046 c->Request.Timeout = 0; 5047 break; 5048 case HPSA_GET_RAID_MAP: 5049 c->Request.CDBLen = 12; 5050 c->Request.type_attr_dir = 5051 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ); 5052 c->Request.Timeout = 0; 5053 c->Request.CDB[0] = HPSA_CISS_READ; 5054 c->Request.CDB[1] = cmd; 5055 c->Request.CDB[6] = (size >> 24) & 0xFF; /* MSB */ 5056 c->Request.CDB[7] = (size >> 16) & 0xFF; 5057 c->Request.CDB[8] = (size >> 8) & 0xFF; 5058 c->Request.CDB[9] = size & 0xFF; 5059 break; 5060 case BMIC_SENSE_CONTROLLER_PARAMETERS: 5061 c->Request.CDBLen = 10; 5062 c->Request.type_attr_dir = 5063 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ); 5064 c->Request.Timeout = 0; 5065 c->Request.CDB[0] = BMIC_READ; 5066 c->Request.CDB[6] = BMIC_SENSE_CONTROLLER_PARAMETERS; 5067 c->Request.CDB[7] = (size >> 16) & 0xFF; 5068 c->Request.CDB[8] = (size >> 8) & 0xFF; 5069 break; 5070 default: 5071 dev_warn(&h->pdev->dev, "unknown command 0x%02x\n", cmd); 5072 BUG(); 5073 return -1; 5074 } 5075 } else if (cmd_type == TYPE_MSG) { 5076 switch (cmd) { 5077 5078 case HPSA_DEVICE_RESET_MSG: 5079 c->Request.CDBLen = 16; 5080 c->Request.type_attr_dir = 5081 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_NONE); 5082 c->Request.Timeout = 0; /* Don't time out */ 5083 memset(&c->Request.CDB[0], 0, sizeof(c->Request.CDB)); 5084 c->Request.CDB[0] = cmd; 5085 c->Request.CDB[1] = HPSA_RESET_TYPE_LUN; 5086 /* If bytes 4-7 are zero, it means reset the */ 5087 /* LunID device */ 5088 c->Request.CDB[4] = 0x00; 5089 c->Request.CDB[5] = 0x00; 5090 c->Request.CDB[6] = 0x00; 5091 c->Request.CDB[7] = 0x00; 5092 break; 5093 case HPSA_ABORT_MSG: 5094 a = buff; /* point to command to be aborted */ 5095 dev_dbg(&h->pdev->dev, 5096 "Abort Tag:0x%016llx request Tag:0x%016llx", 5097 a->Header.tag, c->Header.tag); 5098 c->Request.CDBLen = 16; 5099 c->Request.type_attr_dir = 5100 TYPE_ATTR_DIR(cmd_type, 5101 ATTR_SIMPLE, XFER_WRITE); 5102 c->Request.Timeout = 0; /* Don't time out */ 5103 c->Request.CDB[0] = HPSA_TASK_MANAGEMENT; 5104 c->Request.CDB[1] = HPSA_TMF_ABORT_TASK; 5105 c->Request.CDB[2] = 0x00; /* reserved */ 5106 c->Request.CDB[3] = 0x00; /* reserved */ 5107 /* Tag to abort goes in CDB[4]-CDB[11] */ 5108 memcpy(&c->Request.CDB[4], &a->Header.tag, 5109 sizeof(a->Header.tag)); 5110 c->Request.CDB[12] = 0x00; /* reserved */ 5111 c->Request.CDB[13] = 0x00; /* reserved */ 5112 c->Request.CDB[14] = 0x00; /* reserved */ 5113 c->Request.CDB[15] = 0x00; /* reserved */ 5114 break; 5115 default: 5116 dev_warn(&h->pdev->dev, "unknown message type %d\n", 5117 cmd); 5118 BUG(); 5119 } 5120 } else { 5121 dev_warn(&h->pdev->dev, "unknown command type %d\n", cmd_type); 5122 BUG(); 5123 } 5124 5125
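	/*
	 * Map the transfer direction encoded above back to a PCI DMA
	 * direction so the single data buffer (if any) is mapped with
	 * matching semantics.
	 */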
switch (GET_DIR(c->Request.type_attr_dir)) { 5126 case XFER_READ: 5127 pci_dir = PCI_DMA_FROMDEVICE; 5128 break; 5129 case XFER_WRITE: 5130 pci_dir = PCI_DMA_TODEVICE; 5131 break; 5132 case XFER_NONE: 5133 pci_dir = PCI_DMA_NONE; 5134 break; 5135 default: 5136 pci_dir = PCI_DMA_BIDIRECTIONAL; 5137 } 5138 if (hpsa_map_one(h->pdev, c, buff, size, pci_dir)) 5139 return -1; 5140 return 0; 5141 } 5142 5143 /* 5144 * Map (physical) PCI mem into (virtual) kernel space 5145 */ 5146 static void __iomem *remap_pci_mem(ulong base, ulong size) 5147 { 5148 ulong page_base = ((ulong) base) & PAGE_MASK; 5149 ulong page_offs = ((ulong) base) - page_base; 5150 void __iomem *page_remapped = ioremap_nocache(page_base, 5151 page_offs + size); 5152 5153 return page_remapped ? (page_remapped + page_offs) : NULL; 5154 } 5155 5156 static inline unsigned long get_next_completion(struct ctlr_info *h, u8 q) 5157 { 5158 return h->access.command_completed(h, q); 5159 } 5160 5161 static inline bool interrupt_pending(struct ctlr_info *h) 5162 { 5163 return h->access.intr_pending(h); 5164 } 5165 5166 static inline long interrupt_not_for_us(struct ctlr_info *h) 5167 { 5168 return (h->access.intr_pending(h) == 0) || 5169 (h->interrupts_enabled == 0); 5170 } 5171 5172 static inline int bad_tag(struct ctlr_info *h, u32 tag_index, 5173 u32 raw_tag) 5174 { 5175 if (unlikely(tag_index >= h->nr_cmds)) { 5176 dev_warn(&h->pdev->dev, "bad tag 0x%08x ignored.\n", raw_tag); 5177 return 1; 5178 } 5179 return 0; 5180 } 5181 5182 static inline void finish_cmd(struct CommandList *c) 5183 { 5184 dial_up_lockup_detection_on_fw_flash_complete(c->h, c); 5185 if (likely(c->cmd_type == CMD_IOACCEL1 || c->cmd_type == CMD_SCSI 5186 || c->cmd_type == CMD_IOACCEL2)) 5187 complete_scsi_command(c); 5188 else if (c->cmd_type == CMD_IOCTL_PEND) 5189 complete(c->waiting); 5190 } 5191 5192 5193 static inline u32 hpsa_tag_discard_error_bits(struct ctlr_info *h, u32 tag) 5194 { 5195 #define HPSA_PERF_ERROR_BITS ((1 << DIRECT_LOOKUP_SHIFT) - 1) 5196 #define HPSA_SIMPLE_ERROR_BITS 0x03 5197 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant))) 5198 return tag & ~HPSA_SIMPLE_ERROR_BITS; 5199 return tag & ~HPSA_PERF_ERROR_BITS; 5200 } 5201 5202 /* process completion of an indexed ("direct lookup") command */ 5203 static inline void process_indexed_cmd(struct ctlr_info *h, 5204 u32 raw_tag) 5205 { 5206 u32 tag_index; 5207 struct CommandList *c; 5208 5209 tag_index = raw_tag >> DIRECT_LOOKUP_SHIFT; 5210 if (!bad_tag(h, tag_index, raw_tag)) { 5211 c = h->cmd_pool + tag_index; 5212 finish_cmd(c); 5213 } 5214 } 5215 5216 /* Some controllers, like p400, will give us one interrupt 5217 * after a soft reset, even if we turned interrupts off. 5218 * Only need to check for this in the hpsa_xxx_discard_completions 5219 * functions. 5220 */ 5221 static int ignore_bogus_interrupt(struct ctlr_info *h) 5222 { 5223 if (likely(!reset_devices)) 5224 return 0; 5225 5226 if (likely(h->interrupts_enabled)) 5227 return 0; 5228 5229 dev_info(&h->pdev->dev, "Received interrupt while interrupts disabled " 5230 "(known firmware bug.) Ignoring.\n"); 5231 5232 return 1; 5233 } 5234 5235 /* 5236 * Convert &h->q[x] (passed to interrupt handlers) back to h. 5237 * Relies on (h->q[x] == x) being true for x such that 5238 * 0 <= x < MAX_REPLY_QUEUES.
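 *
 * Each entry of h->q[] holds its own index, so for a pointer "queue"
 * aimed at &h->q[x], (queue - *queue) lands on &h->q[0] and
 * container_of() recovers the enclosing ctlr_info.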
5239 */ 5240 static struct ctlr_info *queue_to_hba(u8 *queue) 5241 { 5242 return container_of((queue - *queue), struct ctlr_info, q[0]); 5243 } 5244 5245 static irqreturn_t hpsa_intx_discard_completions(int irq, void *queue) 5246 { 5247 struct ctlr_info *h = queue_to_hba(queue); 5248 u8 q = *(u8 *) queue; 5249 u32 raw_tag; 5250 5251 if (ignore_bogus_interrupt(h)) 5252 return IRQ_NONE; 5253 5254 if (interrupt_not_for_us(h)) 5255 return IRQ_NONE; 5256 h->last_intr_timestamp = get_jiffies_64(); 5257 while (interrupt_pending(h)) { 5258 raw_tag = get_next_completion(h, q); 5259 while (raw_tag != FIFO_EMPTY) 5260 raw_tag = next_command(h, q); 5261 } 5262 return IRQ_HANDLED; 5263 } 5264 5265 static irqreturn_t hpsa_msix_discard_completions(int irq, void *queue) 5266 { 5267 struct ctlr_info *h = queue_to_hba(queue); 5268 u32 raw_tag; 5269 u8 q = *(u8 *) queue; 5270 5271 if (ignore_bogus_interrupt(h)) 5272 return IRQ_NONE; 5273 5274 h->last_intr_timestamp = get_jiffies_64(); 5275 raw_tag = get_next_completion(h, q); 5276 while (raw_tag != FIFO_EMPTY) 5277 raw_tag = next_command(h, q); 5278 return IRQ_HANDLED; 5279 } 5280 5281 static irqreturn_t do_hpsa_intr_intx(int irq, void *queue) 5282 { 5283 struct ctlr_info *h = queue_to_hba((u8 *) queue); 5284 u32 raw_tag; 5285 u8 q = *(u8 *) queue; 5286 5287 if (interrupt_not_for_us(h)) 5288 return IRQ_NONE; 5289 h->last_intr_timestamp = get_jiffies_64(); 5290 while (interrupt_pending(h)) { 5291 raw_tag = get_next_completion(h, q); 5292 while (raw_tag != FIFO_EMPTY) { 5293 process_indexed_cmd(h, raw_tag); 5294 raw_tag = next_command(h, q); 5295 } 5296 } 5297 return IRQ_HANDLED; 5298 } 5299 5300 static irqreturn_t do_hpsa_intr_msi(int irq, void *queue) 5301 { 5302 struct ctlr_info *h = queue_to_hba(queue); 5303 u32 raw_tag; 5304 u8 q = *(u8 *) queue; 5305 5306 h->last_intr_timestamp = get_jiffies_64(); 5307 raw_tag = get_next_completion(h, q); 5308 while (raw_tag != FIFO_EMPTY) { 5309 process_indexed_cmd(h, raw_tag); 5310 raw_tag = next_command(h, q); 5311 } 5312 return IRQ_HANDLED; 5313 } 5314 5315 /* Send a message CDB to the firmware. Careful, this only works 5316 * in simple mode, not performant mode due to the tag lookup. 5317 * We only ever use this immediately after a controller reset. 5318 */ 5319 static int hpsa_message(struct pci_dev *pdev, unsigned char opcode, 5320 unsigned char type) 5321 { 5322 struct Command { 5323 struct CommandListHeader CommandHeader; 5324 struct RequestBlock Request; 5325 struct ErrDescriptor ErrorDescriptor; 5326 }; 5327 struct Command *cmd; 5328 static const size_t cmd_sz = sizeof(*cmd) + 5329 sizeof(cmd->ErrorDescriptor); 5330 dma_addr_t paddr64; 5331 __le32 paddr32; 5332 u32 tag; 5333 void __iomem *vaddr; 5334 int i, err; 5335 5336 vaddr = pci_ioremap_bar(pdev, 0); 5337 if (vaddr == NULL) 5338 return -ENOMEM; 5339 5340 /* The Inbound Post Queue only accepts 32-bit physical addresses for the 5341 * CCISS commands, so they must be allocated from the lower 4GiB of 5342 * memory. 5343 */ 5344 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); 5345 if (err) { 5346 iounmap(vaddr); 5347 return err; 5348 } 5349 5350 cmd = pci_alloc_consistent(pdev, cmd_sz, &paddr64); 5351 if (cmd == NULL) { 5352 iounmap(vaddr); 5353 return -ENOMEM; 5354 } 5355 5356 /* This must fit, because of the 32-bit consistent DMA mask. Also, 5357 * although there's no guarantee, we assume that the address is at 5358 * least 4-byte aligned (most likely, it's page-aligned). 
5359 */ 5360 paddr32 = cpu_to_le32(paddr64); 5361 5362 cmd->CommandHeader.ReplyQueue = 0; 5363 cmd->CommandHeader.SGList = 0; 5364 cmd->CommandHeader.SGTotal = cpu_to_le16(0); 5365 cmd->CommandHeader.tag = cpu_to_le64(paddr64); 5366 memset(&cmd->CommandHeader.LUN.LunAddrBytes, 0, 8); 5367 5368 cmd->Request.CDBLen = 16; 5369 cmd->Request.type_attr_dir = 5370 TYPE_ATTR_DIR(TYPE_MSG, ATTR_HEADOFQUEUE, XFER_NONE); 5371 cmd->Request.Timeout = 0; /* Don't time out */ 5372 cmd->Request.CDB[0] = opcode; 5373 cmd->Request.CDB[1] = type; 5374 memset(&cmd->Request.CDB[2], 0, 14); /* rest of the CDB is reserved */ 5375 cmd->ErrorDescriptor.Addr = 5376 cpu_to_le64((le32_to_cpu(paddr32) + sizeof(*cmd))); 5377 cmd->ErrorDescriptor.Len = cpu_to_le32(sizeof(struct ErrorInfo)); 5378 5379 writel(le32_to_cpu(paddr32), vaddr + SA5_REQUEST_PORT_OFFSET); 5380 5381 for (i = 0; i < HPSA_MSG_SEND_RETRY_LIMIT; i++) { 5382 tag = readl(vaddr + SA5_REPLY_PORT_OFFSET); 5383 if ((tag & ~HPSA_SIMPLE_ERROR_BITS) == paddr64) 5384 break; 5385 msleep(HPSA_MSG_SEND_RETRY_INTERVAL_MSECS); 5386 } 5387 5388 iounmap(vaddr); 5389 5390 /* we leak the DMA buffer here ... no choice since the controller could 5391 * still complete the command. 5392 */ 5393 if (i == HPSA_MSG_SEND_RETRY_LIMIT) { 5394 dev_err(&pdev->dev, "controller message %02x:%02x timed out\n", 5395 opcode, type); 5396 return -ETIMEDOUT; 5397 } 5398 5399 pci_free_consistent(pdev, cmd_sz, cmd, paddr64); 5400 5401 if (tag & HPSA_ERROR_BIT) { 5402 dev_err(&pdev->dev, "controller message %02x:%02x failed\n", 5403 opcode, type); 5404 return -EIO; 5405 } 5406 5407 dev_info(&pdev->dev, "controller message %02x:%02x succeeded\n", 5408 opcode, type); 5409 return 0; 5410 } 5411 5412 #define hpsa_noop(p) hpsa_message(p, 3, 0) 5413 5414 static int hpsa_controller_hard_reset(struct pci_dev *pdev, 5415 void __iomem *vaddr, u32 use_doorbell) 5416 { 5417 5418 if (use_doorbell) { 5419 /* For everything after the P600, the PCI power state method 5420 * of resetting the controller doesn't work, so we have this 5421 * other way using the doorbell register. 5422 */ 5423 dev_info(&pdev->dev, "using doorbell to reset controller\n"); 5424 writel(use_doorbell, vaddr + SA5_DOORBELL); 5425 5426 /* PMC hardware guys tell us we need a 10 second delay after 5427 * doorbell reset and before any attempt to talk to the board 5428 * at all to ensure that this actually works and doesn't fall 5429 * over in some weird corner cases. 5430 */ 5431 msleep(10000); 5432 } else { /* Try to do it the PCI power state way */ 5433 5434 /* Quoting from the Open CISS Specification: "The Power 5435 * Management Control/Status Register (CSR) controls the power 5436 * state of the device. The normal operating state is D0, 5437 * CSR=00h. The software off state is D3, CSR=03h. To reset 5438 * the controller, place the interface device in D3 then to D0, 5439 * this causes a secondary PCI reset which will reset the 5440 * controller." */ 5441 5442 int rc = 0; 5443 5444 dev_info(&pdev->dev, "using PCI PM to reset controller\n"); 5445 5446 /* enter the D3hot power management state */ 5447 rc = pci_set_power_state(pdev, PCI_D3hot); 5448 if (rc) 5449 return rc; 5450 5451 msleep(500); 5452 5453 /* enter the D0 power management state */ 5454 rc = pci_set_power_state(pdev, PCI_D0); 5455 if (rc) 5456 return rc; 5457 5458 /* 5459 * The P600 requires a small delay when changing states. 5460 * Otherwise we may think the board did not reset and we bail. 5461 * This for kdump only and is particular to the P600. 
5462 */ 5463 msleep(500); 5464 } 5465 return 0; 5466 } 5467 5468 static void init_driver_version(char *driver_version, int len) 5469 { 5470 memset(driver_version, 0, len); 5471 strncpy(driver_version, HPSA " " HPSA_DRIVER_VERSION, len - 1); 5472 } 5473 5474 static int write_driver_ver_to_cfgtable(struct CfgTable __iomem *cfgtable) 5475 { 5476 char *driver_version; 5477 int i, size = sizeof(cfgtable->driver_version); 5478 5479 driver_version = kmalloc(size, GFP_KERNEL); 5480 if (!driver_version) 5481 return -ENOMEM; 5482 5483 init_driver_version(driver_version, size); 5484 for (i = 0; i < size; i++) 5485 writeb(driver_version[i], &cfgtable->driver_version[i]); 5486 kfree(driver_version); 5487 return 0; 5488 } 5489 5490 static void read_driver_ver_from_cfgtable(struct CfgTable __iomem *cfgtable, 5491 unsigned char *driver_ver) 5492 { 5493 int i; 5494 5495 for (i = 0; i < sizeof(cfgtable->driver_version); i++) 5496 driver_ver[i] = readb(&cfgtable->driver_version[i]); 5497 } 5498 5499 static int controller_reset_failed(struct CfgTable __iomem *cfgtable) 5500 { 5501 5502 char *driver_ver, *old_driver_ver; 5503 int rc, size = sizeof(cfgtable->driver_version); 5504 5505 old_driver_ver = kmalloc(2 * size, GFP_KERNEL); 5506 if (!old_driver_ver) 5507 return -ENOMEM; 5508 driver_ver = old_driver_ver + size; 5509 5510 /* After a reset, the 32 bytes of "driver version" in the cfgtable 5511 * should have been changed, otherwise we know the reset failed. 5512 */ 5513 init_driver_version(old_driver_ver, size); 5514 read_driver_ver_from_cfgtable(cfgtable, driver_ver); 5515 rc = !memcmp(driver_ver, old_driver_ver, size); 5516 kfree(old_driver_ver); 5517 return rc; 5518 } 5519 /* This does a hard reset of the controller using PCI power management 5520 * states or the using the doorbell register. 5521 */ 5522 static int hpsa_kdump_hard_reset_controller(struct pci_dev *pdev) 5523 { 5524 u64 cfg_offset; 5525 u32 cfg_base_addr; 5526 u64 cfg_base_addr_index; 5527 void __iomem *vaddr; 5528 unsigned long paddr; 5529 u32 misc_fw_support; 5530 int rc; 5531 struct CfgTable __iomem *cfgtable; 5532 u32 use_doorbell; 5533 u32 board_id; 5534 u16 command_register; 5535 5536 /* For controllers as old as the P600, this is very nearly 5537 * the same thing as 5538 * 5539 * pci_save_state(pci_dev); 5540 * pci_set_power_state(pci_dev, PCI_D3hot); 5541 * pci_set_power_state(pci_dev, PCI_D0); 5542 * pci_restore_state(pci_dev); 5543 * 5544 * For controllers newer than the P600, the pci power state 5545 * method of resetting doesn't work so we have another way 5546 * using the doorbell register. 5547 */ 5548 5549 rc = hpsa_lookup_board_id(pdev, &board_id); 5550 if (rc < 0) { 5551 dev_warn(&pdev->dev, "Board ID not found\n"); 5552 return rc; 5553 } 5554 if (!ctlr_is_resettable(board_id)) { 5555 dev_warn(&pdev->dev, "Controller not resettable\n"); 5556 return -ENODEV; 5557 } 5558 5559 /* if controller is soft- but not hard resettable... */ 5560 if (!ctlr_is_hard_resettable(board_id)) 5561 return -ENOTSUPP; /* try soft reset later. 
*/ 5562 5563 /* Save the PCI command register */ 5564 pci_read_config_word(pdev, 4, &command_register); 5565 pci_save_state(pdev); 5566 5567 /* find the first memory BAR, so we can find the cfg table */ 5568 rc = hpsa_pci_find_memory_BAR(pdev, &paddr); 5569 if (rc) 5570 return rc; 5571 vaddr = remap_pci_mem(paddr, 0x250); 5572 if (!vaddr) 5573 return -ENOMEM; 5574 5575 /* find cfgtable in order to check if reset via doorbell is supported */ 5576 rc = hpsa_find_cfg_addrs(pdev, vaddr, &cfg_base_addr, 5577 &cfg_base_addr_index, &cfg_offset); 5578 if (rc) 5579 goto unmap_vaddr; 5580 cfgtable = remap_pci_mem(pci_resource_start(pdev, 5581 cfg_base_addr_index) + cfg_offset, sizeof(*cfgtable)); 5582 if (!cfgtable) { 5583 rc = -ENOMEM; 5584 goto unmap_vaddr; 5585 } 5586 rc = write_driver_ver_to_cfgtable(cfgtable); 5587 if (rc) 5588 goto unmap_cfgtable; 5589 5590 /* If reset via doorbell register is supported, use that. 5591 * There are two such methods. Favor the newest method. 5592 */ 5593 misc_fw_support = readl(&cfgtable->misc_fw_support); 5594 use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET2; 5595 if (use_doorbell) { 5596 use_doorbell = DOORBELL_CTLR_RESET2; 5597 } else { 5598 use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET; 5599 if (use_doorbell) { 5600 dev_warn(&pdev->dev, 5601 "Soft reset not supported. Firmware update is required.\n"); 5602 rc = -ENOTSUPP; /* try soft reset */ 5603 goto unmap_cfgtable; 5604 } 5605 } 5606 5607 rc = hpsa_controller_hard_reset(pdev, vaddr, use_doorbell); 5608 if (rc) 5609 goto unmap_cfgtable; 5610 5611 pci_restore_state(pdev); 5612 pci_write_config_word(pdev, 4, command_register); 5613 5614 /* Some devices (notably the HP Smart Array 5i Controller) 5615 need a little pause here */ 5616 msleep(HPSA_POST_RESET_PAUSE_MSECS); 5617 5618 rc = hpsa_wait_for_board_state(pdev, vaddr, BOARD_READY); 5619 if (rc) { 5620 dev_warn(&pdev->dev, 5621 "Failed waiting for board to become ready after hard reset\n"); 5622 goto unmap_cfgtable; 5623 } 5624 5625 rc = controller_reset_failed(cfgtable); 5626 if (rc < 0) 5627 goto unmap_cfgtable; 5628 if (rc) { 5629 dev_warn(&pdev->dev, "Unable to successfully reset " 5630 "controller. Will try soft reset.\n"); 5631 rc = -ENOTSUPP; 5632 } else { 5633 dev_info(&pdev->dev, "board ready after hard reset.\n"); 5634 } 5635 5636 unmap_cfgtable: 5637 iounmap(cfgtable); 5638 5639 unmap_vaddr: 5640 iounmap(vaddr); 5641 return rc; 5642 } 5643 5644 /* 5645 * We cannot read the structure directly, for portability we must use 5646 * the io functions. 5647 * This is for debug only.
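 *
 * (The table lives in ioremap()ed device memory, so each field must be
 * fetched with readb()/readl(), e.g. readl(&tb->TransportActive),
 * rather than dereferenced directly; plain loads from __iomem space
 * are not portable across architectures.)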
5648 */ 5649 static void print_cfg_table(struct device *dev, struct CfgTable __iomem *tb) 5650 { 5651 #ifdef HPSA_DEBUG 5652 int i; 5653 char temp_name[17]; 5654 5655 dev_info(dev, "Controller Configuration information\n"); 5656 dev_info(dev, "------------------------------------\n"); 5657 for (i = 0; i < 4; i++) 5658 temp_name[i] = readb(&(tb->Signature[i])); 5659 temp_name[4] = '\0'; 5660 dev_info(dev, " Signature = %s\n", temp_name); 5661 dev_info(dev, " Spec Number = %d\n", readl(&(tb->SpecValence))); 5662 dev_info(dev, " Transport methods supported = 0x%x\n", 5663 readl(&(tb->TransportSupport))); 5664 dev_info(dev, " Transport methods active = 0x%x\n", 5665 readl(&(tb->TransportActive))); 5666 dev_info(dev, " Requested transport Method = 0x%x\n", 5667 readl(&(tb->HostWrite.TransportRequest))); 5668 dev_info(dev, " Coalesce Interrupt Delay = 0x%x\n", 5669 readl(&(tb->HostWrite.CoalIntDelay))); 5670 dev_info(dev, " Coalesce Interrupt Count = 0x%x\n", 5671 readl(&(tb->HostWrite.CoalIntCount))); 5672 dev_info(dev, " Max outstanding commands = %d\n", 5673 readl(&(tb->CmdsOutMax))); 5674 dev_info(dev, " Bus Types = 0x%x\n", readl(&(tb->BusTypes))); 5675 for (i = 0; i < 16; i++) 5676 temp_name[i] = readb(&(tb->ServerName[i])); 5677 temp_name[16] = '\0'; 5678 dev_info(dev, " Server Name = %s\n", temp_name); 5679 dev_info(dev, " Heartbeat Counter = 0x%x\n\n\n", 5680 readl(&(tb->HeartBeat))); 5681 #endif /* HPSA_DEBUG */ 5682 } 5683 5684 static int find_PCI_BAR_index(struct pci_dev *pdev, unsigned long pci_bar_addr) 5685 { 5686 int i, offset, mem_type, bar_type; 5687 5688 if (pci_bar_addr == PCI_BASE_ADDRESS_0) /* looking for BAR zero? */ 5689 return 0; 5690 offset = 0; 5691 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) { 5692 bar_type = pci_resource_flags(pdev, i) & PCI_BASE_ADDRESS_SPACE; 5693 if (bar_type == PCI_BASE_ADDRESS_SPACE_IO) 5694 offset += 4; 5695 else { 5696 mem_type = pci_resource_flags(pdev, i) & 5697 PCI_BASE_ADDRESS_MEM_TYPE_MASK; 5698 switch (mem_type) { 5699 case PCI_BASE_ADDRESS_MEM_TYPE_32: 5700 case PCI_BASE_ADDRESS_MEM_TYPE_1M: 5701 offset += 4; /* 32 bit */ 5702 break; 5703 case PCI_BASE_ADDRESS_MEM_TYPE_64: 5704 offset += 8; 5705 break; 5706 default: /* reserved in PCI 2.2 */ 5707 dev_warn(&pdev->dev, 5708 "base address is invalid\n"); 5709 return -1; 5710 break; 5711 } 5712 } 5713 if (offset == pci_bar_addr - PCI_BASE_ADDRESS_0) 5714 return i + 1; 5715 } 5716 return -1; 5717 } 5718 5719 /* If MSI/MSI-X is supported by the kernel we will try to enable it on 5720 * controllers that are capable. If not, we use legacy INTx mode. 
5721 */ 5722 5723 static void hpsa_interrupt_mode(struct ctlr_info *h) 5724 { 5725 #ifdef CONFIG_PCI_MSI 5726 int err, i; 5727 struct msix_entry hpsa_msix_entries[MAX_REPLY_QUEUES]; 5728 5729 for (i = 0; i < MAX_REPLY_QUEUES; i++) { 5730 hpsa_msix_entries[i].vector = 0; 5731 hpsa_msix_entries[i].entry = i; 5732 } 5733 5734 /* Some boards advertise MSI but don't really support it */ 5735 if ((h->board_id == 0x40700E11) || (h->board_id == 0x40800E11) || 5736 (h->board_id == 0x40820E11) || (h->board_id == 0x40830E11)) 5737 goto default_int_mode; 5738 if (pci_find_capability(h->pdev, PCI_CAP_ID_MSIX)) { 5739 dev_info(&h->pdev->dev, "MSI-X capable controller\n"); 5740 h->msix_vector = MAX_REPLY_QUEUES; 5741 if (h->msix_vector > num_online_cpus()) 5742 h->msix_vector = num_online_cpus(); 5743 err = pci_enable_msix_range(h->pdev, hpsa_msix_entries, 5744 1, h->msix_vector); 5745 if (err < 0) { 5746 dev_warn(&h->pdev->dev, "MSI-X init failed %d\n", err); 5747 h->msix_vector = 0; 5748 goto single_msi_mode; 5749 } else if (err < h->msix_vector) { 5750 dev_warn(&h->pdev->dev, "only %d MSI-X vectors " 5751 "available\n", err); 5752 } 5753 h->msix_vector = err; 5754 for (i = 0; i < h->msix_vector; i++) 5755 h->intr[i] = hpsa_msix_entries[i].vector; 5756 return; 5757 } 5758 single_msi_mode: 5759 if (pci_find_capability(h->pdev, PCI_CAP_ID_MSI)) { 5760 dev_info(&h->pdev->dev, "MSI capable controller\n"); 5761 if (!pci_enable_msi(h->pdev)) 5762 h->msi_vector = 1; 5763 else 5764 dev_warn(&h->pdev->dev, "MSI init failed\n"); 5765 } 5766 default_int_mode: 5767 #endif /* CONFIG_PCI_MSI */ 5768 /* if we get here we're going to use the default interrupt mode */ 5769 h->intr[h->intr_mode] = h->pdev->irq; 5770 } 5771 5772 static int hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id) 5773 { 5774 int i; 5775 u32 subsystem_vendor_id, subsystem_device_id; 5776 5777 subsystem_vendor_id = pdev->subsystem_vendor; 5778 subsystem_device_id = pdev->subsystem_device; 5779 *board_id = ((subsystem_device_id << 16) & 0xffff0000) | 5780 subsystem_vendor_id; 5781 5782 for (i = 0; i < ARRAY_SIZE(products); i++) 5783 if (*board_id == products[i].board_id) 5784 return i; 5785 5786 if ((subsystem_vendor_id != PCI_VENDOR_ID_HP && 5787 subsystem_vendor_id != PCI_VENDOR_ID_COMPAQ) || 5788 !hpsa_allow_any) { 5789 dev_warn(&pdev->dev, "unrecognized board ID: " 5790 "0x%08x, ignoring.\n", *board_id); 5791 return -ENODEV; 5792 } 5793 return ARRAY_SIZE(products) - 1; /* generic unknown smart array */ 5794 } 5795 5796 static int hpsa_pci_find_memory_BAR(struct pci_dev *pdev, 5797 unsigned long *memory_bar) 5798 { 5799 int i; 5800 5801 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) 5802 if (pci_resource_flags(pdev, i) & IORESOURCE_MEM) { 5803 /* addressing mode bits already removed */ 5804 *memory_bar = pci_resource_start(pdev, i); 5805 dev_dbg(&pdev->dev, "memory BAR = %lx\n", 5806 *memory_bar); 5807 return 0; 5808 } 5809 dev_warn(&pdev->dev, "no memory BAR found\n"); 5810 return -ENODEV; 5811 } 5812 5813 static int hpsa_wait_for_board_state(struct pci_dev *pdev, void __iomem *vaddr, 5814 int wait_for_ready) 5815 { 5816 int i, iterations; 5817 u32 scratchpad; 5818 if (wait_for_ready) 5819 iterations = HPSA_BOARD_READY_ITERATIONS; 5820 else 5821 iterations = HPSA_BOARD_NOT_READY_ITERATIONS; 5822 5823 for (i = 0; i < iterations; i++) { 5824 scratchpad = readl(vaddr + SA5_SCRATCHPAD_OFFSET); 5825 if (wait_for_ready) { 5826 if (scratchpad == HPSA_FIRMWARE_READY) 5827 return 0; 5828 } else { 5829 if (scratchpad != HPSA_FIRMWARE_READY) 5830 
return 0; 5831 } 5832 msleep(HPSA_BOARD_READY_POLL_INTERVAL_MSECS); 5833 } 5834 dev_warn(&pdev->dev, "board not ready, timed out.\n"); 5835 return -ENODEV; 5836 } 5837 5838 static int hpsa_find_cfg_addrs(struct pci_dev *pdev, void __iomem *vaddr, 5839 u32 *cfg_base_addr, u64 *cfg_base_addr_index, 5840 u64 *cfg_offset) 5841 { 5842 *cfg_base_addr = readl(vaddr + SA5_CTCFG_OFFSET); 5843 *cfg_offset = readl(vaddr + SA5_CTMEM_OFFSET); 5844 *cfg_base_addr &= (u32) 0x0000ffff; 5845 *cfg_base_addr_index = find_PCI_BAR_index(pdev, *cfg_base_addr); 5846 if (*cfg_base_addr_index == -1) { 5847 dev_warn(&pdev->dev, "cannot find cfg_base_addr_index\n"); 5848 return -ENODEV; 5849 } 5850 return 0; 5851 } 5852 5853 static int hpsa_find_cfgtables(struct ctlr_info *h) 5854 { 5855 u64 cfg_offset; 5856 u32 cfg_base_addr; 5857 u64 cfg_base_addr_index; 5858 u32 trans_offset; 5859 int rc; 5860 5861 rc = hpsa_find_cfg_addrs(h->pdev, h->vaddr, &cfg_base_addr, 5862 &cfg_base_addr_index, &cfg_offset); 5863 if (rc) 5864 return rc; 5865 h->cfgtable = remap_pci_mem(pci_resource_start(h->pdev, 5866 cfg_base_addr_index) + cfg_offset, sizeof(*h->cfgtable)); 5867 if (!h->cfgtable) { 5868 dev_err(&h->pdev->dev, "Failed mapping cfgtable\n"); 5869 return -ENOMEM; 5870 } 5871 rc = write_driver_ver_to_cfgtable(h->cfgtable); 5872 if (rc) 5873 return rc; 5874 /* Find performant mode table. */ 5875 trans_offset = readl(&h->cfgtable->TransMethodOffset); 5876 h->transtable = remap_pci_mem(pci_resource_start(h->pdev, 5877 cfg_base_addr_index)+cfg_offset+trans_offset, 5878 sizeof(*h->transtable)); 5879 if (!h->transtable) 5880 return -ENOMEM; 5881 return 0; 5882 } 5883 5884 static void hpsa_get_max_perf_mode_cmds(struct ctlr_info *h) 5885 { 5886 h->max_commands = readl(&(h->cfgtable->MaxPerformantModeCommands)); 5887 5888 /* Limit commands in memory limited kdump scenario. */ 5889 if (reset_devices && h->max_commands > 32) 5890 h->max_commands = 32; 5891 5892 if (h->max_commands < 16) { 5893 dev_warn(&h->pdev->dev, "Controller reports " 5894 "max supported commands of %d, an obvious lie. " 5895 "Using 16. Ensure that firmware is up to date.\n", 5896 h->max_commands); 5897 h->max_commands = 16; 5898 } 5899 } 5900 5901 /* If the controller reports that the total max sg entries is greater than 512, 5902 * then we know that chained SG blocks work. (Original smart arrays did not 5903 * support chained SG blocks and would return zero for max sg entries.) 5904 */ 5905 static int hpsa_supports_chained_sg_blocks(struct ctlr_info *h) 5906 { 5907 return h->maxsgentries > 512; 5908 } 5909 5910 /* Interrogate the hardware for some limits: 5911 * max commands, max SG elements without chaining, and with chaining, 5912 * SG chain block size, etc. 5913 */ 5914 static void hpsa_find_board_params(struct ctlr_info *h) 5915 { 5916 hpsa_get_max_perf_mode_cmds(h); 5917 h->nr_cmds = h->max_commands; 5918 h->maxsgentries = readl(&(h->cfgtable->MaxScatterGatherElements)); 5919 h->fw_support = readl(&(h->cfgtable->misc_fw_support)); 5920 if (hpsa_supports_chained_sg_blocks(h)) { 5921 /* Limit in-command s/g elements to 32 to save dma'able memory.
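 *
 * Worked example, assuming the board reports maxsgentries == 2048:
 * chainsize becomes 2048 - 32 = 2016 and maxsgentries is trimmed to
 * 2047, i.e. 31 descriptors embedded in the command (one of the 32
 * slots is repurposed as the chain pointer) plus up to 2016 more in
 * the chain block.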
*/ 5922 h->max_cmd_sg_entries = 32; 5923 h->chainsize = h->maxsgentries - h->max_cmd_sg_entries; 5924 h->maxsgentries--; /* save one for chain pointer */ 5925 } else { 5926 /* 5927 * Original smart arrays supported at most 31 s/g entries 5928 * embedded inline in the command (trying to use more 5929 * would lock up the controller) 5930 */ 5931 h->max_cmd_sg_entries = 31; 5932 h->maxsgentries = 31; /* default to traditional values */ 5933 h->chainsize = 0; 5934 } 5935 5936 /* Find out what task management functions are supported and cache */ 5937 h->TMFSupportFlags = readl(&(h->cfgtable->TMFSupportFlags)); 5938 if (!(HPSATMF_PHYS_TASK_ABORT & h->TMFSupportFlags)) 5939 dev_warn(&h->pdev->dev, "Physical aborts not supported\n"); 5940 if (!(HPSATMF_LOG_TASK_ABORT & h->TMFSupportFlags)) 5941 dev_warn(&h->pdev->dev, "Logical aborts not supported\n"); 5942 } 5943 5944 static inline bool hpsa_CISS_signature_present(struct ctlr_info *h) 5945 { 5946 if (!check_signature(h->cfgtable->Signature, "CISS", 4)) { 5947 dev_err(&h->pdev->dev, "not a valid CISS config table\n"); 5948 return false; 5949 } 5950 return true; 5951 } 5952 5953 static inline void hpsa_set_driver_support_bits(struct ctlr_info *h) 5954 { 5955 u32 driver_support; 5956 5957 driver_support = readl(&(h->cfgtable->driver_support)); 5958 /* Need to enable prefetch in the SCSI core for 6400 in x86 */ 5959 #ifdef CONFIG_X86 5960 driver_support |= ENABLE_SCSI_PREFETCH; 5961 #endif 5962 driver_support |= ENABLE_UNIT_ATTN; 5963 writel(driver_support, &(h->cfgtable->driver_support)); 5964 } 5965 5966 /* Disable DMA prefetch for the P600. Otherwise an ASIC bug may result 5967 * in a prefetch beyond physical memory. 5968 */ 5969 static inline void hpsa_p600_dma_prefetch_quirk(struct ctlr_info *h) 5970 { 5971 u32 dma_prefetch; 5972 5973 if (h->board_id != 0x3225103C) 5974 return; 5975 dma_prefetch = readl(h->vaddr + I2O_DMA1_CFG); 5976 dma_prefetch |= 0x8000; 5977 writel(dma_prefetch, h->vaddr + I2O_DMA1_CFG); 5978 } 5979 5980 static void hpsa_wait_for_clear_event_notify_ack(struct ctlr_info *h) 5981 { 5982 int i; 5983 u32 doorbell_value; 5984 unsigned long flags; 5985 /* wait until the clear_event_notify bit 6 is cleared by controller. */ 5986 for (i = 0; i < MAX_CONFIG_WAIT; i++) { 5987 spin_lock_irqsave(&h->lock, flags); 5988 doorbell_value = readl(h->vaddr + SA5_DOORBELL); 5989 spin_unlock_irqrestore(&h->lock, flags); 5990 if (!(doorbell_value & DOORBELL_CLEAR_EVENTS)) 5991 break; 5992 /* delay and try again */ 5993 msleep(20); 5994 } 5995 } 5996 5997 static void hpsa_wait_for_mode_change_ack(struct ctlr_info *h) 5998 { 5999 int i; 6000 u32 doorbell_value; 6001 unsigned long flags; 6002 6003 /* under certain very rare conditions, this can take awhile. 6004 * (e.g.: hot replace a failed 144GB drive in a RAID 5 set right 6005 * as we enter this code.) 
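 *
 * The handshake itself is simple: the driver sets CFGTBL_ChangeReq in
 * the doorbell and the controller clears that bit once it has absorbed
 * the new settings, so the loop below just polls the doorbell until
 * the bit reads back as zero, for at most MAX_CONFIG_WAIT iterations.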
6006 */ 6007 for (i = 0; i < MAX_CONFIG_WAIT; i++) { 6008 spin_lock_irqsave(&h->lock, flags); 6009 doorbell_value = readl(h->vaddr + SA5_DOORBELL); 6010 spin_unlock_irqrestore(&h->lock, flags); 6011 if (!(doorbell_value & CFGTBL_ChangeReq)) 6012 break; 6013 /* delay and try again */ 6014 usleep_range(10000, 20000); 6015 } 6016 } 6017 6018 static int hpsa_enter_simple_mode(struct ctlr_info *h) 6019 { 6020 u32 trans_support; 6021 6022 trans_support = readl(&(h->cfgtable->TransportSupport)); 6023 if (!(trans_support & SIMPLE_MODE)) 6024 return -ENOTSUPP; 6025 6026 h->max_commands = readl(&(h->cfgtable->CmdsOutMax)); 6027 6028 /* Update the field, and then ring the doorbell */ 6029 writel(CFGTBL_Trans_Simple, &(h->cfgtable->HostWrite.TransportRequest)); 6030 writel(0, &h->cfgtable->HostWrite.command_pool_addr_hi); 6031 writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL); 6032 hpsa_wait_for_mode_change_ack(h); 6033 print_cfg_table(&h->pdev->dev, h->cfgtable); 6034 if (!(readl(&(h->cfgtable->TransportActive)) & CFGTBL_Trans_Simple)) 6035 goto error; 6036 h->transMethod = CFGTBL_Trans_Simple; 6037 return 0; 6038 error: 6039 dev_err(&h->pdev->dev, "failed to enter simple mode\n"); 6040 return -ENODEV; 6041 } 6042 6043 static int hpsa_pci_init(struct ctlr_info *h) 6044 { 6045 int prod_index, err; 6046 6047 prod_index = hpsa_lookup_board_id(h->pdev, &h->board_id); 6048 if (prod_index < 0) 6049 return prod_index; 6050 h->product_name = products[prod_index].product_name; 6051 h->access = *(products[prod_index].access); 6052 6053 pci_disable_link_state(h->pdev, PCIE_LINK_STATE_L0S | 6054 PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM); 6055 6056 err = pci_enable_device(h->pdev); 6057 if (err) { 6058 dev_warn(&h->pdev->dev, "unable to enable PCI device\n"); 6059 return err; 6060 } 6061 6062 err = pci_request_regions(h->pdev, HPSA); 6063 if (err) { 6064 dev_err(&h->pdev->dev, 6065 "cannot obtain PCI resources, aborting\n"); 6066 return err; 6067 } 6068 6069 pci_set_master(h->pdev); 6070 6071 hpsa_interrupt_mode(h); 6072 err = hpsa_pci_find_memory_BAR(h->pdev, &h->paddr); 6073 if (err) 6074 goto err_out_free_res; 6075 h->vaddr = remap_pci_mem(h->paddr, 0x250); 6076 if (!h->vaddr) { 6077 err = -ENOMEM; 6078 goto err_out_free_res; 6079 } 6080 err = hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY); 6081 if (err) 6082 goto err_out_free_res; 6083 err = hpsa_find_cfgtables(h); 6084 if (err) 6085 goto err_out_free_res; 6086 hpsa_find_board_params(h); 6087 6088 if (!hpsa_CISS_signature_present(h)) { 6089 err = -ENODEV; 6090 goto err_out_free_res; 6091 } 6092 hpsa_set_driver_support_bits(h); 6093 hpsa_p600_dma_prefetch_quirk(h); 6094 err = hpsa_enter_simple_mode(h); 6095 if (err) 6096 goto err_out_free_res; 6097 return 0; 6098 6099 err_out_free_res: 6100 if (h->transtable) 6101 iounmap(h->transtable); 6102 if (h->cfgtable) 6103 iounmap(h->cfgtable); 6104 if (h->vaddr) 6105 iounmap(h->vaddr); 6106 pci_disable_device(h->pdev); 6107 pci_release_regions(h->pdev); 6108 return err; 6109 } 6110 6111 static void hpsa_hba_inquiry(struct ctlr_info *h) 6112 { 6113 int rc; 6114 6115 #define HBA_INQUIRY_BYTE_COUNT 64 6116 h->hba_inquiry_data = kmalloc(HBA_INQUIRY_BYTE_COUNT, GFP_KERNEL); 6117 if (!h->hba_inquiry_data) 6118 return; 6119 rc = hpsa_scsi_do_inquiry(h, RAID_CTLR_LUNID, 0, 6120 h->hba_inquiry_data, HBA_INQUIRY_BYTE_COUNT); 6121 if (rc != 0) { 6122 kfree(h->hba_inquiry_data); 6123 h->hba_inquiry_data = NULL; 6124 } 6125 } 6126 6127 static int hpsa_init_reset_devices(struct pci_dev *pdev) 6128 { 6129 int rc, i; 
6130 void __iomem *vaddr; 6131 6132 if (!reset_devices) 6133 return 0; 6134 6135 /* kdump kernel is loading, and we don't know what state the 6136 * PCI interface is in. dev->enable_cnt equals zero, 6137 * so we call enable+disable, wait a while, and switch it on. 6138 */ 6139 rc = pci_enable_device(pdev); 6140 if (rc) { 6141 dev_warn(&pdev->dev, "Failed to enable PCI device\n"); 6142 return -ENODEV; 6143 } 6144 pci_disable_device(pdev); 6145 msleep(260); /* a randomly chosen number */ 6146 rc = pci_enable_device(pdev); 6147 if (rc) { 6148 dev_warn(&pdev->dev, "failed to enable device.\n"); 6149 return -ENODEV; 6150 } 6151 6152 pci_set_master(pdev); 6153 6154 vaddr = pci_ioremap_bar(pdev, 0); 6155 if (vaddr == NULL) { 6156 rc = -ENOMEM; 6157 goto out_disable; 6158 } 6159 writel(SA5_INTR_OFF, vaddr + SA5_REPLY_INTR_MASK_OFFSET); 6160 iounmap(vaddr); 6161 6162 /* Reset the controller with a PCI power-cycle or via doorbell */ 6163 rc = hpsa_kdump_hard_reset_controller(pdev); 6164 6165 /* -ENOTSUPP here means we cannot reset the controller 6166 * but it's already (and still) up and running in 6167 * "performant mode". Or, it might be 640x, which can't reset 6168 * due to concerns about shared bbwc between 6402/6404 pair. 6169 */ 6170 if (rc) 6171 goto out_disable; 6172 6173 /* Now try to get the controller to respond to a no-op */ 6174 dev_info(&pdev->dev, "Waiting for controller to respond to no-op\n"); 6175 for (i = 0; i < HPSA_POST_RESET_NOOP_RETRIES; i++) { 6176 if (hpsa_noop(pdev) == 0) 6177 break; 6178 else 6179 dev_warn(&pdev->dev, "no-op failed%s\n", 6180 (i < HPSA_POST_RESET_NOOP_RETRIES - 1 ? "; re-trying" : "")); 6181 } 6182 6183 out_disable: 6184 6185 pci_disable_device(pdev); 6186 return rc; 6187 } 6188 6189 static int hpsa_allocate_cmd_pool(struct ctlr_info *h) 6190 { 6191 h->cmd_pool_bits = kzalloc( 6192 DIV_ROUND_UP(h->nr_cmds, BITS_PER_LONG) * 6193 sizeof(unsigned long), GFP_KERNEL); 6194 h->cmd_pool = pci_alloc_consistent(h->pdev, 6195 h->nr_cmds * sizeof(*h->cmd_pool), 6196 &(h->cmd_pool_dhandle)); 6197 h->errinfo_pool = pci_alloc_consistent(h->pdev, 6198 h->nr_cmds * sizeof(*h->errinfo_pool), 6199 &(h->errinfo_pool_dhandle)); 6200 if ((h->cmd_pool_bits == NULL) 6201 || (h->cmd_pool == NULL) 6202 || (h->errinfo_pool == NULL)) { 6203 dev_err(&h->pdev->dev, "out of memory in %s\n", __func__); 6204 goto clean_up; 6205 } 6206 return 0; 6207 clean_up: 6208 hpsa_free_cmd_pool(h); 6209 return -ENOMEM; 6210 } 6211 6212 static void hpsa_free_cmd_pool(struct ctlr_info *h) 6213 { 6214 kfree(h->cmd_pool_bits); 6215 if (h->cmd_pool) 6216 pci_free_consistent(h->pdev, 6217 h->nr_cmds * sizeof(struct CommandList), 6218 h->cmd_pool, h->cmd_pool_dhandle); 6219 if (h->ioaccel2_cmd_pool) 6220 pci_free_consistent(h->pdev, 6221 h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool), 6222 h->ioaccel2_cmd_pool, h->ioaccel2_cmd_pool_dhandle); 6223 if (h->errinfo_pool) 6224 pci_free_consistent(h->pdev, 6225 h->nr_cmds * sizeof(struct ErrorInfo), 6226 h->errinfo_pool, 6227 h->errinfo_pool_dhandle); 6228 if (h->ioaccel_cmd_pool) 6229 pci_free_consistent(h->pdev, 6230 h->nr_cmds * sizeof(struct io_accel1_cmd), 6231 h->ioaccel_cmd_pool, h->ioaccel_cmd_pool_dhandle); 6232 } 6233 6234 static void hpsa_irq_affinity_hints(struct ctlr_info *h) 6235 { 6236 int i, cpu; 6237 6238 cpu = cpumask_first(cpu_online_mask); 6239 for (i = 0; i < h->msix_vector; i++) { 6240 irq_set_affinity_hint(h->intr[i], get_cpu_mask(cpu)); 6241 cpu = cpumask_next(cpu, cpu_online_mask); 6242 } 6243 } 6244 6245 /* clear affinity hints and free MSI-X, MSI, or legacy INTx vectors
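 *
 * (free_irq() must be handed the same dev_id cookie that was given to
 * request_irq(), which here is always &h->q[i]; see hpsa_request_irqs()
 * below.)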
*/ 6246 static void hpsa_free_irqs(struct ctlr_info *h) 6247 { 6248 int i; 6249 6250 if (!h->msix_vector || h->intr_mode != PERF_MODE_INT) { 6251 /* Single reply queue, only one irq to free */ 6252 i = h->intr_mode; 6253 irq_set_affinity_hint(h->intr[i], NULL); 6254 free_irq(h->intr[i], &h->q[i]); 6255 return; 6256 } 6257 6258 for (i = 0; i < h->msix_vector; i++) { 6259 irq_set_affinity_hint(h->intr[i], NULL); 6260 free_irq(h->intr[i], &h->q[i]); 6261 } 6262 for (; i < MAX_REPLY_QUEUES; i++) 6263 h->q[i] = 0; 6264 } 6265 6266 /* returns 0 on success; cleans up and returns -Enn on error */ 6267 static int hpsa_request_irqs(struct ctlr_info *h, 6268 irqreturn_t (*msixhandler)(int, void *), 6269 irqreturn_t (*intxhandler)(int, void *)) 6270 { 6271 int rc, i; 6272 6273 /* 6274 * initialize h->q[x] = x so that interrupt handlers know which 6275 * queue to process. 6276 */ 6277 for (i = 0; i < MAX_REPLY_QUEUES; i++) 6278 h->q[i] = (u8) i; 6279 6280 if (h->intr_mode == PERF_MODE_INT && h->msix_vector > 0) { 6281 /* If performant mode and MSI-X, use multiple reply queues */ 6282 for (i = 0; i < h->msix_vector; i++) { 6283 rc = request_irq(h->intr[i], msixhandler, 6284 0, h->devname, 6285 &h->q[i]); 6286 if (rc) { 6287 int j; 6288 6289 dev_err(&h->pdev->dev, 6290 "failed to get irq %d for %s\n", 6291 h->intr[i], h->devname); 6292 for (j = 0; j < i; j++) { 6293 free_irq(h->intr[j], &h->q[j]); 6294 h->q[j] = 0; 6295 } 6296 for (; j < MAX_REPLY_QUEUES; j++) 6297 h->q[j] = 0; 6298 return rc; 6299 } 6300 } 6301 hpsa_irq_affinity_hints(h); 6302 } else { 6303 /* Use single reply pool */ 6304 if (h->msix_vector > 0 || h->msi_vector) { 6305 rc = request_irq(h->intr[h->intr_mode], 6306 msixhandler, 0, h->devname, 6307 &h->q[h->intr_mode]); 6308 } else { 6309 rc = request_irq(h->intr[h->intr_mode], 6310 intxhandler, IRQF_SHARED, h->devname, 6311 &h->q[h->intr_mode]); 6312 } 6313 } 6314 if (rc) { 6315 dev_err(&h->pdev->dev, "unable to get irq %d for %s\n", 6316 h->intr[h->intr_mode], h->devname); 6317 return -ENODEV; 6318 } 6319 return 0; 6320 } 6321 6322 static int hpsa_kdump_soft_reset(struct ctlr_info *h) 6323 { 6324 if (hpsa_send_host_reset(h, RAID_CTLR_LUNID, 6325 HPSA_RESET_TYPE_CONTROLLER)) { 6326 dev_warn(&h->pdev->dev, "Resetting array controller failed.\n"); 6327 return -EIO; 6328 } 6329 6330 dev_info(&h->pdev->dev, "Waiting for board to soft reset.\n"); 6331 if (hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_NOT_READY)) { 6332 dev_warn(&h->pdev->dev, "Soft reset had no effect.\n"); 6333 return -1; 6334 } 6335 6336 dev_info(&h->pdev->dev, "Board reset, awaiting READY status.\n"); 6337 if (hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY)) { 6338 dev_warn(&h->pdev->dev, "Board failed to become ready " 6339 "after soft reset.\n"); 6340 return -1; 6341 } 6342 6343 return 0; 6344 } 6345 6346 static void hpsa_free_irqs_and_disable_msix(struct ctlr_info *h) 6347 { 6348 hpsa_free_irqs(h); 6349 #ifdef CONFIG_PCI_MSI 6350 if (h->msix_vector) { 6351 if (h->pdev->msix_enabled) 6352 pci_disable_msix(h->pdev); 6353 } else if (h->msi_vector) { 6354 if (h->pdev->msi_enabled) 6355 pci_disable_msi(h->pdev); 6356 } 6357 #endif /* CONFIG_PCI_MSI */ 6358 } 6359 6360 static void hpsa_free_reply_queues(struct ctlr_info *h) 6361 { 6362 int i; 6363 6364 for (i = 0; i < h->nreply_queues; i++) { 6365 if (!h->reply_queue[i].head) 6366 continue; 6367 pci_free_consistent(h->pdev, h->reply_queue_size, 6368 h->reply_queue[i].head, h->reply_queue[i].busaddr); 6369 h->reply_queue[i].head = NULL; 6370 
h->reply_queue[i].busaddr = 0; 6371 } 6372 } 6373 6374 static void hpsa_undo_allocations_after_kdump_soft_reset(struct ctlr_info *h) 6375 { 6376 hpsa_free_irqs_and_disable_msix(h); 6377 hpsa_free_sg_chain_blocks(h); 6378 hpsa_free_cmd_pool(h); 6379 kfree(h->ioaccel1_blockFetchTable); 6380 kfree(h->blockFetchTable); 6381 hpsa_free_reply_queues(h); 6382 if (h->vaddr) 6383 iounmap(h->vaddr); 6384 if (h->transtable) 6385 iounmap(h->transtable); 6386 if (h->cfgtable) 6387 iounmap(h->cfgtable); 6388 pci_disable_device(h->pdev); 6389 pci_release_regions(h->pdev); 6390 kfree(h); 6391 } 6392 6393 /* Called when controller lockup detected. */ 6394 static void fail_all_outstanding_cmds(struct ctlr_info *h) 6395 { 6396 int i; 6397 struct CommandList *c = NULL; 6398 6399 for (i = 0; i < h->nr_cmds; i++) { 6400 if (!test_bit(i & (BITS_PER_LONG - 1), 6401 h->cmd_pool_bits + (i / BITS_PER_LONG))) 6402 continue; 6403 c = h->cmd_pool + i; 6404 c->err_info->CommandStatus = CMD_HARDWARE_ERR; 6405 finish_cmd(c); 6406 } 6407 } 6408 6409 static void set_lockup_detected_for_all_cpus(struct ctlr_info *h, u32 value) 6410 { 6411 int i, cpu; 6412 6413 cpu = cpumask_first(cpu_online_mask); 6414 for (i = 0; i < num_online_cpus(); i++) { 6415 u32 *lockup_detected; 6416 lockup_detected = per_cpu_ptr(h->lockup_detected, cpu); 6417 *lockup_detected = value; 6418 cpu = cpumask_next(cpu, cpu_online_mask); 6419 } 6420 wmb(); /* be sure the per-cpu variables are out to memory */ 6421 } 6422 6423 static void controller_lockup_detected(struct ctlr_info *h) 6424 { 6425 unsigned long flags; 6426 u32 lockup_detected; 6427 6428 h->access.set_intr_mask(h, HPSA_INTR_OFF); 6429 spin_lock_irqsave(&h->lock, flags); 6430 lockup_detected = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET); 6431 if (!lockup_detected) { 6432 /* no heartbeat, but controller gave us a zero. */ 6433 dev_warn(&h->pdev->dev, 6434 "lockup detected but scratchpad register is zero\n"); 6435 lockup_detected = 0xffffffff; 6436 } 6437 set_lockup_detected_for_all_cpus(h, lockup_detected); 6438 spin_unlock_irqrestore(&h->lock, flags); 6439 dev_warn(&h->pdev->dev, "Controller lockup detected: 0x%08x\n", 6440 lockup_detected); 6441 pci_disable_device(h->pdev); 6442 spin_lock_irqsave(&h->lock, flags); 6443 fail_all_outstanding_cmds(h); 6444 spin_unlock_irqrestore(&h->lock, flags); 6445 } 6446 6447 static void detect_controller_lockup(struct ctlr_info *h) 6448 { 6449 u64 now; 6450 u32 heartbeat; 6451 unsigned long flags; 6452 6453 now = get_jiffies_64(); 6454 /* If we've received an interrupt recently, we're ok. */ 6455 if (time_after64(h->last_intr_timestamp + 6456 (h->heartbeat_sample_interval), now)) 6457 return; 6458 6459 /* 6460 * If we've already checked the heartbeat recently, we're ok. 6461 * This could happen if someone sends us a signal. We 6462 * otherwise don't care about signals in this thread. 6463 */ 6464 if (time_after64(h->last_heartbeat_timestamp + 6465 (h->heartbeat_sample_interval), now)) 6466 return; 6467 6468 /* If heartbeat has not changed since we last looked, we're not ok. */ 6469 spin_lock_irqsave(&h->lock, flags); 6470 heartbeat = readl(&h->cfgtable->HeartBeat); 6471 spin_unlock_irqrestore(&h->lock, flags); 6472 if (h->last_heartbeat == heartbeat) { 6473 controller_lockup_detected(h); 6474 return; 6475 } 6476 6477 /* We're ok. 
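 *
 * (The firmware increments cfgtable->HeartBeat on its own; "lockup"
 * here means the counter failed to move for an entire
 * heartbeat_sample_interval, not that any particular command timed
 * out.)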
*/ 6478 h->last_heartbeat = heartbeat; 6479 h->last_heartbeat_timestamp = now; 6480 } 6481 6482 static void hpsa_ack_ctlr_events(struct ctlr_info *h) 6483 { 6484 int i; 6485 char *event_type; 6486 6487 /* Ask the controller to clear the events we're handling. */ 6488 if ((h->transMethod & (CFGTBL_Trans_io_accel1 6489 | CFGTBL_Trans_io_accel2)) && 6490 (h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_STATE_CHANGE || 6491 h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_CONFIG_CHANGE)) { 6492 6493 if (h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_STATE_CHANGE) 6494 event_type = "state change"; 6495 if (h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_CONFIG_CHANGE) 6496 event_type = "configuration change"; 6497 /* Stop sending new RAID offload reqs via the IO accelerator */ 6498 scsi_block_requests(h->scsi_host); 6499 for (i = 0; i < h->ndevices; i++) 6500 h->dev[i]->offload_enabled = 0; 6501 hpsa_drain_accel_commands(h); 6502 /* Set 'accelerator path config change' bit */ 6503 dev_warn(&h->pdev->dev, 6504 "Acknowledging event: 0x%08x (HP SSD Smart Path %s)\n", 6505 h->events, event_type); 6506 writel(h->events, &(h->cfgtable->clear_event_notify)); 6507 /* Set the "clear event notify field update" bit 6 */ 6508 writel(DOORBELL_CLEAR_EVENTS, h->vaddr + SA5_DOORBELL); 6509 /* Wait until ctlr clears 'clear event notify field', bit 6 */ 6510 hpsa_wait_for_clear_event_notify_ack(h); 6511 scsi_unblock_requests(h->scsi_host); 6512 } else { 6513 /* Acknowledge controller notification events. */ 6514 writel(h->events, &(h->cfgtable->clear_event_notify)); 6515 writel(DOORBELL_CLEAR_EVENTS, h->vaddr + SA5_DOORBELL); 6516 hpsa_wait_for_clear_event_notify_ack(h); 6517 #if 0 6518 writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL); 6519 hpsa_wait_for_mode_change_ack(h); 6520 #endif 6521 } 6522 return; 6523 } 6524 6525 /* Check a register on the controller to see if there are configuration 6526 * changes (added/changed/removed logical drives, etc.) which mean that 6527 * we should rescan the controller for devices. 6528 * Also check flag for driver-initiated rescan. 
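 *
 * (h->events is latched from cfgtable->event_notify; only the bits in
 * RESCAN_REQUIRED_EVENT_BITS force a rescan, and hpsa_ack_ctlr_events()
 * above writes the same value back to clear_event_notify to
 * acknowledge them.)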
6529 */ 6530 static int hpsa_ctlr_needs_rescan(struct ctlr_info *h) 6531 { 6532 if (!(h->fw_support & MISC_FW_EVENT_NOTIFY)) 6533 return 0; 6534 6535 h->events = readl(&(h->cfgtable->event_notify)); 6536 return h->events & RESCAN_REQUIRED_EVENT_BITS; 6537 } 6538 6539 /* 6540 * Check if any of the offline devices have become ready 6541 */ 6542 static int hpsa_offline_devices_ready(struct ctlr_info *h) 6543 { 6544 unsigned long flags; 6545 struct offline_device_entry *d; 6546 struct list_head *this, *tmp; 6547 6548 spin_lock_irqsave(&h->offline_device_lock, flags); 6549 list_for_each_safe(this, tmp, &h->offline_device_list) { 6550 d = list_entry(this, struct offline_device_entry, 6551 offline_list); 6552 spin_unlock_irqrestore(&h->offline_device_lock, flags); 6553 if (!hpsa_volume_offline(h, d->scsi3addr)) { 6554 spin_lock_irqsave(&h->offline_device_lock, flags); 6555 list_del(&d->offline_list); 6556 spin_unlock_irqrestore(&h->offline_device_lock, flags); 6557 return 1; 6558 } 6559 spin_lock_irqsave(&h->offline_device_lock, flags); 6560 } 6561 spin_unlock_irqrestore(&h->offline_device_lock, flags); 6562 return 0; 6563 } 6564 6565 6566 static void hpsa_monitor_ctlr_worker(struct work_struct *work) 6567 { 6568 unsigned long flags; 6569 struct ctlr_info *h = container_of(to_delayed_work(work), 6570 struct ctlr_info, monitor_ctlr_work); 6571 detect_controller_lockup(h); 6572 if (lockup_detected(h)) 6573 return; 6574 6575 if (hpsa_ctlr_needs_rescan(h) || hpsa_offline_devices_ready(h)) { 6576 scsi_host_get(h->scsi_host); 6577 hpsa_ack_ctlr_events(h); 6578 hpsa_scan_start(h->scsi_host); 6579 scsi_host_put(h->scsi_host); 6580 } 6581 6582 spin_lock_irqsave(&h->lock, flags); 6583 if (h->remove_in_progress) { 6584 spin_unlock_irqrestore(&h->lock, flags); 6585 return; 6586 } 6587 schedule_delayed_work(&h->monitor_ctlr_work, 6588 h->heartbeat_sample_interval); 6589 spin_unlock_irqrestore(&h->lock, flags); 6590 } 6591 6592 static int hpsa_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) 6593 { 6594 int dac, rc; 6595 struct ctlr_info *h; 6596 int try_soft_reset = 0; 6597 unsigned long flags; 6598 6599 if (number_of_controllers == 0) 6600 printk(KERN_INFO DRIVER_NAME "\n"); 6601 6602 rc = hpsa_init_reset_devices(pdev); 6603 if (rc) { 6604 if (rc != -ENOTSUPP) 6605 return rc; 6606 /* If the reset fails in a particular way (it has no way to do 6607 * a proper hard reset, so returns -ENOTSUPP) we can try to do 6608 * a soft reset once we get the controller configured up to the 6609 * point that it can accept a command. 6610 */ 6611 try_soft_reset = 1; 6612 rc = 0; 6613 } 6614 6615 reinit_after_soft_reset: 6616 6617 /* Command structures must be aligned on a 32-byte boundary because 6618 * the 5 lower bits of the address are used by the hardware. and by 6619 * the driver. See comments in hpsa.h for more info. 6620 */ 6621 BUILD_BUG_ON(sizeof(struct CommandList) % COMMANDLIST_ALIGNMENT); 6622 h = kzalloc(sizeof(*h), GFP_KERNEL); 6623 if (!h) 6624 return -ENOMEM; 6625 6626 h->pdev = pdev; 6627 h->intr_mode = hpsa_simple_mode ? 
SIMPLE_MODE_INT : PERF_MODE_INT; 6628 INIT_LIST_HEAD(&h->offline_device_list); 6629 spin_lock_init(&h->lock); 6630 spin_lock_init(&h->offline_device_lock); 6631 spin_lock_init(&h->scan_lock); 6632 spin_lock_init(&h->passthru_count_lock); 6633 6634 /* Allocate and clear per-cpu variable lockup_detected */ 6635 h->lockup_detected = alloc_percpu(u32); 6636 if (!h->lockup_detected) { 6637 rc = -ENOMEM; 6638 goto clean1; 6639 } 6640 set_lockup_detected_for_all_cpus(h, 0); 6641 6642 rc = hpsa_pci_init(h); 6643 if (rc != 0) 6644 goto clean1; 6645 6646 sprintf(h->devname, HPSA "%d", number_of_controllers); 6647 h->ctlr = number_of_controllers; 6648 number_of_controllers++; 6649 6650 /* configure PCI DMA stuff */ 6651 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); 6652 if (rc == 0) { 6653 dac = 1; 6654 } else { 6655 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); 6656 if (rc == 0) { 6657 dac = 0; 6658 } else { 6659 dev_err(&pdev->dev, "no suitable DMA available\n"); 6660 goto clean1; 6661 } 6662 } 6663 6664 /* make sure the board interrupts are off */ 6665 h->access.set_intr_mask(h, HPSA_INTR_OFF); 6666 6667 if (hpsa_request_irqs(h, do_hpsa_intr_msi, do_hpsa_intr_intx)) 6668 goto clean2; 6669 dev_info(&pdev->dev, "%s: <0x%x> at IRQ %d%s using DAC\n", 6670 h->devname, pdev->device, 6671 h->intr[h->intr_mode], dac ? "" : " not"); 6672 rc = hpsa_allocate_cmd_pool(h); 6673 if (rc) 6674 goto clean2_and_free_irqs; 6675 if (hpsa_allocate_sg_chain_blocks(h)) 6676 goto clean4; 6677 init_waitqueue_head(&h->scan_wait_queue); 6678 h->scan_finished = 1; /* no scan currently in progress */ 6679 6680 pci_set_drvdata(pdev, h); 6681 h->ndevices = 0; 6682 h->hba_mode_enabled = 0; 6683 h->scsi_host = NULL; 6684 spin_lock_init(&h->devlock); 6685 hpsa_put_ctlr_into_performant_mode(h); 6686 6687 /* At this point, the controller is ready to take commands. 6688 * Now, if reset_devices and the hard reset didn't work, try 6689 * the soft reset and see if that works. 6690 */ 6691 if (try_soft_reset) { 6692 6693 /* This is kind of gross. We may or may not get a completion 6694 * from the soft reset command, and if we do, then the value 6695 * from the fifo may or may not be valid. So, we wait 10 secs 6696 * after the reset throwing away any completions we get during 6697 * that time. Unregister the interrupt handler and register 6698 * fake ones to scoop up any residual completions. 6699 */ 6700 spin_lock_irqsave(&h->lock, flags); 6701 h->access.set_intr_mask(h, HPSA_INTR_OFF); 6702 spin_unlock_irqrestore(&h->lock, flags); 6703 hpsa_free_irqs(h); 6704 rc = hpsa_request_irqs(h, hpsa_msix_discard_completions, 6705 hpsa_intx_discard_completions); 6706 if (rc) { 6707 dev_warn(&h->pdev->dev, 6708 "Failed to request_irq after soft reset.\n"); 6709 goto clean4; 6710 } 6711 6712 rc = hpsa_kdump_soft_reset(h); 6713 if (rc) 6714 /* Neither hard nor soft reset worked, we're hosed. */ 6715 goto clean4; 6716 6717 dev_info(&h->pdev->dev, "Board READY.\n"); 6718 dev_info(&h->pdev->dev, 6719 "Waiting for stale completions to drain.\n"); 6720 h->access.set_intr_mask(h, HPSA_INTR_ON); 6721 msleep(10000); 6722 h->access.set_intr_mask(h, HPSA_INTR_OFF); 6723 6724 rc = controller_reset_failed(h->cfgtable); 6725 if (rc) 6726 dev_info(&h->pdev->dev, 6727 "Soft reset appears to have failed.\n"); 6728 6729 /* since the controller's reset, we have to go back and re-init 6730 * everything. Easiest to just forget what we've done and do it 6731 * all over again. 
6732 */ 6733 hpsa_undo_allocations_after_kdump_soft_reset(h); 6734 try_soft_reset = 0; 6735 if (rc) 6736 /* don't go to clean4, we already unallocated */ 6737 return -ENODEV; 6738 6739 goto reinit_after_soft_reset; 6740 } 6741 6742 /* Enable Accelerated IO path at driver layer */ 6743 h->acciopath_status = 1; 6744 6745 6746 /* Turn the interrupts on so we can service requests */ 6747 h->access.set_intr_mask(h, HPSA_INTR_ON); 6748 6749 hpsa_hba_inquiry(h); 6750 hpsa_register_scsi(h); /* hook ourselves into SCSI subsystem */ 6751 6752 /* Monitor the controller for firmware lockups */ 6753 h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL; 6754 INIT_DELAYED_WORK(&h->monitor_ctlr_work, hpsa_monitor_ctlr_worker); 6755 schedule_delayed_work(&h->monitor_ctlr_work, 6756 h->heartbeat_sample_interval); 6757 return 0; 6758 6759 clean4: 6760 hpsa_free_sg_chain_blocks(h); 6761 hpsa_free_cmd_pool(h); 6762 clean2_and_free_irqs: 6763 hpsa_free_irqs(h); 6764 clean2: 6765 clean1: 6766 if (h->lockup_detected) 6767 free_percpu(h->lockup_detected); 6768 kfree(h); 6769 return rc; 6770 } 6771 6772 static void hpsa_flush_cache(struct ctlr_info *h) 6773 { 6774 char *flush_buf; 6775 struct CommandList *c; 6776 6777 /* Don't bother trying to flush the cache if locked up */ 6778 if (unlikely(lockup_detected(h))) 6779 return; 6780 flush_buf = kzalloc(4, GFP_KERNEL); 6781 if (!flush_buf) 6782 return; 6783 6784 c = cmd_alloc(h); 6785 if (!c) { 6786 dev_warn(&h->pdev->dev, "cmd_alloc returned NULL!\n"); 6787 goto out_of_memory; 6788 } 6789 if (fill_cmd(c, HPSA_CACHE_FLUSH, h, flush_buf, 4, 0, 6790 RAID_CTLR_LUNID, TYPE_CMD)) { 6791 goto out; 6792 } 6793 hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_TODEVICE); 6794 if (c->err_info->CommandStatus != 0) 6795 out: 6796 dev_warn(&h->pdev->dev, 6797 "error flushing cache on controller\n"); 6798 cmd_free(h, c); 6799 out_of_memory: 6800 kfree(flush_buf); 6801 } 6802 6803 static void hpsa_shutdown(struct pci_dev *pdev) 6804 { 6805 struct ctlr_info *h; 6806 6807 h = pci_get_drvdata(pdev); 6808 /* Send the flush cache command so that all data in the 6809 * battery backed cache is written out to disk, then turn the 6810
* board interrupts off. 6811 */ 6812 hpsa_flush_cache(h); 6813 h->access.set_intr_mask(h, HPSA_INTR_OFF); 6814 hpsa_free_irqs_and_disable_msix(h); 6815 } 6816 6817 static void hpsa_free_device_info(struct ctlr_info *h) 6818 { 6819 int i; 6820 6821 for (i = 0; i < h->ndevices; i++) 6822 kfree(h->dev[i]); 6823 } 6824 6825 static void hpsa_remove_one(struct pci_dev *pdev) 6826 { 6827 struct ctlr_info *h; 6828 unsigned long flags; 6829 6830 if (pci_get_drvdata(pdev) == NULL) { 6831 dev_err(&pdev->dev, "unable to remove device\n"); 6832 return; 6833 } 6834 h = pci_get_drvdata(pdev); 6835 6836 /* Get rid of any controller monitoring work items */ 6837 spin_lock_irqsave(&h->lock, flags); 6838 h->remove_in_progress = 1; 6839 cancel_delayed_work(&h->monitor_ctlr_work); 6840 spin_unlock_irqrestore(&h->lock, flags); 6841 6842 hpsa_unregister_scsi(h); /* unhook from SCSI subsystem */ 6843 hpsa_shutdown(pdev); 6844 iounmap(h->vaddr); 6845 iounmap(h->transtable); 6846 iounmap(h->cfgtable); 6847 hpsa_free_device_info(h); 6848 hpsa_free_sg_chain_blocks(h); 6849 pci_free_consistent(h->pdev, 6850 h->nr_cmds * sizeof(struct CommandList), 6851 h->cmd_pool, h->cmd_pool_dhandle); 6852 pci_free_consistent(h->pdev, 6853 h->nr_cmds * sizeof(struct ErrorInfo), 6854 h->errinfo_pool, h->errinfo_pool_dhandle); 6855 hpsa_free_reply_queues(h); 6856 kfree(h->cmd_pool_bits); 6857 kfree(h->blockFetchTable); 6858 kfree(h->ioaccel1_blockFetchTable); 6859 kfree(h->ioaccel2_blockFetchTable); 6860 kfree(h->hba_inquiry_data); 6861 pci_disable_device(pdev); 6862 pci_release_regions(pdev); 6863 free_percpu(h->lockup_detected); 6864 kfree(h); 6865 } 6866 6867 static int hpsa_suspend(__attribute__((unused)) struct pci_dev *pdev, 6868 __attribute__((unused)) pm_message_t state) 6869 { 6870 return -ENOSYS; 6871 } 6872 6873 static int hpsa_resume(__attribute__((unused)) struct pci_dev *pdev) 6874 { 6875 return -ENOSYS; 6876 } 6877 6878 static struct pci_driver hpsa_pci_driver = { 6879 .name = HPSA, 6880 .probe = hpsa_init_one, 6881 .remove = hpsa_remove_one, 6882 .id_table = hpsa_pci_device_id, /* id_table */ 6883 .shutdown = hpsa_shutdown, 6884 .suspend = hpsa_suspend, 6885 .resume = hpsa_resume, 6886 }; 6887 6888 /* Fill in bucket_map[], given nsgs (the max number of 6889 * scatter gather elements supported) and bucket[], 6890 * which is an array of 8 integers. The bucket[] array 6891 * contains 8 different DMA transfer sizes (in 16 6892 * byte increments) which the controller uses to fetch 6893 * commands. This function fills in bucket_map[], which 6894 * maps a given number of scatter gather elements to one of 6895 * the 8 DMA transfer sizes. The point of it is to allow the 6896 * controller to only do as much DMA as needed to fetch the 6897 * command, with the DMA transfer size encoded in the lower 6898 * bits of the command address. 6899 */ 6900 static void calc_bucket_map(int bucket[], int num_buckets, 6901 int nsgs, int min_blocks, u32 *bucket_map) 6902 { 6903 int i, j, b, size; 6904 6905 /* Note, bucket_map must have nsgs+1 entries. */ 6906 for (i = 0; i <= nsgs; i++) { 6907 /* Compute size of a command with i SG entries */ 6908 size = i + min_blocks; 6909 b = num_buckets; /* Assume the biggest bucket */ 6910 /* Find the bucket that is just big enough */ 6911 for (j = 0; j < num_buckets; j++) { 6912 if (bucket[j] >= size) { 6913 b = j; 6914 break; 6915 } 6916 } 6917 /* for a command with i SG entries, use bucket b.
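 *
 * Worked example with illustrative numbers: given bucket[] =
 * {5, 6, 8, 10, 12, 20, 28, 36} and min_blocks = 4, a command with
 * i = 7 SG entries needs size = 7 + 4 = 11 blocks; the first bucket
 * holding at least 11 is bucket[4] = 12, so bucket_map[7] = 4.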
*/ 6918 bucket_map[i] = b; 6919 } 6920 } 6921 6922 static void hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support) 6923 { 6924 int i; 6925 unsigned long register_value; 6926 unsigned long transMethod = CFGTBL_Trans_Performant | 6927 (trans_support & CFGTBL_Trans_use_short_tags) | 6928 CFGTBL_Trans_enable_directed_msix | 6929 (trans_support & (CFGTBL_Trans_io_accel1 | 6930 CFGTBL_Trans_io_accel2)); 6931 struct access_method access = SA5_performant_access; 6932 6933 /* This is a bit complicated. There are 8 registers on 6934 * the controller which we write to to tell it 8 different 6935 * sizes of commands which there may be. It's a way of 6936 * reducing the DMA done to fetch each command. Encoded into 6937 * each command's tag are 3 bits which communicate to the controller 6938 * which of the eight sizes that command fits within. The size of 6939 * each command depends on how many scatter gather entries there are. 6940 * Each SG entry requires 16 bytes. The eight registers are programmed 6941 * with the number of 16-byte blocks a command of that size requires. 6942 * The smallest command possible requires 5 such 16 byte blocks. 6943 * the largest command possible requires SG_ENTRIES_IN_CMD + 4 16-byte 6944 * blocks. Note, this only extends to the SG entries contained 6945 * within the command block, and does not extend to chained blocks 6946 * of SG elements. bft[] contains the eight values we write to 6947 * the registers. They are not evenly distributed, but have more 6948 * sizes for small commands, and fewer sizes for larger commands. 6949 */ 6950 int bft[8] = {5, 6, 8, 10, 12, 20, 28, SG_ENTRIES_IN_CMD + 4}; 6951 #define MIN_IOACCEL2_BFT_ENTRY 5 6952 #define HPSA_IOACCEL2_HEADER_SZ 4 6953 int bft2[16] = {MIN_IOACCEL2_BFT_ENTRY, 6, 7, 8, 9, 10, 11, 12, 6954 13, 14, 15, 16, 17, 18, 19, 6955 HPSA_IOACCEL2_HEADER_SZ + IOACCEL2_MAXSGENTRIES}; 6956 BUILD_BUG_ON(ARRAY_SIZE(bft2) != 16); 6957 BUILD_BUG_ON(ARRAY_SIZE(bft) != 8); 6958 BUILD_BUG_ON(offsetof(struct io_accel2_cmd, sg) > 6959 16 * MIN_IOACCEL2_BFT_ENTRY); 6960 BUILD_BUG_ON(sizeof(struct ioaccel2_sg_element) != 16); 6961 BUILD_BUG_ON(28 > SG_ENTRIES_IN_CMD + 4); 6962 /* 5 = 1 s/g entry or 4k 6963 * 6 = 2 s/g entry or 8k 6964 * 8 = 4 s/g entry or 16k 6965 * 10 = 6 s/g entry or 24k 6966 */ 6967 6968 /* If the controller supports either ioaccel method then 6969 * we can also use the RAID stack submit path that does not 6970 * perform the superfluous readl() after each command submission. 6971 */ 6972 if (trans_support & (CFGTBL_Trans_io_accel1 | CFGTBL_Trans_io_accel2)) 6973 access = SA5_performant_access_no_read; 6974 6975 /* Controller spec: zero out this buffer. 
*/ 6976 for (i = 0; i < h->nreply_queues; i++) 6977 memset(h->reply_queue[i].head, 0, h->reply_queue_size); 6978 6979 bft[7] = SG_ENTRIES_IN_CMD + 4; 6980 calc_bucket_map(bft, ARRAY_SIZE(bft), 6981 SG_ENTRIES_IN_CMD, 4, h->blockFetchTable); 6982 for (i = 0; i < 8; i++) 6983 writel(bft[i], &h->transtable->BlockFetch[i]); 6984 6985 /* size of controller ring buffer */ 6986 writel(h->max_commands, &h->transtable->RepQSize); 6987 writel(h->nreply_queues, &h->transtable->RepQCount); 6988 writel(0, &h->transtable->RepQCtrAddrLow32); 6989 writel(0, &h->transtable->RepQCtrAddrHigh32); 6990 6991 for (i = 0; i < h->nreply_queues; i++) { 6992 writel(0, &h->transtable->RepQAddr[i].upper); 6993 writel(h->reply_queue[i].busaddr, 6994 &h->transtable->RepQAddr[i].lower); 6995 } 6996 6997 writel(0, &h->cfgtable->HostWrite.command_pool_addr_hi); 6998 writel(transMethod, &(h->cfgtable->HostWrite.TransportRequest)); 6999 /* 7000 * enable outbound interrupt coalescing in accelerator mode; 7001 */ 7002 if (trans_support & CFGTBL_Trans_io_accel1) { 7003 access = SA5_ioaccel_mode1_access; 7004 writel(10, &h->cfgtable->HostWrite.CoalIntDelay); 7005 writel(4, &h->cfgtable->HostWrite.CoalIntCount); 7006 } else { 7007 if (trans_support & CFGTBL_Trans_io_accel2) { 7008 access = SA5_ioaccel_mode2_access; 7009 writel(10, &h->cfgtable->HostWrite.CoalIntDelay); 7010 writel(4, &h->cfgtable->HostWrite.CoalIntCount); 7011 } 7012 } 7013 writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL); 7014 hpsa_wait_for_mode_change_ack(h); 7015 register_value = readl(&(h->cfgtable->TransportActive)); 7016 if (!(register_value & CFGTBL_Trans_Performant)) { 7017 dev_err(&h->pdev->dev, 7018 "performant mode problem - transport not active\n"); 7019 return; 7020 } 7021 /* Change the access methods to the performant access methods */ 7022 h->access = access; 7023 h->transMethod = transMethod; 7024 7025 if (!((trans_support & CFGTBL_Trans_io_accel1) || 7026 (trans_support & CFGTBL_Trans_io_accel2))) 7027 return; 7028 7029 if (trans_support & CFGTBL_Trans_io_accel1) { 7030 /* Set up I/O accelerator mode */ 7031 for (i = 0; i < h->nreply_queues; i++) { 7032 writel(i, h->vaddr + IOACCEL_MODE1_REPLY_QUEUE_INDEX); 7033 h->reply_queue[i].current_entry = 7034 readl(h->vaddr + IOACCEL_MODE1_PRODUCER_INDEX); 7035 } 7036 bft[7] = h->ioaccel_maxsg + 8; 7037 calc_bucket_map(bft, ARRAY_SIZE(bft), h->ioaccel_maxsg, 8, 7038 h->ioaccel1_blockFetchTable); 7039 7040 /* initialize all reply queue entries to unused */ 7041 for (i = 0; i < h->nreply_queues; i++) 7042 memset(h->reply_queue[i].head, 7043 (u8) IOACCEL_MODE1_REPLY_UNUSED, 7044 h->reply_queue_size); 7045 7046 /* set all the constant fields in the accelerator command 7047 * frames once at init time to save CPU cycles later. 
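 *
 * (Note the tag scheme: frame i is stamped with
 * (i << DIRECT_LOOKUP_SHIFT) and a host_addr pointing at its own slot
 * in the DMA pool, so process_indexed_cmd() can recover the owning
 * CommandList at completion time by shifting the raw tag back down.)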
7048 */ 7049 for (i = 0; i < h->nr_cmds; i++) { 7050 struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[i]; 7051 7052 cp->function = IOACCEL1_FUNCTION_SCSIIO; 7053 cp->err_info = (u32) (h->errinfo_pool_dhandle + 7054 (i * sizeof(struct ErrorInfo))); 7055 cp->err_info_len = sizeof(struct ErrorInfo); 7056 cp->sgl_offset = IOACCEL1_SGLOFFSET; 7057 cp->host_context_flags = 7058 cpu_to_le16(IOACCEL1_HCFLAGS_CISS_FORMAT); 7059 cp->timeout_sec = 0; 7060 cp->ReplyQueue = 0; 7061 cp->tag = 7062 cpu_to_le64((i << DIRECT_LOOKUP_SHIFT)); 7063 cp->host_addr = 7064 cpu_to_le64(h->ioaccel_cmd_pool_dhandle + 7065 (i * sizeof(struct io_accel1_cmd))); 7066 } 7067 } else if (trans_support & CFGTBL_Trans_io_accel2) { 7068 u64 cfg_offset, cfg_base_addr_index; 7069 u32 bft2_offset, cfg_base_addr; 7070 int rc; 7071 7072 rc = hpsa_find_cfg_addrs(h->pdev, h->vaddr, &cfg_base_addr, 7073 &cfg_base_addr_index, &cfg_offset); 7074 BUILD_BUG_ON(offsetof(struct io_accel2_cmd, sg) != 64); 7075 bft2[15] = h->ioaccel_maxsg + HPSA_IOACCEL2_HEADER_SZ; 7076 calc_bucket_map(bft2, ARRAY_SIZE(bft2), h->ioaccel_maxsg, 7077 4, h->ioaccel2_blockFetchTable); 7078 bft2_offset = readl(&h->cfgtable->io_accel_request_size_offset); 7079 BUILD_BUG_ON(offsetof(struct CfgTable, 7080 io_accel_request_size_offset) != 0xb8); 7081 h->ioaccel2_bft2_regs = 7082 remap_pci_mem(pci_resource_start(h->pdev, 7083 cfg_base_addr_index) + 7084 cfg_offset + bft2_offset, 7085 ARRAY_SIZE(bft2) * 7086 sizeof(*h->ioaccel2_bft2_regs)); 7087 for (i = 0; i < ARRAY_SIZE(bft2); i++) 7088 writel(bft2[i], &h->ioaccel2_bft2_regs[i]); 7089 } 7090 writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL); 7091 hpsa_wait_for_mode_change_ack(h); 7092 } 7093 7094 static int hpsa_alloc_ioaccel_cmd_and_bft(struct ctlr_info *h) 7095 { 7096 h->ioaccel_maxsg = 7097 readl(&(h->cfgtable->io_accel_max_embedded_sg_count)); 7098 if (h->ioaccel_maxsg > IOACCEL1_MAXSGENTRIES) 7099 h->ioaccel_maxsg = IOACCEL1_MAXSGENTRIES; 7100 7101 /* Command structures must be aligned on a 128-byte boundary 7102 * because the 7 lower bits of the address are used by the 7103 * hardware. 
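 *
 * (The BUILD_BUG_ON just below enforces this at compile time: the pool
 * returned by pci_alloc_consistent() is page aligned, so every element
 * stays 128-byte aligned only if sizeof(struct io_accel1_cmd) is a
 * multiple of IOACCEL1_COMMANDLIST_ALIGNMENT.)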
static int hpsa_alloc_ioaccel_cmd_and_bft(struct ctlr_info *h)
{
	h->ioaccel_maxsg =
		readl(&(h->cfgtable->io_accel_max_embedded_sg_count));
	if (h->ioaccel_maxsg > IOACCEL1_MAXSGENTRIES)
		h->ioaccel_maxsg = IOACCEL1_MAXSGENTRIES;

	/* Command structures must be aligned on a 128-byte boundary
	 * because the 7 lower bits of the address are used by the
	 * hardware.
	 */
	BUILD_BUG_ON(sizeof(struct io_accel1_cmd) %
			IOACCEL1_COMMANDLIST_ALIGNMENT);
	h->ioaccel_cmd_pool =
		pci_alloc_consistent(h->pdev,
			h->nr_cmds * sizeof(*h->ioaccel_cmd_pool),
			&(h->ioaccel_cmd_pool_dhandle));

	h->ioaccel1_blockFetchTable =
		kmalloc(((h->ioaccel_maxsg + 1) *
				sizeof(u32)), GFP_KERNEL);

	if ((h->ioaccel_cmd_pool == NULL) ||
		(h->ioaccel1_blockFetchTable == NULL))
		goto clean_up;

	memset(h->ioaccel_cmd_pool, 0,
		h->nr_cmds * sizeof(*h->ioaccel_cmd_pool));
	return 0;

clean_up:
	if (h->ioaccel_cmd_pool)
		pci_free_consistent(h->pdev,
			h->nr_cmds * sizeof(*h->ioaccel_cmd_pool),
			h->ioaccel_cmd_pool, h->ioaccel_cmd_pool_dhandle);
	kfree(h->ioaccel1_blockFetchTable);
	return 1;
}

static int ioaccel2_alloc_cmds_and_bft(struct ctlr_info *h)
{
	/* Allocate ioaccel2 mode command blocks and block fetch table */

	h->ioaccel_maxsg =
		readl(&(h->cfgtable->io_accel_max_embedded_sg_count));
	if (h->ioaccel_maxsg > IOACCEL2_MAXSGENTRIES)
		h->ioaccel_maxsg = IOACCEL2_MAXSGENTRIES;

	BUILD_BUG_ON(sizeof(struct io_accel2_cmd) %
			IOACCEL2_COMMANDLIST_ALIGNMENT);
	h->ioaccel2_cmd_pool =
		pci_alloc_consistent(h->pdev,
			h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool),
			&(h->ioaccel2_cmd_pool_dhandle));

	h->ioaccel2_blockFetchTable =
		kmalloc(((h->ioaccel_maxsg + 1) *
				sizeof(u32)), GFP_KERNEL);

	if ((h->ioaccel2_cmd_pool == NULL) ||
		(h->ioaccel2_blockFetchTable == NULL))
		goto clean_up;

	memset(h->ioaccel2_cmd_pool, 0,
		h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool));
	return 0;

clean_up:
	if (h->ioaccel2_cmd_pool)
		pci_free_consistent(h->pdev,
			h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool),
			h->ioaccel2_cmd_pool, h->ioaccel2_cmd_pool_dhandle);
	kfree(h->ioaccel2_blockFetchTable);
	return 1;
}

static void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h)
{
	u32 trans_support;
	unsigned long transMethod = CFGTBL_Trans_Performant |
					CFGTBL_Trans_use_short_tags;
	int i;

	if (hpsa_simple_mode)
		return;

	trans_support = readl(&(h->cfgtable->TransportSupport));
	if (!(trans_support & PERFORMANT_MODE))
		return;

	/* Check for I/O accelerator mode support */
	if (trans_support & CFGTBL_Trans_io_accel1) {
		transMethod |= CFGTBL_Trans_io_accel1 |
				CFGTBL_Trans_enable_directed_msix;
		if (hpsa_alloc_ioaccel_cmd_and_bft(h))
			goto clean_up;
	} else if (trans_support & CFGTBL_Trans_io_accel2) {
		transMethod |= CFGTBL_Trans_io_accel2 |
				CFGTBL_Trans_enable_directed_msix;
		if (ioaccel2_alloc_cmds_and_bft(h))
			goto clean_up;
	}

	h->nreply_queues = h->msix_vector > 0 ? h->msix_vector : 1;
	hpsa_get_max_perf_mode_cmds(h);
	/* Performant mode ring buffer and supporting data structures */
	h->reply_queue_size = h->max_commands * sizeof(u64);

	for (i = 0; i < h->nreply_queues; i++) {
		h->reply_queue[i].head = pci_alloc_consistent(h->pdev,
						h->reply_queue_size,
						&(h->reply_queue[i].busaddr));
		if (!h->reply_queue[i].head)
			goto clean_up;
		h->reply_queue[i].size = h->max_commands;
		h->reply_queue[i].wraparound = 1; /* spec: init to 1 */
		h->reply_queue[i].current_entry = 0;
	}

	/* Need a block fetch table for performant mode */
	h->blockFetchTable = kmalloc(((SG_ENTRIES_IN_CMD + 1) *
				sizeof(u32)), GFP_KERNEL);
	if (!h->blockFetchTable)
		goto clean_up;

	hpsa_enter_performant_mode(h, trans_support);
	return;

clean_up:
	hpsa_free_reply_queues(h);
	kfree(h->blockFetchTable);
}

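/*
 * The two helpers below let callers quiesce the I/O accelerator path:
 * hpsa_drain_accel_commands() re-scans the command pool every 100 ms and
 * returns only once no ioaccel1/ioaccel2 commands remain outstanding.
 */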
static int is_accelerated_cmd(struct CommandList *c)
{
	return c->cmd_type == CMD_IOACCEL1 || c->cmd_type == CMD_IOACCEL2;
}

static void hpsa_drain_accel_commands(struct ctlr_info *h)
{
	struct CommandList *c = NULL;
	int i, accel_cmds_out;

	do { /* wait for all outstanding ioaccel commands to drain out */
		accel_cmds_out = 0;
		for (i = 0; i < h->nr_cmds; i++) {
			if (!test_bit(i & (BITS_PER_LONG - 1),
					h->cmd_pool_bits + (i / BITS_PER_LONG)))
				continue;
			c = h->cmd_pool + i;
			accel_cmds_out += is_accelerated_cmd(c);
		}
		if (accel_cmds_out <= 0)
			break;
		msleep(100);
	} while (1);
}

/*
 * This is it.  Register the PCI driver information for the cards we
 * control; the OS will call our registered routines when it finds one of
 * our cards.
 */
static int __init hpsa_init(void)
{
	return pci_register_driver(&hpsa_pci_driver);
}

static void __exit hpsa_cleanup(void)
{
	pci_unregister_driver(&hpsa_pci_driver);
}

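/*
 * verify_offsets() is never called; it exists only so the compiler
 * evaluates the BUILD_BUG_ON() checks below, breaking the build if any
 * of these on-the-wire structure layouts drifts from the offsets the
 * controller firmware expects.
 */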
static void __attribute__((unused)) verify_offsets(void)
{
#define VERIFY_OFFSET(member, offset) \
	BUILD_BUG_ON(offsetof(struct raid_map_data, member) != offset)

	VERIFY_OFFSET(structure_size, 0);
	VERIFY_OFFSET(volume_blk_size, 4);
	VERIFY_OFFSET(volume_blk_cnt, 8);
	VERIFY_OFFSET(phys_blk_shift, 16);
	VERIFY_OFFSET(parity_rotation_shift, 17);
	VERIFY_OFFSET(strip_size, 18);
	VERIFY_OFFSET(disk_starting_blk, 20);
	VERIFY_OFFSET(disk_blk_cnt, 28);
	VERIFY_OFFSET(data_disks_per_row, 36);
	VERIFY_OFFSET(metadata_disks_per_row, 38);
	VERIFY_OFFSET(row_cnt, 40);
	VERIFY_OFFSET(layout_map_count, 42);
	VERIFY_OFFSET(flags, 44);
	VERIFY_OFFSET(dekindex, 46);
	/* VERIFY_OFFSET(reserved, 48); */
	VERIFY_OFFSET(data, 64);

#undef VERIFY_OFFSET

#define VERIFY_OFFSET(member, offset) \
	BUILD_BUG_ON(offsetof(struct io_accel2_cmd, member) != offset)

	VERIFY_OFFSET(IU_type, 0);
	VERIFY_OFFSET(direction, 1);
	VERIFY_OFFSET(reply_queue, 2);
	/* VERIFY_OFFSET(reserved1, 3); */
	VERIFY_OFFSET(scsi_nexus, 4);
	VERIFY_OFFSET(Tag, 8);
	VERIFY_OFFSET(cdb, 16);
	VERIFY_OFFSET(cciss_lun, 32);
	VERIFY_OFFSET(data_len, 40);
	VERIFY_OFFSET(cmd_priority_task_attr, 44);
	VERIFY_OFFSET(sg_count, 45);
	/* VERIFY_OFFSET(reserved3); */
	VERIFY_OFFSET(err_ptr, 48);
	VERIFY_OFFSET(err_len, 56);
	/* VERIFY_OFFSET(reserved4); */
	VERIFY_OFFSET(sg, 64);

#undef VERIFY_OFFSET

#define VERIFY_OFFSET(member, offset) \
	BUILD_BUG_ON(offsetof(struct io_accel1_cmd, member) != offset)

	VERIFY_OFFSET(dev_handle, 0x00);
	VERIFY_OFFSET(reserved1, 0x02);
	VERIFY_OFFSET(function, 0x03);
	VERIFY_OFFSET(reserved2, 0x04);
	VERIFY_OFFSET(err_info, 0x0C);
	VERIFY_OFFSET(reserved3, 0x10);
	VERIFY_OFFSET(err_info_len, 0x12);
	VERIFY_OFFSET(reserved4, 0x13);
	VERIFY_OFFSET(sgl_offset, 0x14);
	VERIFY_OFFSET(reserved5, 0x15);
	VERIFY_OFFSET(transfer_len, 0x1C);
	VERIFY_OFFSET(reserved6, 0x20);
	VERIFY_OFFSET(io_flags, 0x24);
	VERIFY_OFFSET(reserved7, 0x26);
	VERIFY_OFFSET(LUN, 0x34);
	VERIFY_OFFSET(control, 0x3C);
	VERIFY_OFFSET(CDB, 0x40);
	VERIFY_OFFSET(reserved8, 0x50);
	VERIFY_OFFSET(host_context_flags, 0x60);
	VERIFY_OFFSET(timeout_sec, 0x62);
	VERIFY_OFFSET(ReplyQueue, 0x64);
	VERIFY_OFFSET(reserved9, 0x65);
	VERIFY_OFFSET(tag, 0x68);
	VERIFY_OFFSET(host_addr, 0x70);
	VERIFY_OFFSET(CISS_LUN, 0x78);
	VERIFY_OFFSET(SG, 0x78 + 8);
#undef VERIFY_OFFSET
}

module_init(hpsa_init);
module_exit(hpsa_cleanup);
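
/*
 * Loading example (assuming the module is built as hpsa.ko):
 *
 *	modprobe hpsa hpsa_simple_mode=1
 *
 * makes hpsa_put_ctlr_into_performant_mode() above return early, leaving
 * the controller in simple mode rather than performant mode.
 */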